v4.19.13 snapshot.
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
new file mode 100644
index 0000000..b9558ff
--- /dev/null
+++ b/drivers/bcma/Kconfig
@@ -0,0 +1,118 @@
+# SPDX-License-Identifier: GPL-2.0
+config BCMA_POSSIBLE
+	bool
+	depends on HAS_IOMEM && HAS_DMA
+	default y
+
+menuconfig BCMA
+	tristate "Broadcom specific AMBA"
+	depends on BCMA_POSSIBLE
+	help
+	  Bus driver for Broadcom specific Advanced Microcontroller Bus
+	  Architecture.
+
+if BCMA
+
+# Support for Block-I/O. SELECT this from the driver that needs it.
+config BCMA_BLOCKIO
+	bool
+
+config BCMA_HOST_PCI_POSSIBLE
+	bool
+	depends on PCI = y
+	default y
+
+config BCMA_HOST_PCI
+	bool "Support for BCMA on PCI-host bus"
+	depends on BCMA_HOST_PCI_POSSIBLE
+	select BCMA_DRIVER_PCI
+	default y
+
+config BCMA_HOST_SOC
+	bool "Support for BCMA in a SoC"
+	depends on HAS_IOMEM
+	help
+	  Host interface for a Broadcom AXI bus directly mapped into
+	  memory. This only works with the Broadcom SoCs from the
+	  BCM47XX line.
+
+	  If unsure, say N
+
+config BCMA_DRIVER_PCI
+	bool "BCMA Broadcom PCI core driver"
+	depends on PCI
+	default y
+	help
+	  The BCMA bus may have many versions of the PCIe core. This driver
+	  supports:
+	  1) PCIe core working in clientmode
+	  2) PCIe Gen 2 clientmode core
+
+	  In general a PCIe (Gen 2) clientmode core is required on PCIe
+	  hosted buses. It's responsible for initialization and basic
+	  hardware management.
+	  This driver is also a prerequisite for hostmode PCIe core
+	  support.
+
+config BCMA_DRIVER_PCI_HOSTMODE
+	bool "Driver for PCI core working in hostmode"
+	depends on MIPS && BCMA_DRIVER_PCI && PCI_DRIVERS_LEGACY && BCMA = y
+	help
+	  PCI core hostmode operation (external PCI bus).
+
+config BCMA_DRIVER_MIPS
+	bool "BCMA Broadcom MIPS core driver"
+	depends on MIPS || COMPILE_TEST
+	help
+	  Driver for the Broadcom MIPS core attached to Broadcom specific
+	  Advanced Microcontroller Bus.
+
+	  If unsure, say N
+
+config BCMA_PFLASH
+	bool
+	depends on BCMA_DRIVER_MIPS
+	default y
+
+config BCMA_SFLASH
+	bool "ChipCommon-attached serial flash support"
+	depends on BCMA_HOST_SOC
+	default y
+	help
+	  Some cheap devices have serial flash connected to the ChipCommon
+	  instead of an independent SPI controller. Such flash requires a
+	  separate driver that talks to it over the ChipCommon specific
+	  interface.
+
+	  Enabling this symbol will let bcma recognize serial flash and
+	  register it as a platform device.
+
+config BCMA_NFLASH
+	bool
+	depends on BCMA_DRIVER_MIPS
+	default y
+
+config BCMA_DRIVER_GMAC_CMN
+	bool "BCMA Broadcom GBIT MAC COMMON core driver"
+	help
+	  Driver for the Broadcom GBIT MAC COMMON core attached to Broadcom
+	  specific Advanced Microcontroller Bus.
+
+	  If unsure, say N
+
+config BCMA_DRIVER_GPIO
+	bool "BCMA GPIO driver"
+	depends on GPIOLIB
+	select GPIOLIB_IRQCHIP if BCMA_HOST_SOC
+	help
+	  Driver to provide access to the GPIO pins of the bcma bus.
+
+	  If unsure, say N
+
+config BCMA_DEBUG
+	bool "BCMA debugging"
+	help
+	  This turns on additional debugging messages.
+
+	  If unsure, say N
+
+endif # BCMA
diff --git a/drivers/bcma/Makefile b/drivers/bcma/Makefile
new file mode 100644
index 0000000..f8c37de
--- /dev/null
+++ b/drivers/bcma/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+bcma-y					+= main.o scan.o core.o sprom.o
+bcma-y					+= driver_chipcommon.o driver_chipcommon_pmu.o
+bcma-y					+= driver_chipcommon_b.o
+bcma-$(CONFIG_BCMA_PFLASH)		+= driver_chipcommon_pflash.o
+bcma-$(CONFIG_BCMA_SFLASH)		+= driver_chipcommon_sflash.o
+bcma-$(CONFIG_BCMA_NFLASH)		+= driver_chipcommon_nflash.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI)		+= driver_pci.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI)		+= driver_pcie2.o
+bcma-$(CONFIG_BCMA_DRIVER_PCI_HOSTMODE)	+= driver_pci_host.o
+bcma-$(CONFIG_BCMA_DRIVER_MIPS)		+= driver_mips.o
+bcma-$(CONFIG_BCMA_DRIVER_GMAC_CMN)	+= driver_gmac_cmn.o
+bcma-$(CONFIG_BCMA_DRIVER_GPIO)		+= driver_gpio.o
+bcma-$(CONFIG_BCMA_HOST_PCI)		+= host_pci.o
+bcma-$(CONFIG_BCMA_HOST_SOC)		+= host_soc.o
+obj-$(CONFIG_BCMA)			+= bcma.o
+
+ccflags-$(CONFIG_BCMA_DEBUG)		:= -DDEBUG
diff --git a/drivers/bcma/README b/drivers/bcma/README
new file mode 100644
index 0000000..f7e7ce4
--- /dev/null
+++ b/drivers/bcma/README
@@ -0,0 +1,19 @@
+Broadcom introduced this bus as a replacement for the older SSB. It is based
+on AMBA; however, from a programming point of view there is nothing AMBA
+specific that we use.
+
+Standard AMBA drivers are platform specific, have hardcoded addresses and use
+AMBA standard fields like CID and PID.
+
+In the case of Broadcom's cards every device consists of:
+1) A Broadcom specific AMBA device. It is put on the AMBA bus, but cannot be
+   treated as a standard AMBA device: reading its CID or PID can cause a
+   machine lockup.
+2) AMBA standard devices called ports or wrappers. They have CIDs (AMBA_CID)
+   and PIDs (0x103BB369), but we do not use that info for anything. One of
+   those devices is used for managing the Broadcom specific core.
+
+Addresses of the AMBA devices are not hardcoded in the driver and have to be
+read from the EPROM.
+
+In this situation we decided to introduce a separate bus. It can contain up
+to 16 devices identified by Broadcom specific fields: manufacturer, id,
+revision and class.
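+
+For illustration only, a client driver binds to such a device with an id table
+built from those four fields. A minimal sketch (the foo_* names are
+placeholders; the authoritative definitions live in include/linux/bcma/bcma.h):
+
+	static const struct bcma_device_id foo_bcma_tbl[] = {
+		BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211,
+			  BCMA_ANY_REV, BCMA_ANY_CLASS),
+		{},
+	};
+	MODULE_DEVICE_TABLE(bcma, foo_bcma_tbl);
+
+	static struct bcma_driver foo_bcma_driver = {
+		.name		= KBUILD_MODNAME,
+		.id_table	= foo_bcma_tbl,
+		.probe		= foo_probe,	/* int (*)(struct bcma_device *) */
+		.remove		= foo_remove,	/* void (*)(struct bcma_device *) */
+	};
+
+The driver is then registered with bcma_driver_register(&foo_bcma_driver) and
+unregistered with bcma_driver_unregister(&foo_bcma_driver).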
diff --git a/drivers/bcma/TODO b/drivers/bcma/TODO
new file mode 100644
index 0000000..da7aa99
--- /dev/null
+++ b/drivers/bcma/TODO
@@ -0,0 +1,3 @@
+- Interrupts
+- Defines for PCI core driver
+- Create kernel Documentation (use info from README)
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
new file mode 100644
index 0000000..a4aac37
--- /dev/null
+++ b/drivers/bcma/bcma_private.h
@@ -0,0 +1,216 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef LINUX_BCMA_PRIVATE_H_
+#define LINUX_BCMA_PRIVATE_H_
+
+#ifndef pr_fmt
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/bcma/bcma.h>
+#include <linux/delay.h>
+
+#define bcma_err(bus, fmt, ...) \
+	pr_err("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_warn(bus, fmt, ...) \
+	pr_warn("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_info(bus, fmt, ...) \
+	pr_info("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
+#define bcma_debug(bus, fmt, ...) \
+	pr_debug("bus%d: " fmt, (bus)->num, ##__VA_ARGS__)
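+
+/*
+ * These helpers prefix messages with the bus number, e.g. (illustrative):
+ *
+ *	bcma_info(bus, "Found chip with id 0x%04X\n", bus->chipinfo.id);
+ */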
+
+struct bcma_bus;
+
+/* main.c */
+bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
+		     int timeout);
+void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core);
+void bcma_init_bus(struct bcma_bus *bus);
+void bcma_unregister_cores(struct bcma_bus *bus);
+int bcma_bus_register(struct bcma_bus *bus);
+void bcma_bus_unregister(struct bcma_bus *bus);
+int __init bcma_bus_early_register(struct bcma_bus *bus);
+#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus);
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus);
+
+/* scan.c */
+void bcma_detect_chip(struct bcma_bus *bus);
+int bcma_bus_scan(struct bcma_bus *bus);
+
+/* sprom.c */
+int bcma_sprom_get(struct bcma_bus *bus);
+
+/* driver_chipcommon.c */
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
+
+/* driver_chipcommon_b.c */
+int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
+void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb);
+
+/* driver_chipcommon_pmu.c */
+void bcma_pmu_early_init(struct bcma_drv_cc *cc);
+void bcma_pmu_init(struct bcma_drv_cc *cc);
+u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc);
+u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc);
+
+/**************************************************
+ * driver_chipcommon_pflash.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_PFLASH
+extern struct platform_device bcma_pflash_dev;
+int bcma_pflash_init(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_pflash_init(struct bcma_drv_cc *cc)
+{
+	bcma_err(cc->core->bus, "Parallel flash not supported\n");
+	return 0;
+}
+#endif /* CONFIG_BCMA_PFLASH */
+
+#ifdef CONFIG_BCMA_SFLASH
+/* driver_chipcommon_sflash.c */
+int bcma_sflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_sflash_dev;
+#else
+static inline int bcma_sflash_init(struct bcma_drv_cc *cc)
+{
+	bcma_err(cc->core->bus, "Serial flash not supported\n");
+	return 0;
+}
+#endif /* CONFIG_BCMA_SFLASH */
+
+#ifdef CONFIG_BCMA_NFLASH
+/* driver_chipcommon_nflash.c */
+int bcma_nflash_init(struct bcma_drv_cc *cc);
+extern struct platform_device bcma_nflash_dev;
+#else
+static inline int bcma_nflash_init(struct bcma_drv_cc *cc)
+{
+	bcma_err(cc->core->bus, "NAND flash not supported\n");
+	return 0;
+}
+#endif /* CONFIG_BCMA_NFLASH */
+
+#ifdef CONFIG_BCMA_HOST_PCI
+/* host_pci.c */
+extern int __init bcma_host_pci_init(void);
+extern void __exit bcma_host_pci_exit(void);
+#endif /* CONFIG_BCMA_HOST_PCI */
+
+/* host_soc.c */
+#if defined(CONFIG_BCMA_HOST_SOC) && defined(CONFIG_OF)
+extern int __init bcma_host_soc_register_driver(void);
+extern void __exit bcma_host_soc_unregister_driver(void);
+#else
+static inline int __init bcma_host_soc_register_driver(void)
+{
+	return 0;
+}
+static inline void __exit bcma_host_soc_unregister_driver(void)
+{
+}
+#endif /* CONFIG_BCMA_HOST_SOC && CONFIG_OF */
+
+/* driver_pci.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
+u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address);
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_init(struct bcma_drv_pci *pc);
+void bcma_core_pci_up(struct bcma_drv_pci *pc);
+void bcma_core_pci_down(struct bcma_drv_pci *pc);
+#else
+static inline void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+	WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+static inline void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+	/* Initialization is required for PCI hosted bus */
+	WARN_ON(pc->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
+
+/* driver_pcie2.c */
+#ifdef CONFIG_BCMA_DRIVER_PCI
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2);
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2);
+#else
+static inline void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
+{
+	/* Initialization is required for PCI hosted bus */
+	WARN_ON(pcie2->core->bus->hosttype == BCMA_HOSTTYPE_PCI);
+}
+#endif
+
+extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
+
+#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
+bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+#else
+static inline bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+	return false;
+}
+static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+{
+}
+#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
+
+/**************************************************
+ * driver_mips.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+unsigned int bcma_core_mips_irq(struct bcma_device *dev);
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore);
+void bcma_core_mips_init(struct bcma_drv_mips *mcore);
+#else
+static inline unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+	return 0;
+}
+static inline void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+}
+static inline void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+}
+#endif
+
+/**************************************************
+ * driver_gmac_cmn.c
+ **************************************************/
+
+#ifdef CONFIG_BCMA_DRIVER_GMAC_CMN
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc);
+#else
+static inline void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+}
+#endif
+
+#ifdef CONFIG_BCMA_DRIVER_GPIO
+/* driver_gpio.c */
+int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
+#else
+static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+	return -ENOTSUPP;
+}
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+	return 0;
+}
+#endif /* CONFIG_BCMA_DRIVER_GPIO */
+
+#endif
diff --git a/drivers/bcma/core.c b/drivers/bcma/core.c
new file mode 100644
index 0000000..37a5ffe
--- /dev/null
+++ b/drivers/bcma/core.c
@@ -0,0 +1,156 @@
+/*
+ * Broadcom specific AMBA
+ * Core ops
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+static bool bcma_core_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+				 u32 value, int timeout)
+{
+	unsigned long deadline = jiffies + timeout;
+	u32 val;
+
+	do {
+		val = bcma_aread32(core, reg);
+		if ((val & mask) == value)
+			return true;
+		cpu_relax();
+		udelay(10);
+	} while (!time_after_eq(jiffies, deadline));
+
+	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
+
+	return false;
+}
+
+bool bcma_core_is_enabled(struct bcma_device *core)
+{
+	if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
+	    != BCMA_IOCTL_CLK)
+		return false;
+	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+		return false;
+	return true;
+}
+EXPORT_SYMBOL_GPL(bcma_core_is_enabled);
+
+void bcma_core_disable(struct bcma_device *core, u32 flags)
+{
+	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+		return;
+
+	bcma_core_wait_value(core, BCMA_RESET_ST, ~0, 0, 300);
+
+	bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
+	bcma_aread32(core, BCMA_RESET_CTL);
+	udelay(1);
+
+	bcma_awrite32(core, BCMA_IOCTL, flags);
+	bcma_aread32(core, BCMA_IOCTL);
+	udelay(10);
+}
+EXPORT_SYMBOL_GPL(bcma_core_disable);
+
+int bcma_core_enable(struct bcma_device *core, u32 flags)
+{
+	bcma_core_disable(core, flags);
+
+	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
+	bcma_aread32(core, BCMA_IOCTL);
+
+	bcma_awrite32(core, BCMA_RESET_CTL, 0);
+	bcma_aread32(core, BCMA_RESET_CTL);
+	udelay(1);
+
+	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
+	bcma_aread32(core, BCMA_IOCTL);
+	udelay(1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(bcma_core_enable);
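+
+/*
+ * Illustrative call sequence from a core driver; the IOCTL flags passed in
+ * are core specific (0 is just a placeholder here):
+ *
+ *	if (!bcma_core_is_enabled(core))
+ *		bcma_core_enable(core, 0);
+ */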
+
+void bcma_core_set_clockmode(struct bcma_device *core,
+			     enum bcma_clkmode clkmode)
+{
+	u16 i;
+
+	WARN_ON(core->id.id != BCMA_CORE_CHIPCOMMON &&
+		core->id.id != BCMA_CORE_PCIE &&
+		core->id.id != BCMA_CORE_80211);
+
+	switch (clkmode) {
+	case BCMA_CLKMODE_FAST:
+		bcma_set32(core, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
+		usleep_range(64, 300);
+		for (i = 0; i < 1500; i++) {
+			if (bcma_read32(core, BCMA_CLKCTLST) &
+			    BCMA_CLKCTLST_HAVEHT) {
+				i = 0;
+				break;
+			}
+			udelay(10);
+		}
+		if (i)
+			bcma_err(core->bus, "HT force timeout\n");
+		break;
+	case BCMA_CLKMODE_DYNAMIC:
+		bcma_set32(core, BCMA_CLKCTLST, ~BCMA_CLKCTLST_FORCEHT);
+		break;
+	}
+}
+EXPORT_SYMBOL_GPL(bcma_core_set_clockmode);
+
+void bcma_core_pll_ctl(struct bcma_device *core, u32 req, u32 status, bool on)
+{
+	u16 i;
+
+	WARN_ON(req & ~BCMA_CLKCTLST_EXTRESREQ);
+	WARN_ON(status & ~BCMA_CLKCTLST_EXTRESST);
+
+	if (on) {
+		bcma_set32(core, BCMA_CLKCTLST, req);
+		for (i = 0; i < 10000; i++) {
+			if ((bcma_read32(core, BCMA_CLKCTLST) & status) ==
+			    status) {
+				i = 0;
+				break;
+			}
+			udelay(10);
+		}
+		if (i)
+			bcma_err(core->bus, "PLL enable timeout\n");
+	} else {
+		/*
+		 * Mask the PLL but don't wait for it to be disabled. PLL may be
+		 * shared between cores and will be still up if there is another
+		 * core using it.
+		 */
+		bcma_mask32(core, BCMA_CLKCTLST, ~req);
+		bcma_read32(core, BCMA_CLKCTLST);
+	}
+}
+EXPORT_SYMBOL_GPL(bcma_core_pll_ctl);
+
+u32 bcma_core_dma_translation(struct bcma_device *core)
+{
+	switch (core->bus->hosttype) {
+	case BCMA_HOSTTYPE_SOC:
+		return 0;
+	case BCMA_HOSTTYPE_PCI:
+		if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
+			return BCMA_DMA_TRANSLATION_DMA64_CMT;
+		else
+			return BCMA_DMA_TRANSLATION_DMA32_CMT;
+	default:
+		bcma_err(core->bus, "DMA translation unknown for host %d\n",
+			 core->bus->hosttype);
+	}
+	return BCMA_DMA_TRANSLATION_NONE;
+}
+EXPORT_SYMBOL(bcma_core_dma_translation);
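+
+/*
+ * Client drivers typically OR the value returned by
+ * bcma_core_dma_translation() into the addresses they program into their DMA
+ * descriptors, e.g. (illustrative):
+ *
+ *	addr_hi |= bcma_core_dma_translation(core);
+ */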
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
new file mode 100644
index 0000000..62f5bfa
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon.c
@@ -0,0 +1,421 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon core driver
+ *
+ * Copyright 2005, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcm47xx_wdt.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
+					 u32 mask, u32 value)
+{
+	value &= mask;
+	value |= bcma_cc_read32(cc, offset) & ~mask;
+	bcma_cc_write32(cc, offset, value);
+
+	return value;
+}
+
+u32 bcma_chipco_get_alp_clock(struct bcma_drv_cc *cc)
+{
+	if (cc->capabilities & BCMA_CC_CAP_PMU)
+		return bcma_pmu_get_alp_clock(cc);
+
+	return 20000000;
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_get_alp_clock);
+
+static bool bcma_core_cc_has_pmu_watchdog(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (cc->capabilities & BCMA_CC_CAP_PMU) {
+		if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573) {
+			WARN(bus->chipinfo.rev <= 1, "No watchdog available\n");
+			/* 53573B0 and 53573B1 have bugged PMU watchdog. It can
+			 * be enabled but timer can't be bumped. Use CC one
+			 * instead.
+			 */
+			return false;
+		}
+		return true;
+	} else {
+		return false;
+	}
+}
+
+static u32 bcma_chipco_watchdog_get_max_timer(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	u32 nb;
+
+	if (bcma_core_cc_has_pmu_watchdog(cc)) {
+		if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+			nb = 32;
+		else if (cc->core->id.rev < 26)
+			nb = 16;
+		else
+			nb = (cc->core->id.rev >= 37) ? 32 : 24;
+	} else {
+		nb = 28;
+	}
+	if (nb == 32)
+		return 0xffffffff;
+	else
+		return (1 << nb) - 1;
+}
+
+static u32 bcma_chipco_watchdog_timer_set_wdt(struct bcm47xx_wdt *wdt,
+					      u32 ticks)
+{
+	struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
+
+	return bcma_chipco_watchdog_timer_set(cc, ticks);
+}
+
+static u32 bcma_chipco_watchdog_timer_set_ms_wdt(struct bcm47xx_wdt *wdt,
+						 u32 ms)
+{
+	struct bcma_drv_cc *cc = bcm47xx_wdt_get_drvdata(wdt);
+	u32 ticks;
+
+	ticks = bcma_chipco_watchdog_timer_set(cc, cc->ticks_per_ms * ms);
+	return ticks / cc->ticks_per_ms;
+}
+
+static int bcma_chipco_watchdog_ticks_per_ms(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (cc->capabilities & BCMA_CC_CAP_PMU) {
+		if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+			/* 4706 CC and PMU watchdogs are clocked at 1/4 of ALP
+			 * clock
+			 */
+			return bcma_chipco_get_alp_clock(cc) / 4000;
+		else
+			/* based on 32KHz ILP clock */
+			return 32;
+	} else {
+		return bcma_chipco_get_alp_clock(cc) / 1000;
+	}
+}
+
+int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	struct bcm47xx_wdt wdt = {};
+	struct platform_device *pdev;
+
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53573 &&
+	    bus->chipinfo.rev <= 1) {
+		pr_debug("No watchdog on 53573A0 / 53573A1\n");
+		return 0;
+	}
+
+	wdt.driver_data = cc;
+	wdt.timer_set = bcma_chipco_watchdog_timer_set_wdt;
+	wdt.timer_set_ms = bcma_chipco_watchdog_timer_set_ms_wdt;
+	wdt.max_timer_ms =
+		bcma_chipco_watchdog_get_max_timer(cc) / cc->ticks_per_ms;
+
+	pdev = platform_device_register_data(NULL, "bcm47xx-wdt",
+					     bus->num, &wdt,
+					     sizeof(wdt));
+	if (IS_ERR(pdev))
+		return PTR_ERR(pdev);
+
+	cc->watchdog = pdev;
+
+	return 0;
+}
+
+static void bcma_core_chipcommon_flash_detect(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+	case BCMA_CC_FLASHT_STSER:
+	case BCMA_CC_FLASHT_ATSER:
+		bcma_debug(bus, "Found serial flash\n");
+		bcma_sflash_init(cc);
+		break;
+	case BCMA_CC_FLASHT_PARA:
+		bcma_debug(bus, "Found parallel flash\n");
+		bcma_pflash_init(cc);
+		break;
+	default:
+		bcma_err(bus, "Flash type not supported\n");
+	}
+
+	if (cc->core->id.rev == 38 ||
+	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+		if (cc->capabilities & BCMA_CC_CAP_NFLASH) {
+			bcma_debug(bus, "Found NAND flash\n");
+			bcma_nflash_init(cc);
+		}
+	}
+}
+
+void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (cc->early_setup_done)
+		return;
+
+	spin_lock_init(&cc->gpio_lock);
+
+	if (cc->core->id.rev >= 11)
+		cc->status = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+	cc->capabilities = bcma_cc_read32(cc, BCMA_CC_CAP);
+	if (cc->core->id.rev >= 35)
+		cc->capabilities_ext = bcma_cc_read32(cc, BCMA_CC_CAP_EXT);
+
+	if (cc->capabilities & BCMA_CC_CAP_PMU)
+		bcma_pmu_early_init(cc);
+
+	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+		bcma_core_chipcommon_flash_detect(cc);
+
+	cc->early_setup_done = true;
+}
+
+void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
+{
+	u32 leddc_on = 10;
+	u32 leddc_off = 90;
+
+	if (cc->setup_done)
+		return;
+
+	bcma_core_chipcommon_early_init(cc);
+
+	if (cc->core->id.rev >= 20) {
+		u32 pullup = 0, pulldown = 0;
+
+		if (cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM43142) {
+			pullup = 0x402e0;
+			pulldown = 0x20500;
+		}
+
+		bcma_cc_write32(cc, BCMA_CC_GPIOPULLUP, pullup);
+		bcma_cc_write32(cc, BCMA_CC_GPIOPULLDOWN, pulldown);
+	}
+
+	if (cc->capabilities & BCMA_CC_CAP_PMU)
+		bcma_pmu_init(cc);
+	if (cc->capabilities & BCMA_CC_CAP_PCTL)
+		bcma_err(cc->core->bus, "Power control not implemented!\n");
+
+	if (cc->core->id.rev >= 16) {
+		if (cc->core->bus->sprom.leddc_on_time &&
+		    cc->core->bus->sprom.leddc_off_time) {
+			leddc_on = cc->core->bus->sprom.leddc_on_time;
+			leddc_off = cc->core->bus->sprom.leddc_off_time;
+		}
+		bcma_cc_write32(cc, BCMA_CC_GPIOTIMER,
+			((leddc_on << BCMA_CC_GPIOTIMER_ONTIME_SHIFT) |
+			 (leddc_off << BCMA_CC_GPIOTIMER_OFFTIME_SHIFT)));
+	}
+	cc->ticks_per_ms = bcma_chipco_watchdog_ticks_per_ms(cc);
+
+	cc->setup_done = true;
+}
+
+/* Set chip watchdog reset timer to fire in 'ticks' backplane cycles */
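+/* Passing 0 disables the watchdog; values above the hardware maximum are
+ * clamped, and the value actually programmed is returned.
+ */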
+u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
+{
+	u32 maxt;
+
+	maxt = bcma_chipco_watchdog_get_max_timer(cc);
+	if (bcma_core_cc_has_pmu_watchdog(cc)) {
+		if (ticks == 1)
+			ticks = 2;
+		else if (ticks > maxt)
+			ticks = maxt;
+		bcma_pmu_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
+	} else {
+		struct bcma_bus *bus = cc->core->bus;
+
+		if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
+		    bus->chipinfo.id != BCMA_CHIP_ID_BCM47094 &&
+		    bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
+			bcma_core_set_clockmode(cc->core,
+						ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
+
+		if (ticks > maxt)
+			ticks = maxt;
+		/* instant NMI */
+		bcma_cc_write32(cc, BCMA_CC_WATCHDOG, ticks);
+	}
+	return ticks;
+}
+
+void bcma_chipco_irq_mask(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	bcma_cc_write32_masked(cc, BCMA_CC_IRQMASK, mask, value);
+}
+
+u32 bcma_chipco_irq_status(struct bcma_drv_cc *cc, u32 mask)
+{
+	return bcma_cc_read32(cc, BCMA_CC_IRQSTAT) & mask;
+}
+
+u32 bcma_chipco_gpio_in(struct bcma_drv_cc *cc, u32 mask)
+{
+	return bcma_cc_read32(cc, BCMA_CC_GPIOIN) & mask;
+}
+
+u32 bcma_chipco_gpio_out(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUT, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_gpio_out);
+
+u32 bcma_chipco_gpio_outen(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOOUTEN, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_gpio_outen);
+
+/*
+ * If the bit is set to 0, ChipCommon controls this GPIO; if the bit is set
+ * to 1, the GPIO is used by some other part of the chip and not by our code.
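+ * For example (illustrative), bcma_chipco_gpio_control(cc, BIT(7), 0) hands
+ * GPIO 7 back to ChipCommon so it can be used as a plain GPIO.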
+ */
+u32 bcma_chipco_gpio_control(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOCTL, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_gpio_control);
+
+u32 bcma_chipco_gpio_intmask(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOIRQ, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+
+u32 bcma_chipco_gpio_polarity(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPOL, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+
+u32 bcma_chipco_gpio_pullup(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	if (cc->core->id.rev < 20)
+		return 0;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLUP, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+
+u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
+{
+	unsigned long flags;
+	u32 res;
+
+	if (cc->core->id.rev < 20)
+		return 0;
+
+	spin_lock_irqsave(&cc->gpio_lock, flags);
+	res = bcma_cc_write32_masked(cc, BCMA_CC_GPIOPULLDOWN, mask, value);
+	spin_unlock_irqrestore(&cc->gpio_lock, flags);
+
+	return res;
+}
+
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+{
+	unsigned int irq;
+	u32 baud_base;
+	u32 i;
+	unsigned int ccrev = cc->core->id.rev;
+	struct bcma_serial_port *ports = cc->serial_ports;
+
+	if (ccrev >= 11 && ccrev != 15) {
+		baud_base = bcma_chipco_get_alp_clock(cc);
+		if (ccrev >= 21) {
+			/* Turn off UART clock before switching clocksource. */
+			bcma_cc_write32(cc, BCMA_CC_CORECTL,
+				       bcma_cc_read32(cc, BCMA_CC_CORECTL)
+				       & ~BCMA_CC_CORECTL_UARTCLKEN);
+		}
+		/* Set the override bit so we don't divide it */
+		bcma_cc_write32(cc, BCMA_CC_CORECTL,
+			       bcma_cc_read32(cc, BCMA_CC_CORECTL)
+			       | BCMA_CC_CORECTL_UARTCLK0);
+		if (ccrev >= 21) {
+			/* Re-enable the UART clock. */
+			bcma_cc_write32(cc, BCMA_CC_CORECTL,
+				       bcma_cc_read32(cc, BCMA_CC_CORECTL)
+				       | BCMA_CC_CORECTL_UARTCLKEN);
+		}
+	} else {
+		bcma_err(cc->core->bus, "serial not supported on this device ccrev: 0x%x\n",
+			 ccrev);
+		return;
+	}
+
+	irq = bcma_core_irq(cc->core, 0);
+
+	/* Determine the registers of the UARTs */
+	cc->nr_serial_ports = (cc->capabilities & BCMA_CC_CAP_NRUART);
+	for (i = 0; i < cc->nr_serial_ports; i++) {
+		ports[i].regs = cc->core->io_addr + BCMA_CC_UART0_DATA +
+				(i * 256);
+		ports[i].irq = irq;
+		ports[i].baud_base = baud_base;
+		ports[i].reg_shift = 0;
+	}
+}
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_chipcommon_b.c b/drivers/bcma/driver_chipcommon_b.c
new file mode 100644
index 0000000..57f10b5
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_b.c
@@ -0,0 +1,62 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon B Unit driver
+ *
+ * Copyright 2014, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+static bool bcma_wait_reg(struct bcma_bus *bus, void __iomem *addr, u32 mask,
+			  u32 value, int timeout)
+{
+	unsigned long deadline = jiffies + timeout;
+	u32 val;
+
+	do {
+		val = readl(addr);
+		if ((val & mask) == value)
+			return true;
+		cpu_relax();
+		udelay(10);
+	} while (!time_after_eq(jiffies, deadline));
+
+	bcma_err(bus, "Timeout waiting for register %p\n", addr);
+
+	return false;
+}
+
+void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value)
+{
+	struct bcma_bus *bus = ccb->core->bus;
+	void __iomem *mii = ccb->mii;
+
+	writel(offset, mii + BCMA_CCB_MII_MNG_CTL);
+	bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100);
+	writel(value, mii + BCMA_CCB_MII_MNG_CMD_DATA);
+	bcma_wait_reg(bus, mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, 100);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_b_mii_write);
+
+int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
+{
+	if (ccb->setup_done)
+		return 0;
+
+	ccb->setup_done = 1;
+	ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE);
+	if (!ccb->mii)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb)
+{
+	if (ccb->mii)
+		iounmap(ccb->mii);
+}
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
new file mode 100644
index 0000000..d4f699a
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -0,0 +1,44 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon NAND flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+struct platform_device bcma_nflash_dev = {
+	.name		= "bcma_nflash",
+	.num_resources	= 0,
+};
+
+/* Initialize NAND flash access */
+int bcma_nflash_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
+	    cc->core->id.rev != 38) {
+		bcma_err(bus, "NAND flash on unsupported board!\n");
+		return -ENOTSUPP;
+	}
+
+	if (!(cc->capabilities & BCMA_CC_CAP_NFLASH)) {
+		bcma_err(bus, "NAND flash not present according to ChipCommon\n");
+		return -ENODEV;
+	}
+
+	cc->nflash.present = true;
+	if (cc->core->id.rev == 38 &&
+	    (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT))
+		cc->nflash.boot = true;
+
+	/* Prepare platform device, but don't register it yet. It's too early,
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_nflash_dev.dev.platform_data = &cc->nflash;
+
+	return 0;
+}
diff --git a/drivers/bcma/driver_chipcommon_pflash.c b/drivers/bcma/driver_chipcommon_pflash.c
new file mode 100644
index 0000000..3b497c9
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_pflash.c
@@ -0,0 +1,49 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon parallel flash
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/mtd/physmap.h>
+#include <linux/platform_device.h>
+
+static const char * const part_probes[] = { "bcm47xxpart", NULL };
+
+static struct physmap_flash_data bcma_pflash_data = {
+	.part_probe_types	= part_probes,
+};
+
+static struct resource bcma_pflash_resource = {
+	.name	= "bcma_pflash",
+	.flags  = IORESOURCE_MEM,
+};
+
+struct platform_device bcma_pflash_dev = {
+	.name		= "physmap-flash",
+	.dev		= {
+		.platform_data  = &bcma_pflash_data,
+	},
+	.resource	= &bcma_pflash_resource,
+	.num_resources	= 1,
+};
+
+int bcma_pflash_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_pflash *pflash = &cc->pflash;
+
+	pflash->present = true;
+
+	if (!(bcma_read32(cc->core, BCMA_CC_FLASH_CFG) & BCMA_CC_FLASH_CFG_DS))
+		bcma_pflash_data.width = 1;
+	else
+		bcma_pflash_data.width = 2;
+
+	bcma_pflash_resource.start = BCMA_SOC_FLASH2;
+	bcma_pflash_resource.end = BCMA_SOC_FLASH2 + BCMA_SOC_FLASH2_SZ;
+
+	return 0;
+}
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
new file mode 100644
index 0000000..f416106
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -0,0 +1,662 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon Power Management Unit driver
+ *
+ * Copyright 2009, Michael Buesch <m@bues.ch>
+ * Copyright 2007, 2011, Broadcom Corporation
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+u32 bcma_chipco_pll_read(struct bcma_drv_cc *cc, u32 offset)
+{
+	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+	bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+	return bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_read);
+
+void bcma_chipco_pll_write(struct bcma_drv_cc *cc, u32 offset, u32 value)
+{
+	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+	bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_write);
+
+void bcma_chipco_pll_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+			     u32 set)
+{
+	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+	bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_ADDR);
+	bcma_pmu_maskset32(cc, BCMA_CC_PMU_PLLCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_pll_maskset);
+
+void bcma_chipco_chipctl_maskset(struct bcma_drv_cc *cc,
+				 u32 offset, u32 mask, u32 set)
+{
+	bcma_pmu_write32(cc, BCMA_CC_PMU_CHIPCTL_ADDR, offset);
+	bcma_pmu_read32(cc, BCMA_CC_PMU_CHIPCTL_ADDR);
+	bcma_pmu_maskset32(cc, BCMA_CC_PMU_CHIPCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_chipctl_maskset);
+
+void bcma_chipco_regctl_maskset(struct bcma_drv_cc *cc, u32 offset, u32 mask,
+				u32 set)
+{
+	bcma_pmu_write32(cc, BCMA_CC_PMU_REGCTL_ADDR, offset);
+	bcma_pmu_read32(cc, BCMA_CC_PMU_REGCTL_ADDR);
+	bcma_pmu_maskset32(cc, BCMA_CC_PMU_REGCTL_DATA, mask, set);
+}
+EXPORT_SYMBOL_GPL(bcma_chipco_regctl_maskset);
+
+static u32 bcma_pmu_xtalfreq(struct bcma_drv_cc *cc)
+{
+	u32 ilp_ctl, alp_hz;
+
+	if (!(bcma_pmu_read32(cc, BCMA_CC_PMU_STAT) &
+	      BCMA_CC_PMU_STAT_EXT_LPO_AVAIL))
+		return 0;
+
+	bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ,
+			 BIT(BCMA_CC_PMU_XTAL_FREQ_MEASURE_SHIFT));
+	usleep_range(1000, 2000);
+
+	ilp_ctl = bcma_pmu_read32(cc, BCMA_CC_PMU_XTAL_FREQ);
+	ilp_ctl &= BCMA_CC_PMU_XTAL_FREQ_ILPCTL_MASK;
+
+	bcma_pmu_write32(cc, BCMA_CC_PMU_XTAL_FREQ, 0);
+
+	alp_hz = ilp_ctl * 32768 / 4;
+	return (alp_hz + 50000) / 100000 * 100;
+}
+
+static void bcma_pmu2_pll_init0(struct bcma_drv_cc *cc, u32 xtalfreq)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	u32 freq_tgt_target = 0, freq_tgt_current;
+	u32 pll0, mask;
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM43142:
+		/* pmu2_xtaltab0_adfll_485 */
+		switch (xtalfreq) {
+		case 12000:
+			freq_tgt_target = 0x50D52;
+			break;
+		case 20000:
+			freq_tgt_target = 0x307FE;
+			break;
+		case 26000:
+			freq_tgt_target = 0x254EA;
+			break;
+		case 37400:
+			freq_tgt_target = 0x19EF8;
+			break;
+		case 52000:
+			freq_tgt_target = 0x12A75;
+			break;
+		}
+		break;
+	}
+
+	if (!freq_tgt_target) {
+		bcma_err(bus, "Unknown TGT frequency for xtalfreq %d\n",
+			 xtalfreq);
+		return;
+	}
+
+	pll0 = bcma_chipco_pll_read(cc, BCMA_CC_PMU15_PLL_PLLCTL0);
+	freq_tgt_current = (pll0 & BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK) >>
+		BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT;
+
+	if (freq_tgt_current == freq_tgt_target) {
+		bcma_debug(bus, "Target TGT frequency already set\n");
+		return;
+	}
+
+	/* Turn off PLL */
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM43142:
+		mask = (u32)~(BCMA_RES_4314_HT_AVAIL |
+			      BCMA_RES_4314_MACPHY_CLK_AVAIL);
+
+		bcma_pmu_mask32(cc, BCMA_CC_PMU_MINRES_MSK, mask);
+		bcma_pmu_mask32(cc, BCMA_CC_PMU_MAXRES_MSK, mask);
+		bcma_wait_value(cc->core, BCMA_CLKCTLST,
+				BCMA_CLKCTLST_HAVEHT, 0, 20000);
+		break;
+	}
+
+	pll0 &= ~BCMA_CC_PMU15_PLL_PC0_FREQTGT_MASK;
+	pll0 |= freq_tgt_target << BCMA_CC_PMU15_PLL_PC0_FREQTGT_SHIFT;
+	bcma_chipco_pll_write(cc, BCMA_CC_PMU15_PLL_PLLCTL0, pll0);
+
+	/* Flush */
+	if (cc->pmu.rev >= 2)
+		bcma_pmu_set32(cc, BCMA_CC_PMU_CTL, BCMA_CC_PMU_CTL_PLL_UPD);
+
+	/* TODO: Do we need to update OTP? */
+}
+
+static void bcma_pmu_pll_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	u32 xtalfreq = bcma_pmu_xtalfreq(cc);
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM43142:
+		if (xtalfreq == 0)
+			xtalfreq = 20000;
+		bcma_pmu2_pll_init0(cc, xtalfreq);
+		break;
+	}
+}
+
+static void bcma_pmu_resources_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	u32 min_msk = 0, max_msk = 0;
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4313:
+		min_msk = 0x200D;
+		max_msk = 0xFFFF;
+		break;
+	case BCMA_CHIP_ID_BCM43142:
+		min_msk = BCMA_RES_4314_LPLDO_PU |
+			  BCMA_RES_4314_PMU_SLEEP_DIS |
+			  BCMA_RES_4314_PMU_BG_PU |
+			  BCMA_RES_4314_CBUCK_LPOM_PU |
+			  BCMA_RES_4314_CBUCK_PFM_PU |
+			  BCMA_RES_4314_CLDO_PU |
+			  BCMA_RES_4314_LPLDO2_LVM |
+			  BCMA_RES_4314_WL_PMU_PU |
+			  BCMA_RES_4314_LDO3P3_PU |
+			  BCMA_RES_4314_OTP_PU |
+			  BCMA_RES_4314_WL_PWRSW_PU |
+			  BCMA_RES_4314_LQ_AVAIL |
+			  BCMA_RES_4314_LOGIC_RET |
+			  BCMA_RES_4314_MEM_SLEEP |
+			  BCMA_RES_4314_MACPHY_RET |
+			  BCMA_RES_4314_WL_CORE_READY;
+		max_msk = 0x3FFFFFFF;
+		break;
+	default:
+		bcma_debug(bus, "PMU resource config unknown or not needed for device 0x%04X\n",
+			   bus->chipinfo.id);
+	}
+
+	/* Set the resource masks. */
+	if (min_msk)
+		bcma_pmu_write32(cc, BCMA_CC_PMU_MINRES_MSK, min_msk);
+	if (max_msk)
+		bcma_pmu_write32(cc, BCMA_CC_PMU_MAXRES_MSK, max_msk);
+
+	/*
+	 * Add some delay; allow resources to come up and settle.
+	 * Delay is required for SoC (early init).
+	 */
+	usleep_range(2000, 2500);
+}
+
+/* Disable to allow reading SPROM. Don't know the advantages of enabling it. */
+void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	u32 val;
+
+	val = bcma_cc_read32(cc, BCMA_CC_CHIPCTL);
+	if (enable) {
+		val |= BCMA_CHIPCTL_4331_EXTPA_EN;
+		if (bus->chipinfo.pkg == 9 || bus->chipinfo.pkg == 11)
+			val |= BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+		else if (bus->chipinfo.rev > 0)
+			val |= BCMA_CHIPCTL_4331_EXTPA_EN2;
+	} else {
+		val &= ~BCMA_CHIPCTL_4331_EXTPA_EN;
+		val &= ~BCMA_CHIPCTL_4331_EXTPA_EN2;
+		val &= ~BCMA_CHIPCTL_4331_EXTPA_ON_GPIO2_5;
+	}
+	bcma_cc_write32(cc, BCMA_CC_CHIPCTL, val);
+}
+
+static void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4313:
+		/* enable 12 mA drive strength for 4313 and set chipControl
+		   register bit 1 */
+		bcma_chipco_chipctl_maskset(cc, 0,
+					    ~BCMA_CCTRL_4313_12MA_LED_DRIVE,
+					    BCMA_CCTRL_4313_12MA_LED_DRIVE);
+		break;
+	case BCMA_CHIP_ID_BCM4331:
+	case BCMA_CHIP_ID_BCM43431:
+		/* Ext PA lines must be enabled for tx on BCM4331 */
+		bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
+		break;
+	case BCMA_CHIP_ID_BCM43224:
+	case BCMA_CHIP_ID_BCM43421:
+		/* enable 12 mA drive strength for 43224 and set chipControl
+		   register bit 15 */
+		if (bus->chipinfo.rev == 0) {
+			bcma_cc_maskset32(cc, BCMA_CC_CHIPCTL,
+					  ~BCMA_CCTRL_43224_GPIO_TOGGLE,
+					  BCMA_CCTRL_43224_GPIO_TOGGLE);
+			bcma_chipco_chipctl_maskset(cc, 0,
+						    ~BCMA_CCTRL_43224A0_12MA_LED_DRIVE,
+						    BCMA_CCTRL_43224A0_12MA_LED_DRIVE);
+		} else {
+			bcma_chipco_chipctl_maskset(cc, 0,
+						    ~BCMA_CCTRL_43224B0_12MA_LED_DRIVE,
+						    BCMA_CCTRL_43224B0_12MA_LED_DRIVE);
+		}
+		break;
+	default:
+		bcma_debug(bus, "Workarounds unknown or not needed for device 0x%04X\n",
+			   bus->chipinfo.id);
+	}
+}
+
+void bcma_pmu_early_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	u32 pmucap;
+
+	if (cc->core->id.rev >= 35 &&
+	    cc->capabilities_ext & BCMA_CC_CAP_EXT_AOB_PRESENT) {
+		cc->pmu.core = bcma_find_core(bus, BCMA_CORE_PMU);
+		if (!cc->pmu.core)
+			bcma_warn(bus, "Couldn't find expected PMU core");
+	}
+	if (!cc->pmu.core)
+		cc->pmu.core = cc->core;
+
+	pmucap = bcma_pmu_read32(cc, BCMA_CC_PMU_CAP);
+	cc->pmu.rev = (pmucap & BCMA_CC_PMU_CAP_REVISION);
+
+	bcma_debug(bus, "Found rev %u PMU (capabilities 0x%08X)\n", cc->pmu.rev,
+		   pmucap);
+}
+
+void bcma_pmu_init(struct bcma_drv_cc *cc)
+{
+	if (cc->pmu.rev == 1)
+		bcma_pmu_mask32(cc, BCMA_CC_PMU_CTL,
+				~BCMA_CC_PMU_CTL_NOILPONW);
+	else
+		bcma_pmu_set32(cc, BCMA_CC_PMU_CTL,
+			       BCMA_CC_PMU_CTL_NOILPONW);
+
+	bcma_pmu_pll_init(cc);
+	bcma_pmu_resources_init(cc);
+	bcma_pmu_workarounds(cc);
+}
+
+u32 bcma_pmu_get_alp_clock(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4313:
+	case BCMA_CHIP_ID_BCM43224:
+	case BCMA_CHIP_ID_BCM43225:
+	case BCMA_CHIP_ID_BCM43227:
+	case BCMA_CHIP_ID_BCM43228:
+	case BCMA_CHIP_ID_BCM4331:
+	case BCMA_CHIP_ID_BCM43421:
+	case BCMA_CHIP_ID_BCM43428:
+	case BCMA_CHIP_ID_BCM43431:
+	case BCMA_CHIP_ID_BCM4716:
+	case BCMA_CHIP_ID_BCM47162:
+	case BCMA_CHIP_ID_BCM4748:
+	case BCMA_CHIP_ID_BCM4749:
+	case BCMA_CHIP_ID_BCM5357:
+	case BCMA_CHIP_ID_BCM53572:
+	case BCMA_CHIP_ID_BCM6362:
+		/* always 20 MHz */
+		return 20000 * 1000;
+	case BCMA_CHIP_ID_BCM4706:
+	case BCMA_CHIP_ID_BCM5356:
+		/* always 25 MHz */
+		return 25000 * 1000;
+	case BCMA_CHIP_ID_BCM43460:
+	case BCMA_CHIP_ID_BCM4352:
+	case BCMA_CHIP_ID_BCM4360:
+		if (cc->status & BCMA_CC_CHIPST_4360_XTAL_40MZ)
+			return 40000 * 1000;
+		else
+			return 20000 * 1000;
+	default:
+		bcma_warn(bus, "No ALP clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+			  bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_ALP_CLOCK);
+	}
+	return BCMA_CC_PMU_ALP_CLOCK;
+}
+
+/* Find the output of the "m" pll divider given pll controls that start with
+ * pllreg "pll0", i.e. 12 for main, 6 for phy, 0 for misc.
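+ *
+ * Worked example with illustrative divider values: for a 20 MHz ALP clock,
+ * ndiv = 40, p1 = 2, p2 = 2 and an m-divider of 8, this returns
+ * (2 * 40 * 20 MHz) / 2 / 8 = 100 MHz.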
+ */
+static u32 bcma_pmu_pll_clock(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+{
+	u32 tmp, div, ndiv, p1, p2, fc;
+	struct bcma_bus *bus = cc->core->bus;
+
+	BUG_ON((pll0 & 3) || (pll0 > BCMA_CC_PMU4716_MAINPLL_PLL0));
+
+	BUG_ON(!m || m > 4);
+
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) {
+		/* Detect failure in clock setting */
+		tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+		if (tmp & 0x40000)
+			return 133 * 1000000;
+	}
+
+	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_P1P2_OFF);
+	p1 = (tmp & BCMA_CC_PPL_P1_MASK) >> BCMA_CC_PPL_P1_SHIFT;
+	p2 = (tmp & BCMA_CC_PPL_P2_MASK) >> BCMA_CC_PPL_P2_SHIFT;
+
+	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_M14_OFF);
+	div = (tmp >> ((m - 1) * BCMA_CC_PPL_MDIV_WIDTH)) &
+		BCMA_CC_PPL_MDIV_MASK;
+
+	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PPL_NM5_OFF);
+	ndiv = (tmp & BCMA_CC_PPL_NDIV_MASK) >> BCMA_CC_PPL_NDIV_SHIFT;
+
+	/* Do calculation in MHz */
+	fc = bcma_pmu_get_alp_clock(cc) / 1000000;
+	fc = (p1 * ndiv * fc) / p2;
+
+	/* Return clock in Hertz */
+	return (fc / div) * 1000000;
+}
+
+static u32 bcma_pmu_pll_clock_bcm4706(struct bcma_drv_cc *cc, u32 pll0, u32 m)
+{
+	u32 tmp, ndiv, p1div, p2div;
+	u32 clock;
+
+	BUG_ON(!m || m > 4);
+
+	/* Get N, P1 and P2 dividers to determine CPU clock */
+	tmp = bcma_chipco_pll_read(cc, pll0 + BCMA_CC_PMU6_4706_PROCPLL_OFF);
+	ndiv = (tmp & BCMA_CC_PMU6_4706_PROC_NDIV_INT_MASK)
+		>> BCMA_CC_PMU6_4706_PROC_NDIV_INT_SHIFT;
+	p1div = (tmp & BCMA_CC_PMU6_4706_PROC_P1DIV_MASK)
+		>> BCMA_CC_PMU6_4706_PROC_P1DIV_SHIFT;
+	p2div = (tmp & BCMA_CC_PMU6_4706_PROC_P2DIV_MASK)
+		>> BCMA_CC_PMU6_4706_PROC_P2DIV_SHIFT;
+
+	tmp = bcma_cc_read32(cc, BCMA_CC_CHIPSTAT);
+	if (tmp & BCMA_CC_CHIPST_4706_PKG_OPTION)
+		/* Low cost bonding: Fixed reference clock 25MHz and m = 4 */
+		clock = (25000000 / 4) * ndiv * p2div / p1div;
+	else
+		/* Fixed reference clock 25MHz and m = 2 */
+		clock = (25000000 / 2) * ndiv * p2div / p1div;
+
+	if (m == BCMA_CC_PMU5_MAINPLL_SSB)
+		clock = clock / 4;
+
+	return clock;
+}
+
+/* query bus clock frequency for PMU-enabled chipcommon */
+u32 bcma_pmu_get_bus_clock(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4716:
+	case BCMA_CHIP_ID_BCM4748:
+	case BCMA_CHIP_ID_BCM47162:
+		return bcma_pmu_pll_clock(cc, BCMA_CC_PMU4716_MAINPLL_PLL0,
+					  BCMA_CC_PMU5_MAINPLL_SSB);
+	case BCMA_CHIP_ID_BCM5356:
+		return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5356_MAINPLL_PLL0,
+					  BCMA_CC_PMU5_MAINPLL_SSB);
+	case BCMA_CHIP_ID_BCM5357:
+	case BCMA_CHIP_ID_BCM4749:
+		return bcma_pmu_pll_clock(cc, BCMA_CC_PMU5357_MAINPLL_PLL0,
+					  BCMA_CC_PMU5_MAINPLL_SSB);
+	case BCMA_CHIP_ID_BCM4706:
+		return bcma_pmu_pll_clock_bcm4706(cc,
+						  BCMA_CC_PMU4706_MAINPLL_PLL0,
+						  BCMA_CC_PMU5_MAINPLL_SSB);
+	case BCMA_CHIP_ID_BCM53572:
+		return 75000000;
+	default:
+		bcma_warn(bus, "No bus clock specified for %04X device, pmu rev. %d, using default %d Hz\n",
+			  bus->chipinfo.id, cc->pmu.rev, BCMA_CC_PMU_HT_CLOCK);
+	}
+	return BCMA_CC_PMU_HT_CLOCK;
+}
+EXPORT_SYMBOL_GPL(bcma_pmu_get_bus_clock);
+
+/* query cpu clock frequency for PMU-enabled chipcommon */
+u32 bcma_pmu_get_cpu_clock(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM53572)
+		return 300000000;
+
+	/* New PMUs can have different clock for bus and CPU */
+	if (cc->pmu.rev >= 5) {
+		u32 pll;
+		switch (bus->chipinfo.id) {
+		case BCMA_CHIP_ID_BCM4706:
+			return bcma_pmu_pll_clock_bcm4706(cc,
+						BCMA_CC_PMU4706_MAINPLL_PLL0,
+						BCMA_CC_PMU5_MAINPLL_CPU);
+		case BCMA_CHIP_ID_BCM5356:
+			pll = BCMA_CC_PMU5356_MAINPLL_PLL0;
+			break;
+		case BCMA_CHIP_ID_BCM5357:
+		case BCMA_CHIP_ID_BCM4749:
+			pll = BCMA_CC_PMU5357_MAINPLL_PLL0;
+			break;
+		default:
+			pll = BCMA_CC_PMU4716_MAINPLL_PLL0;
+			break;
+		}
+
+		return bcma_pmu_pll_clock(cc, pll, BCMA_CC_PMU5_MAINPLL_CPU);
+	}
+
+	/* On old PMUs CPU has the same clock as the bus */
+	return bcma_pmu_get_bus_clock(cc);
+}
+
+static void bcma_pmu_spuravoid_pll_write(struct bcma_drv_cc *cc, u32 offset,
+					 u32 value)
+{
+	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR, offset);
+	bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, value);
+}
+
+void bcma_pmu_spuravoid_pllupdate(struct bcma_drv_cc *cc, int spuravoid)
+{
+	u32 tmp = 0;
+	u8 phypll_offset = 0;
+	u8 bcm5357_bcm43236_p1div[] = {0x1, 0x5, 0x5};
+	u8 bcm5357_bcm43236_ndiv[] = {0x30, 0xf6, 0xfc};
+	struct bcma_bus *bus = cc->core->bus;
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM5357:
+	case BCMA_CHIP_ID_BCM4749:
+	case BCMA_CHIP_ID_BCM53572:
+		/* 5357[ab]0, 43236[ab]0, and 6362b0 */
+
+		/* BCM5357 needs to touch PLL1_PLLCTL[02],
+		   so offset PLL0_PLLCTL[02] by 6 */
+		phypll_offset = (bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+		       bus->chipinfo.id == BCMA_CHIP_ID_BCM4749 ||
+		       bus->chipinfo.id == BCMA_CHIP_ID_BCM53572) ? 6 : 0;
+
+		/* RMW only the P1 divider */
+		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
+				BCMA_CC_PMU_PLL_CTL0 + phypll_offset);
+		tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
+		tmp &= (~(BCMA_CC_PMU1_PLL0_PC0_P1DIV_MASK));
+		tmp |= (bcm5357_bcm43236_p1div[spuravoid] << BCMA_CC_PMU1_PLL0_PC0_P1DIV_SHIFT);
+		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
+
+		/* RMW only the int feedback divider */
+		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_ADDR,
+				BCMA_CC_PMU_PLL_CTL2 + phypll_offset);
+		tmp = bcma_pmu_read32(cc, BCMA_CC_PMU_PLLCTL_DATA);
+		tmp &= ~(BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_MASK);
+		tmp |= (bcm5357_bcm43236_ndiv[spuravoid]) << BCMA_CC_PMU1_PLL0_PC2_NDIV_INT_SHIFT;
+		bcma_pmu_write32(cc, BCMA_CC_PMU_PLLCTL_DATA, tmp);
+
+		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
+		break;
+
+	case BCMA_CHIP_ID_BCM4331:
+	case BCMA_CHIP_ID_BCM43431:
+		if (spuravoid == 2) {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11500014);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x0FC00a08);
+		} else if (spuravoid == 1) {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11500014);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x0F600a08);
+		} else {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11100014);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x03000a08);
+		}
+		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
+		break;
+
+	case BCMA_CHIP_ID_BCM43224:
+	case BCMA_CHIP_ID_BCM43225:
+	case BCMA_CHIP_ID_BCM43421:
+		if (spuravoid == 1) {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11500010);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+						     0x000C0C06);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x0F600a08);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+						     0x00000000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+						     0x2001E920);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+						     0x88888815);
+		} else {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11100010);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+						     0x000c0c06);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x03000a08);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+						     0x00000000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+						     0x200005c0);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+						     0x88888815);
+		}
+		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
+		break;
+
+	case BCMA_CHIP_ID_BCM4716:
+	case BCMA_CHIP_ID_BCM4748:
+	case BCMA_CHIP_ID_BCM47162:
+		if (spuravoid == 1) {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11500060);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+						     0x080C0C06);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x0F600000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+						     0x00000000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+						     0x2001E924);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+						     0x88888815);
+		} else {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11100060);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+						     0x080c0c06);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x03000000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+						     0x00000000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+						     0x200005c0);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+						     0x88888815);
+		}
+
+		tmp = BCMA_CC_PMU_CTL_PLL_UPD | BCMA_CC_PMU_CTL_NOILPONW;
+		break;
+
+	case BCMA_CHIP_ID_BCM43131:
+	case BCMA_CHIP_ID_BCM43217:
+	case BCMA_CHIP_ID_BCM43227:
+	case BCMA_CHIP_ID_BCM43228:
+	case BCMA_CHIP_ID_BCM43428:
+		/* LCNXN */
+		/* PLL Settings for spur avoidance on/off mode,
+		   no on2 support for 43228A0 */
+		if (spuravoid == 1) {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x01100014);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+						     0x040C0C06);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x03140A08);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+						     0x00333333);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+						     0x202C2820);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+						     0x88888815);
+		} else {
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL0,
+						     0x11100014);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL1,
+						     0x040c0c06);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL2,
+						     0x03000a08);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL3,
+						     0x00000000);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL4,
+						     0x200005c0);
+			bcma_pmu_spuravoid_pll_write(cc, BCMA_CC_PMU_PLL_CTL5,
+						     0x88888815);
+		}
+		tmp = BCMA_CC_PMU_CTL_PLL_UPD;
+		break;
+	default:
+		bcma_err(bus, "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
+			 bus->chipinfo.id);
+		break;
+	}
+
+	tmp |= bcma_pmu_read32(cc, BCMA_CC_PMU_CTL);
+	bcma_pmu_write32(cc, BCMA_CC_PMU_CTL, tmp);
+}
+EXPORT_SYMBOL_GPL(bcma_pmu_spuravoid_pllupdate);
diff --git a/drivers/bcma/driver_chipcommon_sflash.c b/drivers/bcma/driver_chipcommon_sflash.c
new file mode 100644
index 0000000..35b13a0
--- /dev/null
+++ b/drivers/bcma/driver_chipcommon_sflash.c
@@ -0,0 +1,165 @@
+/*
+ * Broadcom specific AMBA
+ * ChipCommon serial flash interface
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+static struct resource bcma_sflash_resource = {
+	.name	= "bcma_sflash",
+	.start	= BCMA_SOC_FLASH2,
+	.end	= 0,
+	.flags  = IORESOURCE_MEM | IORESOURCE_READONLY,
+};
+
+struct platform_device bcma_sflash_dev = {
+	.name		= "bcma_sflash",
+	.resource	= &bcma_sflash_resource,
+	.num_resources	= 1,
+};
+
+struct bcma_sflash_tbl_e {
+	char *name;
+	u32 id;
+	u32 blocksize;
+	u16 numblocks;
+};
+
+static const struct bcma_sflash_tbl_e bcma_sflash_st_tbl[] = {
+	{ "M25P20", 0x11, 0x10000, 4, },
+	{ "M25P40", 0x12, 0x10000, 8, },
+
+	{ "M25P16", 0x14, 0x10000, 32, },
+	{ "M25P32", 0x15, 0x10000, 64, },
+	{ "M25P64", 0x16, 0x10000, 128, },
+	{ "M25FL128", 0x17, 0x10000, 256, },
+	{ "MX25L25635F", 0x18, 0x10000, 512, },
+	{ NULL },
+};
+
+static const struct bcma_sflash_tbl_e bcma_sflash_sst_tbl[] = {
+	{ "SST25WF512", 1, 0x1000, 16, },
+	{ "SST25VF512", 0x48, 0x1000, 16, },
+	{ "SST25WF010", 2, 0x1000, 32, },
+	{ "SST25VF010", 0x49, 0x1000, 32, },
+	{ "SST25WF020", 3, 0x1000, 64, },
+	{ "SST25VF020", 0x43, 0x1000, 64, },
+	{ "SST25WF040", 4, 0x1000, 128, },
+	{ "SST25VF040", 0x44, 0x1000, 128, },
+	{ "SST25VF040B", 0x8d, 0x1000, 128, },
+	{ "SST25WF080", 5, 0x1000, 256, },
+	{ "SST25VF080B", 0x8e, 0x1000, 256, },
+	{ "SST25VF016", 0x41, 0x1000, 512, },
+	{ "SST25VF032", 0x4a, 0x1000, 1024, },
+	{ "SST25VF064", 0x4b, 0x1000, 2048, },
+	{ NULL },
+};
+
+static const struct bcma_sflash_tbl_e bcma_sflash_at_tbl[] = {
+	{ "AT45DB011", 0xc, 256, 512, },
+	{ "AT45DB021", 0x14, 256, 1024, },
+	{ "AT45DB041", 0x1c, 256, 2048, },
+	{ "AT45DB081", 0x24, 256, 4096, },
+	{ "AT45DB161", 0x2c, 512, 4096, },
+	{ "AT45DB321", 0x34, 512, 8192, },
+	{ "AT45DB642", 0x3c, 1024, 8192, },
+	{ NULL },
+};
+
+static void bcma_sflash_cmd(struct bcma_drv_cc *cc, u32 opcode)
+{
+	int i;
+	bcma_cc_write32(cc, BCMA_CC_FLASHCTL,
+			BCMA_CC_FLASHCTL_START | opcode);
+	for (i = 0; i < 1000; i++) {
+		if (!(bcma_cc_read32(cc, BCMA_CC_FLASHCTL) &
+		      BCMA_CC_FLASHCTL_BUSY))
+			return;
+		cpu_relax();
+	}
+	bcma_err(cc->core->bus, "SFLASH control command failed (timeout)!\n");
+}
+
+/* Initialize serial flash access */
+int bcma_sflash_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	struct bcma_sflash *sflash = &cc->sflash;
+	const struct bcma_sflash_tbl_e *e;
+	u32 id, id2;
+
+	switch (cc->capabilities & BCMA_CC_CAP_FLASHT) {
+	case BCMA_CC_FLASHT_STSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_DP);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 0);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		bcma_cc_write32(cc, BCMA_CC_FLASHADDR, 1);
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_ST_RES);
+		id2 = bcma_cc_read32(cc, BCMA_CC_FLASHDATA);
+
+		switch (id) {
+		case 0xbf:
+			for (e = bcma_sflash_sst_tbl; e->name; e++) {
+				if (e->id == id2)
+					break;
+			}
+			break;
+		case 0x13:
+			return -ENOTSUPP;
+		default:
+			for (e = bcma_sflash_st_tbl; e->name; e++) {
+				if (e->id == id)
+					break;
+			}
+			break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported ST serial flash (id: 0x%X, id2: 0x%X)\n", id, id2);
+			return -ENOTSUPP;
+		}
+
+		break;
+	case BCMA_CC_FLASHT_ATSER:
+		bcma_sflash_cmd(cc, BCMA_CC_FLASHCTL_AT_STATUS);
+		id = bcma_cc_read32(cc, BCMA_CC_FLASHDATA) & 0x3c;
+
+		for (e = bcma_sflash_at_tbl; e->name; e++) {
+			if (e->id == id)
+				break;
+		}
+		if (!e->name) {
+			bcma_err(bus, "Unsupported Atmel serial flash (id: 0x%X)\n", id);
+			return -ENOTSUPP;
+		}
+
+		break;
+	default:
+		bcma_err(bus, "Unsupported flash type\n");
+		return -ENOTSUPP;
+	}
+
+	sflash->blocksize = e->blocksize;
+	sflash->numblocks = e->numblocks;
+	sflash->size = sflash->blocksize * sflash->numblocks;
+	sflash->present = true;
+
+	bcma_info(bus, "Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+		  e->name, sflash->size / 1024, sflash->blocksize,
+		  sflash->numblocks);
+
+	/* Prepare platform device, but don't register it yet. It's too early,
+	 * malloc (required by device_private_init) is not available yet. */
+	bcma_sflash_dev.resource[0].end = bcma_sflash_dev.resource[0].start +
+					  sflash->size;
+	bcma_sflash_dev.dev.platform_data = sflash;
+
+	return 0;
+}
diff --git a/drivers/bcma/driver_gmac_cmn.c b/drivers/bcma/driver_gmac_cmn.c
new file mode 100644
index 0000000..dcb1379
--- /dev/null
+++ b/drivers/bcma/driver_gmac_cmn.c
@@ -0,0 +1,14 @@
+/*
+ * Broadcom specific AMBA
+ * GBIT MAC COMMON Core
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+
+void bcma_core_gmac_cmn_init(struct bcma_drv_gmac_cmn *gc)
+{
+	mutex_init(&gc->phy_mutex);
+}
diff --git a/drivers/bcma/driver_gpio.c b/drivers/bcma/driver_gpio.c
new file mode 100644
index 0000000..2c0ffb7
--- /dev/null
+++ b/drivers/bcma/driver_gpio.c
@@ -0,0 +1,233 @@
+/*
+ * Broadcom specific AMBA
+ * GPIO driver
+ *
+ * Copyright 2011, Broadcom Corporation
+ * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/gpio/driver.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+#include "bcma_private.h"
+
+#define BCMA_GPIO_MAX_PINS	32
+
+static int bcma_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
+{
+	struct bcma_drv_cc *cc = gpiochip_get_data(chip);
+
+	return !!bcma_chipco_gpio_in(cc, 1 << gpio);
+}
+
+static void bcma_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
+				int value)
+{
+	struct bcma_drv_cc *cc = gpiochip_get_data(chip);
+
+	bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+}
+
+static int bcma_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
+{
+	struct bcma_drv_cc *cc = gpiochip_get_data(chip);
+
+	bcma_chipco_gpio_outen(cc, 1 << gpio, 0);
+	return 0;
+}
+
+static int bcma_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
+				      int value)
+{
+	struct bcma_drv_cc *cc = gpiochip_get_data(chip);
+
+	bcma_chipco_gpio_outen(cc, 1 << gpio, 1 << gpio);
+	bcma_chipco_gpio_out(cc, 1 << gpio, value ? 1 << gpio : 0);
+	return 0;
+}
+
+static int bcma_gpio_request(struct gpio_chip *chip, unsigned gpio)
+{
+	struct bcma_drv_cc *cc = gpiochip_get_data(chip);
+
+	bcma_chipco_gpio_control(cc, 1 << gpio, 0);
+	/* clear pulldown */
+	bcma_chipco_gpio_pulldown(cc, 1 << gpio, 0);
+	/* Set pullup */
+	bcma_chipco_gpio_pullup(cc, 1 << gpio, 1 << gpio);
+
+	return 0;
+}
+
+static void bcma_gpio_free(struct gpio_chip *chip, unsigned gpio)
+{
+	struct bcma_drv_cc *cc = gpiochip_get_data(chip);
+
+	/* clear pullup */
+	bcma_chipco_gpio_pullup(cc, 1 << gpio, 0);
+}
+
+#if IS_BUILTIN(CONFIG_BCM47XX) || IS_BUILTIN(CONFIG_ARCH_BCM_5301X)
+
+static void bcma_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcma_drv_cc *cc = gpiochip_get_data(gc);
+	int gpio = irqd_to_hwirq(d);
+	u32 val = bcma_chipco_gpio_in(cc, BIT(gpio));
+
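+	/* Arm the line: program the polarity to the current input level so the
+	 * next level change raises the interrupt, then unmask the pin. */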
+	bcma_chipco_gpio_polarity(cc, BIT(gpio), val);
+	bcma_chipco_gpio_intmask(cc, BIT(gpio), BIT(gpio));
+}
+
+static void bcma_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcma_drv_cc *cc = gpiochip_get_data(gc);
+	int gpio = irqd_to_hwirq(d);
+
+	bcma_chipco_gpio_intmask(cc, BIT(gpio), 0);
+}
+
+static struct irq_chip bcma_gpio_irq_chip = {
+	.name		= "BCMA-GPIO",
+	.irq_mask	= bcma_gpio_irq_mask,
+	.irq_unmask	= bcma_gpio_irq_unmask,
+};
+
+static irqreturn_t bcma_gpio_irq_handler(int irq, void *dev_id)
+{
+	struct bcma_drv_cc *cc = dev_id;
+	struct gpio_chip *gc = &cc->gpio;
+	u32 val = bcma_cc_read32(cc, BCMA_CC_GPIOIN);
+	u32 mask = bcma_cc_read32(cc, BCMA_CC_GPIOIRQ);
+	u32 pol = bcma_cc_read32(cc, BCMA_CC_GPIOPOL);
+	unsigned long irqs = (val ^ pol) & mask;
+	int gpio;
+
+	if (!irqs)
+		return IRQ_NONE;
+
+	for_each_set_bit(gpio, &irqs, gc->ngpio)
+		generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio));
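+	/* Re-arm the serviced lines: latching the current level as the new
+	 * polarity acks them and makes the next level change fire again. */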
+	bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
+
+	return IRQ_HANDLED;
+}
+
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
+{
+	struct gpio_chip *chip = &cc->gpio;
+	int hwirq, err;
+
+	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+		return 0;
+
+	hwirq = bcma_core_irq(cc->core, 0);
+	err = request_irq(hwirq, bcma_gpio_irq_handler, IRQF_SHARED, "gpio",
+			  cc);
+	if (err)
+		return err;
+
+	bcma_chipco_gpio_intmask(cc, ~0, 0);
+	bcma_cc_set32(cc, BCMA_CC_IRQMASK, BCMA_CC_IRQ_GPIO);
+
+	err = gpiochip_irqchip_add(chip, &bcma_gpio_irq_chip, 0,
+				   handle_simple_irq, IRQ_TYPE_NONE);
+	if (err) {
+		free_irq(hwirq, cc);
+		return err;
+	}
+
+	return 0;
+}
+
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
+{
+	if (cc->core->bus->hosttype != BCMA_HOSTTYPE_SOC)
+		return;
+
+	bcma_cc_mask32(cc, BCMA_CC_IRQMASK, ~BCMA_CC_IRQ_GPIO);
+	free_irq(bcma_core_irq(cc->core, 0), cc);
+}
+#else
+static int bcma_gpio_irq_init(struct bcma_drv_cc *cc)
+{
+	return 0;
+}
+
+static void bcma_gpio_irq_exit(struct bcma_drv_cc *cc)
+{
+}
+#endif
+
+int bcma_gpio_init(struct bcma_drv_cc *cc)
+{
+	struct bcma_bus *bus = cc->core->bus;
+	struct gpio_chip *chip = &cc->gpio;
+	int err;
+
+	chip->label		= "bcma_gpio";
+	chip->owner		= THIS_MODULE;
+	chip->request		= bcma_gpio_request;
+	chip->free		= bcma_gpio_free;
+	chip->get		= bcma_gpio_get_value;
+	chip->set		= bcma_gpio_set_value;
+	chip->direction_input	= bcma_gpio_direction_input;
+	chip->direction_output	= bcma_gpio_direction_output;
+	chip->parent		= bcma_bus_get_host_dev(bus);
+#if IS_BUILTIN(CONFIG_OF)
+	chip->of_node		= cc->core->dev.of_node;
+#endif
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4707:
+	case BCMA_CHIP_ID_BCM5357:
+	case BCMA_CHIP_ID_BCM53572:
+	case BCMA_CHIP_ID_BCM53573:
+	case BCMA_CHIP_ID_BCM47094:
+		chip->ngpio	= 32;
+		break;
+	default:
+		chip->ngpio	= 16;
+	}
+
+	/*
+	 * Register SoC GPIO devices with absolute GPIO pin base.
+	 * On MIPS, we don't have Device Tree and we can't use relative (per chip)
+	 * GPIO numbers.
+	 * On some ARM devices, user space may want to access some system GPIO
+	 * pins directly, which is easier to do with a predictable GPIO base.
+	 */
+	if (IS_BUILTIN(CONFIG_BCM47XX) ||
+	    cc->core->bus->hosttype == BCMA_HOSTTYPE_SOC)
+		chip->base		= bus->num * BCMA_GPIO_MAX_PINS;
+	else
+		chip->base		= -1;
+
+	err = gpiochip_add_data(chip, cc);
+	if (err)
+		return err;
+
+	err = bcma_gpio_irq_init(cc);
+	if (err) {
+		gpiochip_remove(chip);
+		return err;
+	}
+
+	return 0;
+}
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+	bcma_gpio_irq_exit(cc);
+	gpiochip_remove(&cc->gpio);
+	return 0;
+}
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
new file mode 100644
index 0000000..27e9686
--- /dev/null
+++ b/drivers/bcma/driver_mips.c
@@ -0,0 +1,383 @@
+/*
+ * Broadcom specific AMBA
+ * Broadcom MIPS32 74K core driver
+ *
+ * Copyright 2009, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <mb@bu3sch.de>
+ * Copyright 2010, Bernhard Loos <bernhardloos@googlemail.com>
+ * Copyright 2011, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <linux/time.h>
+#ifdef CONFIG_BCM47XX
+#include <linux/bcm47xx_nvram.h>
+#endif
+
+enum bcma_boot_dev {
+	BCMA_BOOT_DEV_UNK = 0,
+	BCMA_BOOT_DEV_ROM,
+	BCMA_BOOT_DEV_PARALLEL,
+	BCMA_BOOT_DEV_SERIAL,
+	BCMA_BOOT_DEV_NAND,
+};
+
+/* The 47162a0 hangs when reading MIPS DMP registers */
+static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
+{
+	return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
+	       dev->bus->chipinfo.rev == 0 && dev->id.id == BCMA_CORE_MIPS_74K;
+}
+
+/* The 5357b0 hangs when reading USB20H DMP registers */
+static inline bool bcma_core_mips_bcm5357b0_quirk(struct bcma_device *dev)
+{
+	return (dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM5357 ||
+		dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM4749) &&
+	       dev->bus->chipinfo.pkg == 11 &&
+	       dev->id.id == BCMA_CORE_USB20_HOST;
+}
+
+static inline u32 mips_read32(struct bcma_drv_mips *mcore,
+			      u16 offset)
+{
+	return bcma_read32(mcore->core, offset);
+}
+
+static inline void mips_write32(struct bcma_drv_mips *mcore,
+				u16 offset,
+				u32 value)
+{
+	bcma_write32(mcore->core, offset, value);
+}
+
+static const u32 ipsflag_irq_mask[] = {
+	0,
+	BCMA_MIPS_IPSFLAG_IRQ1,
+	BCMA_MIPS_IPSFLAG_IRQ2,
+	BCMA_MIPS_IPSFLAG_IRQ3,
+	BCMA_MIPS_IPSFLAG_IRQ4,
+};
+
+static const u32 ipsflag_irq_shift[] = {
+	0,
+	BCMA_MIPS_IPSFLAG_IRQ1_SHIFT,
+	BCMA_MIPS_IPSFLAG_IRQ2_SHIFT,
+	BCMA_MIPS_IPSFLAG_IRQ3_SHIFT,
+	BCMA_MIPS_IPSFLAG_IRQ4_SHIFT,
+};
+
+static u32 bcma_core_mips_irqflag(struct bcma_device *dev)
+{
+	u32 flag;
+
+	if (bcma_core_mips_bcm47162a0_quirk(dev))
+		return dev->core_index;
+	if (bcma_core_mips_bcm5357b0_quirk(dev))
+		return dev->core_index;
+	flag = bcma_aread32(dev, BCMA_MIPS_OOBSELOUTA30);
+
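+	/* The low five bits of OOBSELOUTA30 give the out-of-band flag line
+	 * used by this core; a value of 0 means no line is routed, which is
+	 * signalled to callers with the 0x3f sentinel. */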
+	if (flag)
+		return flag & 0x1F;
+	else
+		return 0x3f;
+}
+
+/* Get the MIPS IRQ assignment for a specified device.
+ * If unassigned, 0 is returned.
+ * If disabled, 5 is returned.
+ * If not supported, 6 is returned.
+ */
+unsigned int bcma_core_mips_irq(struct bcma_device *dev)
+{
+	struct bcma_device *mdev = dev->bus->drv_mips.core;
+	u32 irqflag;
+	unsigned int irq;
+
+	irqflag = bcma_core_mips_irqflag(dev);
+	if (irqflag == 0x3f)
+		return 6;
+
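+	/* Each MIPS74K interrupt mask register lists the backplane flag lines
+	 * routed to that hardware IRQ; report the IRQ whose mask contains
+	 * this core's flag. */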
+	for (irq = 0; irq <= 4; irq++)
+		if (bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq)) &
+		    (1 << irqflag))
+			return irq;
+
+	return 5;
+}
+
+static void bcma_core_mips_set_irq(struct bcma_device *dev, unsigned int irq)
+{
+	unsigned int oldirq = bcma_core_mips_irq(dev);
+	struct bcma_bus *bus = dev->bus;
+	struct bcma_device *mdev = bus->drv_mips.core;
+	u32 irqflag;
+
+	irqflag = bcma_core_mips_irqflag(dev);
+	BUG_ON(oldirq == 6);
+
+	dev->irq = irq + 2;
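+	/* Hardware IRQ n is exposed to Linux as n + 2; the first two Linux IRQ
+	 * numbers correspond to the MIPS CPU's software interrupts. */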
+
+	/* clear the old irq */
+	if (oldirq == 0)
+		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
+			    bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) &
+			    ~(1 << irqflag));
+	else if (oldirq != 5)
+		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(oldirq), 0);
+
+	/* assign the new one */
+	if (irq == 0) {
+		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0),
+			    bcma_read32(mdev, BCMA_MIPS_MIPS74K_INTMASK(0)) |
+			    (1 << irqflag));
+	} else {
+		u32 irqinitmask = bcma_read32(mdev,
+					      BCMA_MIPS_MIPS74K_INTMASK(irq));
+		if (irqinitmask) {
+			struct bcma_device *core;
+
+			/* backplane irq line is in use, find out who uses
+			 * it and set user to irq 0
+			 */
+			list_for_each_entry(core, &bus->cores, list) {
+				if ((1 << bcma_core_mips_irqflag(core)) ==
+				    irqinitmask) {
+					bcma_core_mips_set_irq(core, 0);
+					break;
+				}
+			}
+		}
+		bcma_write32(mdev, BCMA_MIPS_MIPS74K_INTMASK(irq),
+			     1 << irqflag);
+	}
+
+	bcma_debug(bus, "set_irq: core 0x%04x, irq %d => %d\n",
+		   dev->id.id, oldirq <= 4 ? oldirq + 2 : 0, irq + 2);
+}
+
+static void bcma_core_mips_set_irq_name(struct bcma_bus *bus, unsigned int irq,
+					u16 coreid, u8 unit)
+{
+	struct bcma_device *core;
+
+	core = bcma_find_core_unit(bus, coreid, unit);
+	if (!core) {
+		bcma_warn(bus,
+			  "Can not find core (id: 0x%x, unit %i) for IRQ configuration.\n",
+			  coreid, unit);
+		return;
+	}
+
+	bcma_core_mips_set_irq(core, irq);
+}
+
+static void bcma_core_mips_print_irq(struct bcma_device *dev, unsigned int irq)
+{
+	int i;
+	static const char *irq_name[] = {"2(S)", "3", "4", "5", "6", "D", "I"};
+	char interrupts[25];
+	char *ints = interrupts;
+
+	for (i = 0; i < ARRAY_SIZE(irq_name); i++)
+		ints += sprintf(ints, " %s%c",
+				irq_name[i], i == irq ? '*' : ' ');
+
+	bcma_debug(dev->bus, "core 0x%04x, irq:%s\n", dev->id.id, interrupts);
+}
+
+static void bcma_core_mips_dump_irq(struct bcma_bus *bus)
+{
+	struct bcma_device *core;
+
+	list_for_each_entry(core, &bus->cores, list) {
+		bcma_core_mips_print_irq(core, bcma_core_mips_irq(core));
+	}
+}
+
+u32 bcma_cpu_clock(struct bcma_drv_mips *mcore)
+{
+	struct bcma_bus *bus = mcore->core->bus;
+
+	if (bus->drv_cc.capabilities & BCMA_CC_CAP_PMU)
+		return bcma_pmu_get_cpu_clock(&bus->drv_cc);
+
+	bcma_err(bus, "No PMU available, need this to get the cpu clock\n");
+	return 0;
+}
+EXPORT_SYMBOL(bcma_cpu_clock);
+
+static enum bcma_boot_dev bcma_boot_dev(struct bcma_bus *bus)
+{
+	struct bcma_drv_cc *cc = &bus->drv_cc;
+	u8 cc_rev = cc->core->id.rev;
+
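+	/* Boot device detection depends on the ChipCommon revision: rev 42
+	 * reads it from the NS ROM core's IOST bits, rev 38 from the chip
+	 * status register, and everything else falls back to the flash type
+	 * capability. */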
+	if (cc_rev == 42) {
+		struct bcma_device *core;
+
+		core = bcma_find_core(bus, BCMA_CORE_NS_ROM);
+		if (core) {
+			switch (bcma_aread32(core, BCMA_IOST) &
+				BCMA_NS_ROM_IOST_BOOT_DEV_MASK) {
+			case BCMA_NS_ROM_IOST_BOOT_DEV_NOR:
+				return BCMA_BOOT_DEV_SERIAL;
+			case BCMA_NS_ROM_IOST_BOOT_DEV_NAND:
+				return BCMA_BOOT_DEV_NAND;
+			case BCMA_NS_ROM_IOST_BOOT_DEV_ROM:
+			default:
+				return BCMA_BOOT_DEV_ROM;
+			}
+		}
+	} else {
+		if (cc_rev == 38) {
+			if (cc->status & BCMA_CC_CHIPST_5357_NAND_BOOT)
+				return BCMA_BOOT_DEV_NAND;
+			else if (cc->status & BIT(5))
+				return BCMA_BOOT_DEV_ROM;
+		}
+
+		if ((cc->capabilities & BCMA_CC_CAP_FLASHT) ==
+		    BCMA_CC_FLASHT_PARA)
+			return BCMA_BOOT_DEV_PARALLEL;
+		else
+			return BCMA_BOOT_DEV_SERIAL;
+	}
+
+	return BCMA_BOOT_DEV_SERIAL;
+}
+
+static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
+{
+	struct bcma_bus *bus = mcore->core->bus;
+	enum bcma_boot_dev boot_dev;
+
+	/* Determine flash type this SoC boots from */
+	boot_dev = bcma_boot_dev(bus);
+	switch (boot_dev) {
+	case BCMA_BOOT_DEV_PARALLEL:
+	case BCMA_BOOT_DEV_SERIAL:
+#ifdef CONFIG_BCM47XX
+		bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH2,
+					    BCMA_SOC_FLASH2_SZ);
+#endif
+		break;
+	case BCMA_BOOT_DEV_NAND:
+#ifdef CONFIG_BCM47XX
+		bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH1,
+					    BCMA_SOC_FLASH1_SZ);
+#endif
+		break;
+	default:
+		break;
+	}
+}
+
+void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
+{
+	struct bcma_bus *bus = mcore->core->bus;
+
+	if (mcore->early_setup_done)
+		return;
+
+	bcma_chipco_serial_init(&bus->drv_cc);
+	bcma_core_mips_nvram_init(mcore);
+
+	mcore->early_setup_done = true;
+}
+
+static void bcma_fix_i2s_irqflag(struct bcma_bus *bus)
+{
+	struct bcma_device *cpu, *pcie, *i2s;
+
+	/* Fixup the interrupts in 4716/4748 for i2s core (2010 Broadcom SDK)
+	 * (IRQ flags > 7 are ignored when setting the interrupt masks)
+	 */
+	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4716 &&
+	    bus->chipinfo.id != BCMA_CHIP_ID_BCM4748)
+		return;
+
+	cpu = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+	pcie = bcma_find_core(bus, BCMA_CORE_PCIE);
+	i2s = bcma_find_core(bus, BCMA_CORE_I2S);
+	if (cpu && pcie && i2s &&
+	    bcma_aread32(cpu, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
+	    bcma_aread32(pcie, BCMA_MIPS_OOBSELINA74) == 0x08060504 &&
+	    bcma_aread32(i2s, BCMA_MIPS_OOBSELOUTA30) == 0x88) {
+		bcma_awrite32(cpu, BCMA_MIPS_OOBSELINA74, 0x07060504);
+		bcma_awrite32(pcie, BCMA_MIPS_OOBSELINA74, 0x07060504);
+		bcma_awrite32(i2s, BCMA_MIPS_OOBSELOUTA30, 0x87);
+		bcma_debug(bus,
+			   "Moved i2s interrupt to oob line 7 instead of 8\n");
+	}
+}
+
+void bcma_core_mips_init(struct bcma_drv_mips *mcore)
+{
+	struct bcma_bus *bus;
+	struct bcma_device *core;
+	bus = mcore->core->bus;
+
+	if (mcore->setup_done)
+		return;
+
+	bcma_debug(bus, "Initializing MIPS core...\n");
+
+	bcma_core_mips_early_init(mcore);
+
+	bcma_fix_i2s_irqflag(bus);
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4716:
+	case BCMA_CHIP_ID_BCM4748:
+		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+		bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
+		bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_PCIE, 0);
+		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
+		break;
+	case BCMA_CHIP_ID_BCM5356:
+	case BCMA_CHIP_ID_BCM47162:
+	case BCMA_CHIP_ID_BCM53572:
+		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+		break;
+	case BCMA_CHIP_ID_BCM5357:
+	case BCMA_CHIP_ID_BCM4749:
+		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_80211, 0);
+		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_MAC_GBIT, 0);
+		bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_USB20_HOST, 0);
+		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_CHIPCOMMON, 0);
+		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_I2S, 0);
+		break;
+	case BCMA_CHIP_ID_BCM4706:
+		bcma_core_mips_set_irq_name(bus, 1, BCMA_CORE_PCIE, 0);
+		bcma_core_mips_set_irq_name(bus, 2, BCMA_CORE_4706_MAC_GBIT,
+					    0);
+		bcma_core_mips_set_irq_name(bus, 3, BCMA_CORE_PCIE, 1);
+		bcma_core_mips_set_irq_name(bus, 4, BCMA_CORE_USB20_HOST, 0);
+		bcma_core_mips_set_irq_name(bus, 0, BCMA_CORE_4706_CHIPCOMMON,
+					    0);
+		break;
+	default:
+		list_for_each_entry(core, &bus->cores, list) {
+			core->irq = bcma_core_irq(core, 0);
+		}
+		bcma_err(bus,
+			 "Unknown device (0x%x) found, can not configure IRQs\n",
+			 bus->chipinfo.id);
+	}
+	bcma_debug(bus, "IRQ reconfiguration done\n");
+	bcma_core_mips_dump_irq(bus);
+
+	mcore->setup_done = true;
+}
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
new file mode 100644
index 0000000..f499a46
--- /dev/null
+++ b/drivers/bcma/driver_pci.c
@@ -0,0 +1,306 @@
+/*
+ * Broadcom specific AMBA
+ * PCI Core
+ *
+ * Copyright 2005, 2011, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
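+/* PCIe protocol registers are reached indirectly: write the register address
+ * to PCIEIND_ADDR (the read-back flushes the posted write), then access the
+ * value through PCIEIND_DATA. */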
+u32 bcma_pcie_read(struct bcma_drv_pci *pc, u32 address)
+{
+	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
+	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
+	return pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_DATA);
+}
+
+static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
+{
+	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_ADDR, address);
+	pcicore_read32(pc, BCMA_CORE_PCI_PCIEIND_ADDR);
+	pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
+}
+
+static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
+{
+	u32 v;
+	int i;
+
+	v = BCMA_CORE_PCI_MDIODATA_START;
+	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
+	v |= (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
+	      BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
+	v |= (BCMA_CORE_PCI_MDIODATA_BLK_ADDR <<
+	      BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
+	v |= BCMA_CORE_PCI_MDIODATA_TA;
+	v |= (phy << 4);
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
+
+	udelay(10);
+	for (i = 0; i < 200; i++) {
+		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
+		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
+			break;
+		usleep_range(1000, 2000);
+	}
+}
+
+static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
+{
+	int max_retries = 10;
+	u16 ret = 0;
+	u32 v;
+	int i;
+
+	/* enable mdio access to SERDES */
+	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
+	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
+
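+	/* Core revs >= 10 address the SerDes in two steps (select the PHY via
+	 * a block-address write, then give the register); older revs encode
+	 * device and register directly in the data word. */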
+	if (pc->core->id.rev >= 10) {
+		max_retries = 200;
+		bcma_pcie_mdio_set_phy(pc, device);
+		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
+		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
+		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
+	} else {
+		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
+		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
+	}
+
+	v = BCMA_CORE_PCI_MDIODATA_START;
+	v |= BCMA_CORE_PCI_MDIODATA_READ;
+	v |= BCMA_CORE_PCI_MDIODATA_TA;
+
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
+	/* Wait for the device to complete the transaction */
+	udelay(10);
+	for (i = 0; i < max_retries; i++) {
+		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
+		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE) {
+			udelay(10);
+			ret = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_DATA);
+			break;
+		}
+		usleep_range(1000, 2000);
+	}
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
+	return ret;
+}
+
+static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
+				u8 address, u16 data)
+{
+	int max_retries = 10;
+	u32 v;
+	int i;
+
+	/* enable mdio access to SERDES */
+	v = BCMA_CORE_PCI_MDIOCTL_PREAM_EN;
+	v |= BCMA_CORE_PCI_MDIOCTL_DIVISOR_VAL;
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, v);
+
+	if (pc->core->id.rev >= 10) {
+		max_retries = 200;
+		bcma_pcie_mdio_set_phy(pc, device);
+		v = (BCMA_CORE_PCI_MDIODATA_DEV_ADDR <<
+		     BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF);
+		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF);
+	} else {
+		v = (device << BCMA_CORE_PCI_MDIODATA_DEVADDR_SHF_OLD);
+		v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
+	}
+
+	v = BCMA_CORE_PCI_MDIODATA_START;
+	v |= BCMA_CORE_PCI_MDIODATA_WRITE;
+	v |= BCMA_CORE_PCI_MDIODATA_TA;
+	v |= data;
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_DATA, v);
+	/* Wait for the device to complete the transaction */
+	udelay(10);
+	for (i = 0; i < max_retries; i++) {
+		v = pcicore_read32(pc, BCMA_CORE_PCI_MDIO_CONTROL);
+		if (v & BCMA_CORE_PCI_MDIOCTL_ACCESS_DONE)
+			break;
+		usleep_range(1000, 2000);
+	}
+	pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
+}
+
+static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
+				    u8 address, u16 data)
+{
+	bcma_pcie_mdio_write(pc, device, address, data);
+	return bcma_pcie_mdio_read(pc, device, address);
+}
+
+/**************************************************
+ * Early init.
+ **************************************************/
+
+static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
+{
+	struct bcma_device *core = pc->core;
+	u16 val16, core_index;
+	uint regoff;
+
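+	/* Make sure the SPROM "PI" field matches this core's index; on chips
+	 * with more than one PCIe core the SPROM is shared, and the field
+	 * presumably tells the hardware which core owns it. */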
+	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
+	core_index = (u16)core->core_index;
+
+	val16 = pcicore_read16(pc, regoff);
+	if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
+	     != core_index) {
+		val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
+			(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
+		pcicore_write16(pc, regoff, val16);
+	}
+}
+
+/*
+ * Apply some early fixes required before accessing SPROM.
+ * See also si_pci_fixcfg.
+ */
+void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
+{
+	if (pc->early_setup_done)
+		return;
+
+	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
+	if (pc->hostmode)
+		goto out;
+
+	bcma_core_pci_fixcfg(pc);
+
+out:
+	pc->early_setup_done = true;
+}
+
+/**************************************************
+ * Workarounds.
+ **************************************************/
+
+static u8 bcma_pcicore_polarity_workaround(struct bcma_drv_pci *pc)
+{
+	u32 tmp;
+
+	tmp = bcma_pcie_read(pc, BCMA_CORE_PCI_PLP_STATUSREG);
+	if (tmp & BCMA_CORE_PCI_PLP_POLARITYINV_STAT)
+		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE |
+		       BCMA_CORE_PCI_SERDES_RX_CTRL_POLARITY;
+	else
+		return BCMA_CORE_PCI_SERDES_RX_CTRL_FORCE;
+}
+
+static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
+{
+	u16 tmp;
+
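+	/* Mirror the RX polarity reported by the PLP block into the SerDes RX
+	 * control register, then make sure the PLL frequency detector is
+	 * switched off. */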
+	bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_RX,
+	                     BCMA_CORE_PCI_SERDES_RX_CTRL,
+			     bcma_pcicore_polarity_workaround(pc));
+	tmp = bcma_pcie_mdio_read(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
+	                          BCMA_CORE_PCI_SERDES_PLL_CTRL);
+	if (tmp & BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN)
+		bcma_pcie_mdio_write(pc, BCMA_CORE_PCI_MDIODATA_DEV_PLL,
+		                     BCMA_CORE_PCI_SERDES_PLL_CTRL,
+		                     tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
+}
+
+/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
+/* Needs to happen when coming out of 'standby'/'hibernate' */
+static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
+{
+	u16 val16;
+	uint regoff;
+
+	regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_MISC_CONFIG);
+
+	val16 = pcicore_read16(pc, regoff);
+
+	if (!(val16 & BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST)) {
+		val16 |= BCMA_CORE_PCI_SPROM_L23READY_EXIT_NOPERST;
+		pcicore_write16(pc, regoff, val16);
+	}
+}
+
+/**************************************************
+ * Init.
+ **************************************************/
+
+static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
+{
+	bcma_pcicore_serdes_workaround(pc);
+	bcma_core_pci_config_fixup(pc);
+}
+
+void bcma_core_pci_init(struct bcma_drv_pci *pc)
+{
+	if (pc->setup_done)
+		return;
+
+	bcma_core_pci_early_init(pc);
+
+	if (pc->hostmode)
+		bcma_core_pci_hostmode_init(pc);
+	else
+		bcma_core_pci_clientmode_init(pc);
+}
+
+void bcma_core_pci_power_save(struct bcma_bus *bus, bool up)
+{
+	struct bcma_drv_pci *pc;
+	u16 data;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	pc = &bus->drv_pci[0];
+
+	if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+		data = up ? 0x74 : 0x7C;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	} else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+		data = up ? 0x75 : 0x7D;
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+		bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+					 BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+	}
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_power_save);
+
+static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+{
+	u32 w;
+
+	w = bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
+	if (extend)
+		w |= BCMA_CORE_PCI_ASPMTIMER_EXTEND;
+	else
+		w &= ~BCMA_CORE_PCI_ASPMTIMER_EXTEND;
+	bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
+	bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
+}
+
+void bcma_core_pci_up(struct bcma_drv_pci *pc)
+{
+	bcma_core_pci_extend_L1timer(pc, true);
+}
+
+void bcma_core_pci_down(struct bcma_drv_pci *pc)
+{
+	bcma_core_pci_extend_L1timer(pc, false);
+}
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
new file mode 100644
index 0000000..c42cec7
--- /dev/null
+++ b/drivers/bcma/driver_pci_host.c
@@ -0,0 +1,623 @@
+/*
+ * Broadcom specific AMBA
+ * PCI Core in hostmode
+ *
+ * Copyright 2005 - 2011, Broadcom Corporation
+ * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/bcma/bcma.h>
+#include <asm/paccess.h>
+
+/* Probe a 32bit value on the bus and catch bus exceptions.
+ * Returns nonzero on a bus exception.
+ * This is MIPS specific */
+#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))
+
+/* Assume one-hot slot wiring */
+#define BCMA_PCI_SLOT_MAX	16
+#define	PCI_CONFIG_SPACE_SIZE	256
+
+bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+	struct bcma_bus *bus = pc->core->bus;
+	u16 chipid_top;
+	u32 tmp;
+
+	chipid_top = (bus->chipinfo.id & 0xFF00);
+	if (chipid_top != 0x4700 &&
+	    chipid_top != 0x5300)
+		return false;
+
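+	/* Enable the core and probe its register space; a bus exception on
+	 * the read means the core is not strapped for hostmode. */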
+	bcma_core_enable(pc->core, 0);
+
+	return !mips_busprobe32(tmp, pc->core->io_addr);
+}
+
+static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
+{
+	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
+	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
+	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
+}
+
+static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
+				   u32 data)
+{
+	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
+	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
+	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
+}
+
+static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
+			     unsigned int func, unsigned int off)
+{
+	u32 addr = 0;
+
+	/* Issue config commands only when the data link is up (at least
+	 * one external PCIe device is present).
+	 */
+	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
+			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
+		goto out;
+
+	/* Type 0 transaction */
+	/* Slide the PCI window to the appropriate slot */
+	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
+	/* Calculate the address */
+	addr = pc->host_controller->host_cfg_addr;
+	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
+	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
+	addr |= (off & ~3);
+
+out:
+	return addr;
+}
+
+static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
+				  unsigned int func, unsigned int off,
+				  void *buf, int len)
+{
+	int err = -EINVAL;
+	u32 addr, val;
+	void __iomem *mmio = NULL;
+
+	WARN_ON(!pc->hostmode);
+	if (unlikely(len != 1 && len != 2 && len != 4))
+		goto out;
+	if (dev == 0) {
+		/* we support only two functions on device 0 */
+		if (func > 1)
+			goto out;
+
+		/* accesses to config registers with offsets >= 256
+		 * require indirect access.
+		 */
+		if (off >= PCI_CONFIG_SPACE_SIZE) {
+			addr = (func << 12);
+			addr |= (off & 0x0FFC);
+			val = bcma_pcie_read_config(pc, addr);
+		} else {
+			addr = BCMA_CORE_PCI_PCICFG0;
+			addr |= (func << 8);
+			addr |= (off & 0xFC);
+			val = pcicore_read32(pc, addr);
+		}
+	} else {
+		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
+		if (unlikely(!addr))
+			goto out;
+		err = -ENOMEM;
+		mmio = ioremap_nocache(addr, sizeof(val));
+		if (!mmio)
+			goto out;
+
+		if (mips_busprobe32(val, mmio)) {
+			val = 0xFFFFFFFF;
+			goto unmap;
+		}
+	}
+	val >>= (8 * (off & 3));
+
+	switch (len) {
+	case 1:
+		*((u8 *)buf) = (u8)val;
+		break;
+	case 2:
+		*((u16 *)buf) = (u16)val;
+		break;
+	case 4:
+		*((u32 *)buf) = (u32)val;
+		break;
+	}
+	err = 0;
+unmap:
+	if (mmio)
+		iounmap(mmio);
+out:
+	return err;
+}
+
+static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
+				   unsigned int func, unsigned int off,
+				   const void *buf, int len)
+{
+	int err = -EINVAL;
+	u32 addr, val;
+	void __iomem *mmio = NULL;
+	u16 chipid = pc->core->bus->chipinfo.id;
+
+	WARN_ON(!pc->hostmode);
+	if (unlikely(len != 1 && len != 2 && len != 4))
+		goto out;
+	if (dev == 0) {
+		/* we support only two functions on device 0 */
+		if (func > 1)
+			goto out;
+
+		/* accesses to config registers with offsets >= 256
+		 * require indirect access.
+		 */
+		if (off >= PCI_CONFIG_SPACE_SIZE) {
+			addr = (func << 12);
+			addr |= (off & 0x0FFC);
+			val = bcma_pcie_read_config(pc, addr);
+		} else {
+			addr = BCMA_CORE_PCI_PCICFG0;
+			addr |= (func << 8);
+			addr |= (off & 0xFC);
+			val = pcicore_read32(pc, addr);
+		}
+	} else {
+		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
+		if (unlikely(!addr))
+			goto out;
+		err = -ENOMEM;
+		mmio = ioremap_nocache(addr, sizeof(val));
+		if (!mmio)
+			goto out;
+
+		if (mips_busprobe32(val, mmio)) {
+			val = 0xFFFFFFFF;
+			goto unmap;
+		}
+	}
+
+	switch (len) {
+	case 1:
+		val &= ~(0xFF << (8 * (off & 3)));
+		val |= *((const u8 *)buf) << (8 * (off & 3));
+		break;
+	case 2:
+		val &= ~(0xFFFF << (8 * (off & 3)));
+		val |= *((const u16 *)buf) << (8 * (off & 3));
+		break;
+	case 4:
+		val = *((const u32 *)buf);
+		break;
+	}
+	if (dev == 0) {
+		/* accesses to config registers with offsets >= 256
+		 * require indirect access.
+		 */
+		if (off >= PCI_CONFIG_SPACE_SIZE)
+			bcma_pcie_write_config(pc, addr, val);
+		else
+			pcicore_write32(pc, addr, val);
+	} else {
+		writel(val, mmio);
+
+		if (chipid == BCMA_CHIP_ID_BCM4716 ||
+		    chipid == BCMA_CHIP_ID_BCM4748)
+			readl(mmio);
+	}
+
+	err = 0;
+unmap:
+	if (mmio)
+		iounmap(mmio);
+out:
+	return err;
+}
+
+static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
+					      unsigned int devfn,
+					      int reg, int size, u32 *val)
+{
+	unsigned long flags;
+	int err;
+	struct bcma_drv_pci *pc;
+	struct bcma_drv_pci_host *pc_host;
+
+	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
+	pc = pc_host->pdev;
+
+	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
+	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
+				     PCI_FUNC(devfn), reg, val, size);
+	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);
+
+	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
+					       unsigned int devfn,
+					       int reg, int size, u32 val)
+{
+	unsigned long flags;
+	int err;
+	struct bcma_drv_pci *pc;
+	struct bcma_drv_pci_host *pc_host;
+
+	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
+	pc = pc_host->pdev;
+
+	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
+	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
+				      PCI_FUNC(devfn), reg, &val, size);
+	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);
+
+	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
+}
+
+/* return cap_offset if requested capability exists in the PCI config space */
+static u8 bcma_find_pci_capability(struct bcma_drv_pci *pc, unsigned int dev,
+				   unsigned int func, u8 req_cap_id,
+				   unsigned char *buf, u32 *buflen)
+{
+	u8 cap_id;
+	u8 cap_ptr = 0;
+	u32 bufsize;
+	u8 byte_val;
+
+	/* check for Header type 0 */
+	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
+				sizeof(u8));
+	if ((byte_val & 0x7F) != PCI_HEADER_TYPE_NORMAL)
+		return cap_ptr;
+
+	/* check if the capability pointer field exists */
+	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
+				sizeof(u8));
+	if (!(byte_val & PCI_STATUS_CAP_LIST))
+		return cap_ptr;
+
+	/* check if the capability pointer is 0x00 */
+	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
+				sizeof(u8));
+	if (cap_ptr == 0x00)
+		return cap_ptr;
+
+	/* loop through the capability list and see if the requested
+	 * capability exists */
+	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
+	while (cap_id != req_cap_id) {
+		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
+					sizeof(u8));
+		if (cap_ptr == 0x00)
+			return cap_ptr;
+		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
+					sizeof(u8));
+	}
+
+	/* found the caller requested capability */
+	if ((buf != NULL) && (buflen != NULL)) {
+		u8 cap_data;
+
+		bufsize = *buflen;
+		if (!bufsize)
+			return cap_ptr;
+
+		*buflen = 0;
+
+		/* copy the capability data excluding cap ID and next ptr */
+		cap_data = cap_ptr + 2;
+		if ((bufsize + cap_data)  > PCI_CONFIG_SPACE_SIZE)
+			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
+		*buflen = bufsize;
+		while (bufsize--) {
+			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
+						sizeof(u8));
+			cap_data++;
+			buf++;
+		}
+	}
+
+	return cap_ptr;
+}
+
+/* If the root port is capable of returning Config Request
+ * Retry Status (CRS) Completion Status to software then
+ * enable the feature.
+ */
+static void bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
+{
+	struct bcma_bus *bus = pc->core->bus;
+	u8 cap_ptr, root_ctrl, root_cap, dev;
+	u16 val16;
+	int i;
+
+	cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
+					   NULL);
+	root_cap = cap_ptr + PCI_EXP_RTCAP;
+	bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
+	if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
+		/* Enable CRS software visibility */
+		root_ctrl = cap_ptr + PCI_EXP_RTCTL;
+		val16 = PCI_EXP_RTCTL_CRSSVE;
+		bcma_extpci_write_config(pc, 0, 0, root_ctrl, &val16,
+					 sizeof(u16));
+
+		/* Initiate a configuration request to read the vendor id
+		 * field of the device function's config space header after
+		 * 100 ms wait time from the end of Reset. If the device is
+		 * not done with its internal initialization, it must at
+		 * least return a completion TLP, with a completion status
+		 * of "Configuration Request Retry Status (CRS)". The root
+		 * complex must complete the request to the host by returning
+		 * a read-data value of 0001h for the Vendor ID field and
+		 * all 1s for any additional bytes included in the request.
+		 * Poll using the config reads for max wait time of 1 sec or
+		 * until we receive the successful completion status. Repeat
+		 * the procedure for all the devices.
+		 */
+		for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
+			for (i = 0; i < 100000; i++) {
+				bcma_extpci_read_config(pc, dev, 0,
+							PCI_VENDOR_ID, &val16,
+							sizeof(val16));
+				if (val16 != 0x1)
+					break;
+				udelay(10);
+			}
+			if (val16 == 0x1)
+				bcma_err(bus, "PCI: Broken device in slot %d\n",
+					 dev);
+		}
+	}
+}
+
+void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+{
+	struct bcma_bus *bus = pc->core->bus;
+	struct bcma_drv_pci_host *pc_host;
+	u32 tmp;
+	u32 pci_membase_1G;
+	unsigned long io_map_base;
+
+	bcma_info(bus, "PCIEcore in host mode found\n");
+
+	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
+		bcma_info(bus, "This PCIE core is disabled and not working\n");
+		return;
+	}
+
+	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
+	if (!pc_host)  {
+		bcma_err(bus, "can not allocate memory");
+		return;
+	}
+
+	spin_lock_init(&pc_host->cfgspace_lock);
+
+	pc->host_controller = pc_host;
+	pc_host->pci_controller.io_resource = &pc_host->io_resource;
+	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
+	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
+	pc_host->pdev = pc;
+
+	pci_membase_1G = BCMA_SOC_PCI_DMA;
+	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;
+
+	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
+	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;
+
+	pc_host->mem_resource.name = "BCMA PCIcore external memory",
+	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
+	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
+	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;
+
+	pc_host->io_resource.name = "BCMA PCIcore external I/O",
+	pc_host->io_resource.start = 0x100;
+	pc_host->io_resource.end = 0x7FF;
+	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;
+
+	/* Reset RC */
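+	/* (The sequence below appears to assert the fundamental reset towards
+	 * the device via RST_OE, hold it for ~50 ms, and then release it by
+	 * also setting RST.) */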
+	usleep_range(3000, 5000);
+	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
+	msleep(50);
+	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
+			BCMA_CORE_PCI_CTL_RST_OE);
+
+	/* 64 MB I/O access window. On 4716, use sbtopcie0 to access the
+	 * device registers. We can't use the address match 2 (1 GB window)
+	 * region, as MIPS can't generate 64-bit addresses on the backplane.
+	 */
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
+	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
+		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
+		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
+					    BCMA_SOC_PCI_MEM_SZ - 1;
+		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
+				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
+	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
+		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
+		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
+		if (pc->core->core_unit == 0) {
+			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
+			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
+						    BCMA_SOC_PCI_MEM_SZ - 1;
+			pc_host->io_resource.start = 0x100;
+			pc_host->io_resource.end = 0x47F;
+			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
+			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
+					tmp | BCMA_SOC_PCI_MEM);
+		} else if (pc->core->core_unit == 1) {
+			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
+			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
+						    BCMA_SOC_PCI_MEM_SZ - 1;
+			pc_host->io_resource.start = 0x480;
+			pc_host->io_resource.end = 0x7FF;
+			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
+			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
+			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
+					tmp | BCMA_SOC_PCI1_MEM);
+		}
+	} else
+		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
+				BCMA_CORE_PCI_SBTOPCI_IO);
+
+	/* 64 MB configuration access window */
+	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
+
+	/* 1 GB memory access window */
+	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
+			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);
+
+
+	/* As per PCI Express Base Spec 1.1 we need to wait for
+	 * at least 100 ms from the end of a reset (cold/warm/hot)
+	 * before issuing configuration requests to PCI Express
+	 * devices.
+	 */
+	msleep(100);
+
+	bcma_core_pci_enable_crs(pc);
+
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
+	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
+		u16 val16;
+		bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
+					&val16, sizeof(val16));
+		val16 |= (2 << 5);	/* Max payload size of 512 */
+		val16 |= (2 << 12);	/* MRRS 512 */
+		bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
+					 &val16, sizeof(val16));
+	}
+
+	/* Enable PCI bridge BAR0 memory & master access */
+	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));
+
+	/* Enable PCI interrupts */
+	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);
+
+	/* OK, ready to run; register it with the system.
+	 * The following needs to change if we want to port hostmode to a
+	 * non-MIPS platform. */
+	io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
+						     resource_size(&pc_host->mem_resource));
+	pc_host->pci_controller.io_map_base = io_map_base;
+	set_io_port_base(pc_host->pci_controller.io_map_base);
+	/* Give the PCI controller some time to configure itself with the new
+	 * values. Not waiting here causes the machine to crash. */
+	usleep_range(10000, 15000);
+	register_pci_controller(&pc_host->pci_controller);
+	return;
+}
+
+/* Early PCI fixup for a device on the PCI-core bridge. */
+static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
+{
+	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
+		/* This is not a device on the PCI-core bridge. */
+		return;
+	}
+	if (PCI_SLOT(dev->devfn) != 0)
+		return;
+
+	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));
+
+	/* Enable PCI bridge bus mastering and memory space */
+	pci_set_master(dev);
+	if (pcibios_enable_device(dev, ~0) < 0) {
+		pr_err("PCI: BCMA bridge enable failed\n");
+		return;
+	}
+
+	/* Enable PCI bridge BAR1 prefetch and burst */
+	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);
+
+/* Early PCI fixup for all PCI-cores to set the correct memory address. */
+static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
+{
+	struct resource *res;
+	int pos, err;
+
+	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
+		/* This is not a device on the PCI-core bridge. */
+		return;
+	}
+	if (PCI_SLOT(dev->devfn) == 0)
+		return;
+
+	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));
+
+	for (pos = 0; pos < 6; pos++) {
+		res = &dev->resource[pos];
+		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
+			err = pci_assign_resource(dev, pos);
+			if (err)
+				pr_err("PCI: Problem fixing up the addresses on %s\n",
+				       pci_name(dev));
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
+
+/* This function is called when doing a pci_enable_device().
+ * We must first check whether the device sits on the PCI-core bridge. */
+int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
+{
+	struct bcma_drv_pci_host *pc_host;
+	int readrq;
+
+	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
+		/* This is not a device on the PCI-core bridge. */
+		return -ENODEV;
+	}
+	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
+			       pci_ops);
+
+	pr_info("PCI: Fixing up device %s\n", pci_name(dev));
+
+	/* Fix up interrupt lines */
+	dev->irq = bcma_core_irq(pc_host->pdev->core, 0);
+	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+
+	readrq = pcie_get_readrq(dev);
+	if (readrq > 128) {
+		pr_info("change PCIe max read request size from %i to 128\n", readrq);
+		pcie_set_readrq(dev, 128);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
+
+/* PCI device IRQ mapping. */
+int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
+{
+	struct bcma_drv_pci_host *pc_host;
+
+	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
+		/* This is not a device on the PCI-core bridge. */
+		return -ENODEV;
+	}
+
+	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
+			       pci_ops);
+	return bcma_core_irq(pc_host->pdev->core, 0);
+}
+EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);
diff --git a/drivers/bcma/driver_pcie2.c b/drivers/bcma/driver_pcie2.c
new file mode 100644
index 0000000..cf889fc
--- /dev/null
+++ b/drivers/bcma/driver_pcie2.c
@@ -0,0 +1,201 @@
+/*
+ * Broadcom specific AMBA
+ * PCIe Gen 2 Core
+ *
+ * Copyright 2014, Broadcom Corporation
+ * Copyright 2014, Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/bcma/bcma.h>
+#include <linux/pci.h>
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+#if 0
+static u32 bcma_core_pcie2_cfg_read(struct bcma_drv_pcie2 *pcie2, u32 addr)
+{
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
+	pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR);
+	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
+}
+#endif
+
+static void bcma_core_pcie2_cfg_write(struct bcma_drv_pcie2 *pcie2, u32 addr,
+				      u32 val)
+{
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, addr);
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, val);
+}
+
+/**************************************************
+ * Init.
+ **************************************************/
+
+static u32 bcma_core_pcie2_war_delay_perst_enab(struct bcma_drv_pcie2 *pcie2,
+						bool enable)
+{
+	u32 val;
+
+	/* restore back to default */
+	val = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
+	val |= PCIE2_CLKC_DLYPERST;
+	val &= ~PCIE2_CLKC_DISSPROMLD;
+	if (enable) {
+		val &= ~PCIE2_CLKC_DLYPERST;
+		val |= PCIE2_CLKC_DISSPROMLD;
+	}
+	pcie2_write32(pcie2, (BCMA_CORE_PCIE2_CLK_CONTROL), val);
+	/* flush */
+	return pcie2_read32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL);
+}
+
+static void bcma_core_pcie2_set_ltr_vals(struct bcma_drv_pcie2 *pcie2)
+{
+	/* LTR0 */
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x844);
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x883c883c);
+	/* LTR1 */
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x848);
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x88648864);
+	/* LTR2 */
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, 0x84C);
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x90039003);
+}
+
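+/* Work around broken Latency Tolerance Reporting on the affected core revs:
+ * if LTR is enabled, reprogram the LTR values, re-enable it and cycle the
+ * LTR state machine through active and sleep once. */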
+static void bcma_core_pcie2_hw_ltr_war(struct bcma_drv_pcie2 *pcie2)
+{
+	u8 core_rev = pcie2->core->id.rev;
+	u32 devstsctr2;
+
+	if (core_rev < 2 || core_rev == 10 || core_rev > 13)
+		return;
+
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+		      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
+	devstsctr2 = pcie2_read32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA);
+	if (devstsctr2 & PCIE2_CAP_DEVSTSCTRL2_LTRENAB) {
+		/* force the right LTR values */
+		bcma_core_pcie2_set_ltr_vals(pcie2);
+
+		/* TODO:
+		 *si_core_wrapperreg(pcie2, 3, 0x60, 0x8080, 0);
+		 */
+
+		/* enable the LTR */
+		devstsctr2 |= PCIE2_CAP_DEVSTSCTRL2_LTRENAB;
+		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+			      PCIE2_CAP_DEVSTSCTRL2_OFFSET);
+		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, devstsctr2);
+
+		/* set the LTR state to be active */
+		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
+			      PCIE2_LTR_ACTIVE);
+		usleep_range(1000, 2000);
+
+		/* set the LTR state to be sleep */
+		pcie2_write32(pcie2, BCMA_CORE_PCIE2_LTR_STATE,
+			      PCIE2_LTR_SLEEP);
+		usleep_range(1000, 2000);
+	}
+}
+
+static void pciedev_crwlpciegen2(struct bcma_drv_pcie2 *pcie2)
+{
+	u8 core_rev = pcie2->core->id.rev;
+	bool pciewar160, pciewar162;
+
+	pciewar160 = core_rev == 7 || core_rev == 9 || core_rev == 11;
+	pciewar162 = core_rev == 5 || core_rev == 7 || core_rev == 8 ||
+		     core_rev == 9 || core_rev == 11;
+
+	if (!pciewar160 && !pciewar162)
+		return;
+
+/* TODO */
+#if 0
+	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CLK_CONTROL,
+		    PCIE_DISABLE_L1CLK_GATING);
+#if 0
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+		      PCIEGEN2_COE_PVT_TL_CTRL_0);
+	pcie2_mask32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA,
+		     ~(1 << COE_PVT_TL_CTRL_0_PM_DIS_L1_REENTRY_BIT));
+#endif
+#endif
+}
+
+static void pciedev_crwlpciegen2_180(struct bcma_drv_pcie2 *pcie2)
+{
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_PMCR_REFUP);
+	pcie2_set32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 0x1f);
+}
+
+static void pciedev_crwlpciegen2_182(struct bcma_drv_pcie2 *pcie2)
+{
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR, PCIE2_SBMBX);
+	pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, 1 << 0);
+}
+
+static void pciedev_reg_pm_clk_period(struct bcma_drv_pcie2 *pcie2)
+{
+	struct bcma_drv_cc *drv_cc = &pcie2->core->bus->drv_cc;
+	u8 core_rev = pcie2->core->id.rev;
+	u32 alp_khz, pm_value;
+
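+	/* pm_value works out to two ALP clock periods in nanoseconds
+	 * (1000000 / alp_khz ns per cycle, times two). */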
+	if (core_rev <= 13) {
+		alp_khz = bcma_pmu_get_alp_clock(drv_cc) / 1000;
+		pm_value = (1000000 * 2) / alp_khz;
+		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDADDR,
+			      PCIE2_PVT_REG_PM_CLK_PERIOD);
+		pcie2_write32(pcie2, BCMA_CORE_PCIE2_CONFIGINDDATA, pm_value);
+	}
+}
+
+void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2)
+{
+	struct bcma_bus *bus = pcie2->core->bus;
+	struct bcma_chipinfo *ci = &bus->chipinfo;
+	u32 tmp;
+
+	tmp = pcie2_read32(pcie2, BCMA_CORE_PCIE2_SPROM(54));
+	if ((tmp & 0xe) >> 1 == 2)
+		bcma_core_pcie2_cfg_write(pcie2, 0x4e0, 0x17);
+
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4360:
+	case BCMA_CHIP_ID_BCM4352:
+		pcie2->reqsize = 1024;
+		break;
+	default:
+		pcie2->reqsize = 128;
+		break;
+	}
+
+	if (ci->id == BCMA_CHIP_ID_BCM4360 && ci->rev > 3)
+		bcma_core_pcie2_war_delay_perst_enab(pcie2, true);
+	bcma_core_pcie2_hw_ltr_war(pcie2);
+	pciedev_crwlpciegen2(pcie2);
+	pciedev_reg_pm_clk_period(pcie2);
+	pciedev_crwlpciegen2_180(pcie2);
+	pciedev_crwlpciegen2_182(pcie2);
+}
+
+/**************************************************
+ * Runtime ops.
+ **************************************************/
+
+void bcma_core_pcie2_up(struct bcma_drv_pcie2 *pcie2)
+{
+	struct bcma_bus *bus = pcie2->core->bus;
+	struct pci_dev *dev = bus->host_pci;
+	int err;
+
+	err = pcie_set_readrq(dev, pcie2->reqsize);
+	if (err)
+		bcma_err(bus, "Error setting PCI_EXP_DEVCTL_READRQ: %d\n", err);
+}
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
new file mode 100644
index 0000000..63410ec
--- /dev/null
+++ b/drivers/bcma/host_pci.c
@@ -0,0 +1,390 @@
+/*
+ * Broadcom specific AMBA
+ * PCI Host
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/slab.h>
+#include <linux/bcma/bcma.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+
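+/* BAR0 is a sliding window into the backplane: writing a core's base address
+ * to the BAR0 window register (and its wrapper address to the second window)
+ * makes that core's registers visible through the host's MMIO mapping. */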
+static void bcma_host_pci_switch_core(struct bcma_device *core)
+{
+	int win2 = core->bus->host_is_pcie2 ?
+		BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;
+
+	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
+			       core->addr);
+	pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
+	core->bus->mapped_core = core;
+	bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
+}
+
+/* Provides access to the requested core. Returns base offset that has to be
+ * used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
+{
+	switch (core->id.id) {
+	case BCMA_CORE_CHIPCOMMON:
+		return 3 * BCMA_CORE_SIZE;
+	case BCMA_CORE_PCIE:
+		return 2 * BCMA_CORE_SIZE;
+	}
+
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
+	return ioread8(core->bus->mmio + offset);
+}
+
+static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
+	return ioread16(core->bus->mmio + offset);
+}
+
+static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
+	return ioread32(core->bus->mmio + offset);
+}
+
+static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
+				 u8 value)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
+	iowrite8(value, core->bus->mmio + offset);
+}
+
+static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
+				 u16 value)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
+	iowrite16(value, core->bus->mmio + offset);
+}
+
+static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
+				 u32 value)
+{
+	offset += bcma_host_pci_provide_access_to_core(core);
+	iowrite32(value, core->bus->mmio + offset);
+}
+
+#ifdef CONFIG_BCMA_BLOCKIO
+static void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
+				     size_t count, u16 offset, u8 reg_width)
+{
+	void __iomem *addr = core->bus->mmio + offset;
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	switch (reg_width) {
+	case sizeof(u8):
+		ioread8_rep(addr, buffer, count);
+		break;
+	case sizeof(u16):
+		WARN_ON(count & 1);
+		ioread16_rep(addr, buffer, count >> 1);
+		break;
+	case sizeof(u32):
+		WARN_ON(count & 3);
+		ioread32_rep(addr, buffer, count >> 2);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void bcma_host_pci_block_write(struct bcma_device *core,
+				      const void *buffer, size_t count,
+				      u16 offset, u8 reg_width)
+{
+	void __iomem *addr = core->bus->mmio + offset;
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	switch (reg_width) {
+	case sizeof(u8):
+		iowrite8_rep(addr, buffer, count);
+		break;
+	case sizeof(u16):
+		WARN_ON(count & 1);
+		iowrite16_rep(addr, buffer, count >> 1);
+		break;
+	case sizeof(u32):
+		WARN_ON(count & 3);
+		iowrite32_rep(addr, buffer, count >> 2);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+#endif
+
+static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
+{
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
+}
+
+static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
+				  u32 value)
+{
+	if (core->bus->mapped_core != core)
+		bcma_host_pci_switch_core(core);
+	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
+}
+
+static const struct bcma_host_ops bcma_host_pci_ops = {
+	.read8		= bcma_host_pci_read8,
+	.read16		= bcma_host_pci_read16,
+	.read32		= bcma_host_pci_read32,
+	.write8		= bcma_host_pci_write8,
+	.write16	= bcma_host_pci_write16,
+	.write32	= bcma_host_pci_write32,
+#ifdef CONFIG_BCMA_BLOCKIO
+	.block_read	= bcma_host_pci_block_read,
+	.block_write	= bcma_host_pci_block_write,
+#endif
+	.aread32	= bcma_host_pci_aread32,
+	.awrite32	= bcma_host_pci_awrite32,
+};
+
+static int bcma_host_pci_probe(struct pci_dev *dev,
+			       const struct pci_device_id *id)
+{
+	struct bcma_bus *bus;
+	int err = -ENOMEM;
+	const char *name;
+	u32 val;
+
+	/* Alloc */
+	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
+	if (!bus)
+		goto out;
+
+	/* Basic PCI configuration */
+	err = pci_enable_device(dev);
+	if (err)
+		goto err_kfree_bus;
+
+	name = dev_name(&dev->dev);
+	if (dev->driver && dev->driver->name)
+		name = dev->driver->name;
+	err = pci_request_regions(dev, name);
+	if (err)
+		goto err_pci_disable;
+	pci_set_master(dev);
+
+	/* Disable the RETRY_TIMEOUT register (0x41) to keep
+	 * PCI Tx retries from interfering with C3 CPU state */
+	pci_read_config_dword(dev, 0x40, &val);
+	if ((val & 0x0000ff00) != 0)
+		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);
+
+	/* SSB needed additional powering up; do any AMBA PCI (non-PCIe) cards
+	 * even exist? */
+	if (!pci_is_pcie(dev)) {
+		bcma_err(bus, "PCI card detected, they are not supported.\n");
+		err = -ENXIO;
+		goto err_pci_release_regions;
+	}
+
+	/* Map MMIO */
+	err = -ENOMEM;
+	bus->mmio = pci_iomap(dev, 0, ~0UL);
+	if (!bus->mmio)
+		goto err_pci_release_regions;
+
+	/* Host specific */
+	bus->host_pci = dev;
+	bus->hosttype = BCMA_HOSTTYPE_PCI;
+	bus->ops = &bcma_host_pci_ops;
+
+	bus->boardinfo.vendor = bus->host_pci->subsystem_vendor;
+	bus->boardinfo.type = bus->host_pci->subsystem_device;
+
+	/* Initialize struct, detect chip */
+	bcma_init_bus(bus);
+
+	/* Scan bus to find out generation of PCIe core */
+	err = bcma_bus_scan(bus);
+	if (err)
+		goto err_pci_unmap_mmio;
+
+	if (bcma_find_core(bus, BCMA_CORE_PCIE2))
+		bus->host_is_pcie2 = true;
+
+	/* Register */
+	err = bcma_bus_register(bus);
+	if (err)
+		goto err_unregister_cores;
+
+	pci_set_drvdata(dev, bus);
+
+out:
+	return err;
+
+err_unregister_cores:
+	bcma_unregister_cores(bus);
+err_pci_unmap_mmio:
+	pci_iounmap(dev, bus->mmio);
+err_pci_release_regions:
+	pci_release_regions(dev);
+err_pci_disable:
+	pci_disable_device(dev);
+err_kfree_bus:
+	kfree(bus);
+	return err;
+}
+
+static void bcma_host_pci_remove(struct pci_dev *dev)
+{
+	struct bcma_bus *bus = pci_get_drvdata(dev);
+
+	bcma_bus_unregister(bus);
+	pci_iounmap(dev, bus->mmio);
+	pci_release_regions(dev);
+	pci_disable_device(dev);
+	kfree(bus);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcma_host_pci_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct bcma_bus *bus = pci_get_drvdata(pdev);
+
+	bus->mapped_core = NULL;
+
+	return bcma_bus_suspend(bus);
+}
+
+static int bcma_host_pci_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct bcma_bus *bus = pci_get_drvdata(pdev);
+
+	return bcma_bus_resume(bus);
+}
+
+static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
+			 bcma_host_pci_resume);
+#define BCMA_PM_OPS	(&bcma_pm_ops)
+
+#else /* CONFIG_PM_SLEEP */
+
+#define BCMA_PM_OPS     NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct pci_device_id bcma_pci_bridge_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) },	/* 0xa8d8 */
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4358) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4360) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0016) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_DELL, 0x0018) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_FOXCONN, 0xe092) },
+	{ PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, 0x4365, PCI_VENDOR_ID_HP, 0x804a) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a0) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43b1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) },	/* 0xa8db, BCM43217 (sic!) */
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) },	/* 0xa8dc */
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
+
+static struct pci_driver bcma_pci_bridge_driver = {
+	.name = "bcma-pci-bridge",
+	.id_table = bcma_pci_bridge_tbl,
+	.probe = bcma_host_pci_probe,
+	.remove = bcma_host_pci_remove,
+	.driver.pm = BCMA_PM_OPS,
+};
+
+int __init bcma_host_pci_init(void)
+{
+	return pci_register_driver(&bcma_pci_bridge_driver);
+}
+
+void __exit bcma_host_pci_exit(void)
+{
+	pci_unregister_driver(&bcma_pci_bridge_driver);
+}
+
+/**************************************************
+ * Runtime ops for drivers.
+ **************************************************/
+
+/* See also pcicore_up */
+void bcma_host_pci_up(struct bcma_bus *bus)
+{
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	if (bus->host_is_pcie2)
+		bcma_core_pcie2_up(&bus->drv_pcie2);
+	else
+		bcma_core_pci_up(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_up);
+
+/* See also pcicore_down */
+void bcma_host_pci_down(struct bcma_bus *bus)
+{
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+		return;
+
+	if (!bus->host_is_pcie2)
+		bcma_core_pci_down(&bus->drv_pci[0]);
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_down);
+
+/* See also si_pci_setup */
+int bcma_host_pci_irq_ctl(struct bcma_bus *bus, struct bcma_device *core,
+			  bool enable)
+{
+	struct pci_dev *pdev;
+	u32 coremask, tmp;
+	int err = 0;
+
+	if (bus->hosttype != BCMA_HOSTTYPE_PCI) {
+		/* This bcma device is not on a PCI host bus, so its IRQs are
+		 * not routed through the PCI core and we must not enable
+		 * routing through it. */
+		goto out;
+	}
+
+	pdev = bus->host_pci;
+
+	err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
+	if (err)
+		goto out;
+
+	coremask = BIT(core->core_index) << 8;
+	if (enable)
+		tmp |= coremask;
+	else
+		tmp &= ~coremask;
+
+	err = pci_write_config_dword(pdev, BCMA_PCI_IRQMASK, tmp);
+
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(bcma_host_pci_irq_ctl);
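+
+/*
+ * Usage sketch: a hypothetical core driver could wrap the runtime ops above
+ * around its bring-up like this. The my_* names are placeholders; only
+ * bcma_host_pci_up(), bcma_host_pci_down() and bcma_host_pci_irq_ctl() are
+ * part of the bcma API.
+ */
+#if 0
+static int my_wifi_attach(struct bcma_device *my_core)
+{
+	struct bcma_bus *bus = my_core->bus;
+	int err;
+
+	/* Bring the PCIe host interface up before touching the core */
+	bcma_host_pci_up(bus);
+
+	/* Route this core's interrupt through the PCI core */
+	err = bcma_host_pci_irq_ctl(bus, my_core, true);
+	if (err) {
+		bcma_host_pci_down(bus);
+		return err;
+	}
+
+	/* Core specific initialization would go here */
+	return 0;
+}
+#endif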
diff --git a/drivers/bcma/host_soc.c b/drivers/bcma/host_soc.c
new file mode 100644
index 0000000..2dce347
--- /dev/null
+++ b/drivers/bcma/host_soc.c
@@ -0,0 +1,278 @@
+/*
+ * Broadcom specific AMBA
+ * System on Chip (SoC) Host
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include "scan.h"
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_soc.h>
+
+static u8 bcma_host_soc_read8(struct bcma_device *core, u16 offset)
+{
+	return readb(core->io_addr + offset);
+}
+
+static u16 bcma_host_soc_read16(struct bcma_device *core, u16 offset)
+{
+	return readw(core->io_addr + offset);
+}
+
+static u32 bcma_host_soc_read32(struct bcma_device *core, u16 offset)
+{
+	return readl(core->io_addr + offset);
+}
+
+static void bcma_host_soc_write8(struct bcma_device *core, u16 offset,
+				 u8 value)
+{
+	writeb(value, core->io_addr + offset);
+}
+
+static void bcma_host_soc_write16(struct bcma_device *core, u16 offset,
+				 u16 value)
+{
+	writew(value, core->io_addr + offset);
+}
+
+static void bcma_host_soc_write32(struct bcma_device *core, u16 offset,
+				 u32 value)
+{
+	writel(value, core->io_addr + offset);
+}
+
+#ifdef CONFIG_BCMA_BLOCKIO
+static void bcma_host_soc_block_read(struct bcma_device *core, void *buffer,
+				     size_t count, u16 offset, u8 reg_width)
+{
+	void __iomem *addr = core->io_addr + offset;
+
+	switch (reg_width) {
+	case sizeof(u8): {
+		u8 *buf = buffer;
+
+		while (count) {
+			*buf = __raw_readb(addr);
+			buf++;
+			count--;
+		}
+		break;
+	}
+	case sizeof(u16): {
+		__le16 *buf = buffer;
+
+		WARN_ON(count & 1);
+		while (count) {
+			*buf = (__force __le16)__raw_readw(addr);
+			buf++;
+			count -= 2;
+		}
+		break;
+	}
+	case sizeof(u32): {
+		__le32 *buf = buffer;
+
+		WARN_ON(count & 3);
+		while (count) {
+			*buf = (__force __le32)__raw_readl(addr);
+			buf++;
+			count -= 4;
+		}
+		break;
+	}
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void bcma_host_soc_block_write(struct bcma_device *core,
+				      const void *buffer,
+				      size_t count, u16 offset, u8 reg_width)
+{
+	void __iomem *addr = core->io_addr + offset;
+
+	switch (reg_width) {
+	case sizeof(u8): {
+		const u8 *buf = buffer;
+
+		while (count) {
+			__raw_writeb(*buf, addr);
+			buf++;
+			count--;
+		}
+		break;
+	}
+	case sizeof(u16): {
+		const __le16 *buf = buffer;
+
+		WARN_ON(count & 1);
+		while (count) {
+			__raw_writew((__force u16)(*buf), addr);
+			buf++;
+			count -= 2;
+		}
+		break;
+	}
+	case sizeof(u32): {
+		const __le32 *buf = buffer;
+
+		WARN_ON(count & 3);
+		while (count) {
+			__raw_writel((__force u32)(*buf), addr);
+			buf++;
+			count -= 4;
+		}
+		break;
+	}
+	default:
+		WARN_ON(1);
+	}
+}
+#endif /* CONFIG_BCMA_BLOCKIO */
+
+static u32 bcma_host_soc_aread32(struct bcma_device *core, u16 offset)
+{
+	if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n"))
+		return ~0;
+	return readl(core->io_wrap + offset);
+}
+
+static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset,
+				  u32 value)
+{
+	if (WARN_ONCE(!core->io_wrap, "Accessed core has no wrapper/agent\n"))
+		return;
+	writel(value, core->io_wrap + offset);
+}
+
+static const struct bcma_host_ops bcma_host_soc_ops = {
+	.read8		= bcma_host_soc_read8,
+	.read16		= bcma_host_soc_read16,
+	.read32		= bcma_host_soc_read32,
+	.write8		= bcma_host_soc_write8,
+	.write16	= bcma_host_soc_write16,
+	.write32	= bcma_host_soc_write32,
+#ifdef CONFIG_BCMA_BLOCKIO
+	.block_read	= bcma_host_soc_block_read,
+	.block_write	= bcma_host_soc_block_write,
+#endif
+	.aread32	= bcma_host_soc_aread32,
+	.awrite32	= bcma_host_soc_awrite32,
+};
+
+int __init bcma_host_soc_register(struct bcma_soc *soc)
+{
+	struct bcma_bus *bus = &soc->bus;
+
+	/* iomap only the first core. We have to read some registers on this
+	 * core to scan the bus.
+	 */
+	bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1);
+	if (!bus->mmio)
+		return -ENOMEM;
+
+	/* Host specific */
+	bus->hosttype = BCMA_HOSTTYPE_SOC;
+	bus->ops = &bcma_host_soc_ops;
+	bus->host_pdev = NULL;
+
+	/* Initialize struct, detect chip */
+	bcma_init_bus(bus);
+
+	return 0;
+}
+
+int __init bcma_host_soc_init(struct bcma_soc *soc)
+{
+	struct bcma_bus *bus = &soc->bus;
+	int err;
+
+	/* Scan bus and initialize it */
+	err = bcma_bus_early_register(bus);
+	if (err)
+		iounmap(bus->mmio);
+
+	return err;
+}
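+
+/*
+ * Usage sketch: hypothetical SoC platform code could bring the bus up with
+ * the two helpers above. The my_* names are placeholders; the call order
+ * (bcma_host_soc_register() first, bcma_host_soc_init() afterwards) is the
+ * one these functions expect.
+ */
+#if 0
+static struct bcma_soc my_bcma_soc;
+
+static int __init my_board_register_bcma(void)
+{
+	int err;
+
+	/* Map the first core and detect the chip */
+	err = bcma_host_soc_register(&my_bcma_soc);
+	if (err)
+		return err;
+
+	/* Scan the bus and early-register the main cores */
+	return bcma_host_soc_init(&my_bcma_soc);
+}
+#endif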
+
+#ifdef CONFIG_OF
+static int bcma_host_soc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct bcma_bus *bus;
+	int err;
+
+	/* Alloc */
+	bus = devm_kzalloc(dev, sizeof(*bus), GFP_KERNEL);
+	if (!bus)
+		return -ENOMEM;
+
+	/* Map MMIO */
+	bus->mmio = of_iomap(np, 0);
+	if (!bus->mmio)
+		return -ENOMEM;
+
+	/* Host specific */
+	bus->hosttype = BCMA_HOSTTYPE_SOC;
+	bus->ops = &bcma_host_soc_ops;
+	bus->host_pdev = pdev;
+
+	/* Initialize struct, detect chip */
+	bcma_init_bus(bus);
+
+	/* Register */
+	err = bcma_bus_register(bus);
+	if (err)
+		goto err_unmap_mmio;
+
+	platform_set_drvdata(pdev, bus);
+
+	return err;
+
+err_unmap_mmio:
+	iounmap(bus->mmio);
+	return err;
+}
+
+static int bcma_host_soc_remove(struct platform_device *pdev)
+{
+	struct bcma_bus *bus = platform_get_drvdata(pdev);
+
+	bcma_bus_unregister(bus);
+	iounmap(bus->mmio);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static const struct of_device_id bcma_host_soc_of_match[] = {
+	{ .compatible = "brcm,bus-axi", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcma_host_soc_of_match);
+
+static struct platform_driver bcma_host_soc_driver = {
+	.driver = {
+		.name = "bcma-host-soc",
+		.of_match_table = bcma_host_soc_of_match,
+	},
+	.probe		= bcma_host_soc_probe,
+	.remove		= bcma_host_soc_remove,
+};
+
+int __init bcma_host_soc_register_driver(void)
+{
+	return platform_driver_register(&bcma_host_soc_driver);
+}
+
+void __exit bcma_host_soc_unregister_driver(void)
+{
+	platform_driver_unregister(&bcma_host_soc_driver);
+}
+#endif /* CONFIG_OF */
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
new file mode 100644
index 0000000..fc1f4ac
--- /dev/null
+++ b/drivers/bcma/main.c
@@ -0,0 +1,723 @@
+/*
+ * Broadcom specific AMBA
+ * Bus subsystem
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+#include <linux/module.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+#include <linux/bcma/bcma.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
+MODULE_LICENSE("GPL");
+
+/* contains the number the next bus should get. */
+static unsigned int bcma_bus_next_num = 0;
+
+/* bcma_buses_mutex locks the bcma_bus_next_num */
+static DEFINE_MUTEX(bcma_buses_mutex);
+
+static int bcma_bus_match(struct device *dev, struct device_driver *drv);
+static int bcma_device_probe(struct device *dev);
+static int bcma_device_remove(struct device *dev);
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env);
+
+static ssize_t manuf_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	return sprintf(buf, "0x%03X\n", core->id.manuf);
+}
+static DEVICE_ATTR_RO(manuf);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	return sprintf(buf, "0x%03X\n", core->id.id);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t rev_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	return sprintf(buf, "0x%02X\n", core->id.rev);
+}
+static DEVICE_ATTR_RO(rev);
+
+static ssize_t class_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	return sprintf(buf, "0x%X\n", core->id.class);
+}
+static DEVICE_ATTR_RO(class);
+
+static struct attribute *bcma_device_attrs[] = {
+	&dev_attr_manuf.attr,
+	&dev_attr_id.attr,
+	&dev_attr_rev.attr,
+	&dev_attr_class.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(bcma_device);
+
+static struct bus_type bcma_bus_type = {
+	.name		= "bcma",
+	.match		= bcma_bus_match,
+	.probe		= bcma_device_probe,
+	.remove		= bcma_device_remove,
+	.uevent		= bcma_device_uevent,
+	.dev_groups	= bcma_device_groups,
+};
+
+static u16 bcma_cc_core_id(struct bcma_bus *bus)
+{
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706)
+		return BCMA_CORE_4706_CHIPCOMMON;
+	return BCMA_CORE_CHIPCOMMON;
+}
+
+struct bcma_device *bcma_find_core_unit(struct bcma_bus *bus, u16 coreid,
+					u8 unit)
+{
+	struct bcma_device *core;
+
+	list_for_each_entry(core, &bus->cores, list) {
+		if (core->id.id == coreid && core->core_unit == unit)
+			return core;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(bcma_find_core_unit);
+
+bool bcma_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value,
+		     int timeout)
+{
+	unsigned long deadline = jiffies + timeout;
+	u32 val;
+
+	do {
+		val = bcma_read32(core, reg);
+		if ((val & mask) == value)
+			return true;
+		cpu_relax();
+		udelay(10);
+	} while (!time_after_eq(jiffies, deadline));
+
+	bcma_warn(core->bus, "Timeout waiting for register 0x%04X!\n", reg);
+
+	return false;
+}
+
+static void bcma_release_core_dev(struct device *dev)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	if (core->io_addr)
+		iounmap(core->io_addr);
+	if (core->io_wrap)
+		iounmap(core->io_wrap);
+	kfree(core);
+}
+
+static bool bcma_is_core_needed_early(u16 core_id)
+{
+	switch (core_id) {
+	case BCMA_CORE_NS_NAND:
+	case BCMA_CORE_NS_QSPI:
+		return true;
+	}
+
+	return false;
+}
+
+static struct device_node *bcma_of_find_child_device(struct device *parent,
+						     struct bcma_device *core)
+{
+	struct device_node *node;
+	u64 size;
+	const __be32 *reg;
+
+	if (!parent->of_node)
+		return NULL;
+
+	for_each_child_of_node(parent->of_node, node) {
+		reg = of_get_address(node, 0, &size, NULL);
+		if (!reg)
+			continue;
+		if (of_translate_address(node, reg) == core->addr)
+			return node;
+	}
+	return NULL;
+}
+
+static int bcma_of_irq_parse(struct device *parent,
+			     struct bcma_device *core,
+			     struct of_phandle_args *out_irq, int num)
+{
+	__be32 laddr[1];
+	int rc;
+
+	if (core->dev.of_node) {
+		rc = of_irq_parse_one(core->dev.of_node, num, out_irq);
+		if (!rc)
+			return rc;
+	}
+
+	out_irq->np = parent->of_node;
+	out_irq->args_count = 1;
+	out_irq->args[0] = num;
+
+	laddr[0] = cpu_to_be32(core->addr);
+	return of_irq_parse_raw(laddr, out_irq);
+}
+
+static unsigned int bcma_of_get_irq(struct device *parent,
+				    struct bcma_device *core, int num)
+{
+	struct of_phandle_args out_irq;
+	int ret;
+
+	if (!IS_ENABLED(CONFIG_OF_IRQ) || !parent->of_node)
+		return 0;
+
+	ret = bcma_of_irq_parse(parent, core, &out_irq, num);
+	if (ret) {
+		bcma_debug(core->bus, "bcma_of_get_irq() failed with rc=%d\n",
+			   ret);
+		return 0;
+	}
+
+	return irq_create_of_mapping(&out_irq);
+}
+
+static void bcma_of_fill_device(struct device *parent,
+				struct bcma_device *core)
+{
+	struct device_node *node;
+
+	node = bcma_of_find_child_device(parent, core);
+	if (node)
+		core->dev.of_node = node;
+
+	core->irq = bcma_of_get_irq(parent, core, 0);
+
+	of_dma_configure(&core->dev, node, false);
+}
+
+unsigned int bcma_core_irq(struct bcma_device *core, int num)
+{
+	struct bcma_bus *bus = core->bus;
+	unsigned int mips_irq;
+
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		return bus->host_pci->irq;
+	case BCMA_HOSTTYPE_SOC:
+		if (bus->drv_mips.core && num == 0) {
+			mips_irq = bcma_core_mips_irq(core);
+			return mips_irq <= 4 ? mips_irq + 2 : 0;
+		}
+		if (bus->host_pdev)
+			return bcma_of_get_irq(&bus->host_pdev->dev, core, num);
+		return 0;
+	case BCMA_HOSTTYPE_SDIO:
+		return 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(bcma_core_irq);
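+
+/*
+ * Usage sketch: a hypothetical core driver can fetch its interrupt with
+ * bcma_core_irq() and pass it to request_irq(). The my_* names are
+ * placeholders and <linux/interrupt.h> is assumed to be available.
+ */
+#if 0
+static irqreturn_t my_core_isr(int irq, void *dev_id)
+{
+	/* Device specific interrupt handling would go here */
+	return IRQ_HANDLED;
+}
+
+static int my_core_setup_irq(struct bcma_device *core)
+{
+	unsigned int irq = bcma_core_irq(core, 0);
+
+	if (!irq)
+		return -ENOENT;
+	return request_irq(irq, my_core_isr, IRQF_SHARED, "my-bcma-core", core);
+}
+#endif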
+
+void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
+{
+	core->dev.release = bcma_release_core_dev;
+	core->dev.bus = &bcma_bus_type;
+	dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
+	core->dev.parent = bcma_bus_get_host_dev(bus);
+	if (core->dev.parent)
+		bcma_of_fill_device(core->dev.parent, core);
+
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		core->dma_dev = &bus->host_pci->dev;
+		core->irq = bus->host_pci->irq;
+		break;
+	case BCMA_HOSTTYPE_SOC:
+		if (IS_ENABLED(CONFIG_OF) && bus->host_pdev) {
+			core->dma_dev = &bus->host_pdev->dev;
+		} else {
+			core->dev.dma_mask = &core->dev.coherent_dma_mask;
+			core->dma_dev = &core->dev;
+		}
+		break;
+	case BCMA_HOSTTYPE_SDIO:
+		break;
+	}
+}
+
+struct device *bcma_bus_get_host_dev(struct bcma_bus *bus)
+{
+	switch (bus->hosttype) {
+	case BCMA_HOSTTYPE_PCI:
+		if (bus->host_pci)
+			return &bus->host_pci->dev;
+		else
+			return NULL;
+	case BCMA_HOSTTYPE_SOC:
+		if (bus->host_pdev)
+			return &bus->host_pdev->dev;
+		else
+			return NULL;
+	case BCMA_HOSTTYPE_SDIO:
+		if (bus->host_sdio)
+			return &bus->host_sdio->dev;
+		else
+			return NULL;
+	}
+	return NULL;
+}
+
+void bcma_init_bus(struct bcma_bus *bus)
+{
+	mutex_lock(&bcma_buses_mutex);
+	bus->num = bcma_bus_next_num++;
+	mutex_unlock(&bcma_buses_mutex);
+
+	INIT_LIST_HEAD(&bus->cores);
+	bus->nr_cores = 0;
+
+	bcma_detect_chip(bus);
+}
+
+static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
+{
+	int err;
+
+	err = device_register(&core->dev);
+	if (err) {
+		bcma_err(bus, "Could not register dev for core 0x%03X\n",
+			 core->id.id);
+		put_device(&core->dev);
+		return;
+	}
+	core->dev_registered = true;
+}
+
+static int bcma_register_devices(struct bcma_bus *bus)
+{
+	struct bcma_device *core;
+	int err;
+
+	list_for_each_entry(core, &bus->cores, list) {
+		/* We support these cores ourselves */
+		switch (core->id.id) {
+		case BCMA_CORE_4706_CHIPCOMMON:
+		case BCMA_CORE_CHIPCOMMON:
+		case BCMA_CORE_NS_CHIPCOMMON_B:
+		case BCMA_CORE_PCI:
+		case BCMA_CORE_PCIE:
+		case BCMA_CORE_PCIE2:
+		case BCMA_CORE_MIPS_74K:
+		case BCMA_CORE_4706_MAC_GBIT_COMMON:
+			continue;
+		}
+
+		/* Early cores were already registered */
+		if (bcma_is_core_needed_early(core->id.id))
+			continue;
+
+		/* Only the first GMAC core on BCM4706 is connected and working */
+		if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+		    core->core_unit > 0)
+			continue;
+
+		bcma_register_core(bus, core);
+	}
+
+#ifdef CONFIG_BCMA_PFLASH
+	if (bus->drv_cc.pflash.present) {
+		err = platform_device_register(&bcma_pflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering parallel flash\n");
+	}
+#endif
+
+#ifdef CONFIG_BCMA_SFLASH
+	if (bus->drv_cc.sflash.present) {
+		err = platform_device_register(&bcma_sflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering serial flash\n");
+	}
+#endif
+
+#ifdef CONFIG_BCMA_NFLASH
+	if (bus->drv_cc.nflash.present) {
+		err = platform_device_register(&bcma_nflash_dev);
+		if (err)
+			bcma_err(bus, "Error registering NAND flash\n");
+	}
+#endif
+	err = bcma_gpio_init(&bus->drv_cc);
+	if (err == -ENOTSUPP)
+		bcma_debug(bus, "GPIO driver not activated\n");
+	else if (err)
+		bcma_err(bus, "Error registering GPIO driver: %i\n", err);
+
+	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+		err = bcma_chipco_watchdog_register(&bus->drv_cc);
+		if (err)
+			bcma_err(bus, "Error registering watchdog driver\n");
+	}
+
+	return 0;
+}
+
+void bcma_unregister_cores(struct bcma_bus *bus)
+{
+	struct bcma_device *core, *tmp;
+
+	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+		if (!core->dev_registered)
+			continue;
+		list_del(&core->list);
+		device_unregister(&core->dev);
+	}
+	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+		platform_device_unregister(bus->drv_cc.watchdog);
+
+	/* Now no one uses the internally-handled cores, so we can free them */
+	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+		list_del(&core->list);
+		kfree(core);
+	}
+}
+
+int bcma_bus_register(struct bcma_bus *bus)
+{
+	int err;
+	struct bcma_device *core;
+	struct device *dev;
+
+	/* Scan for devices (cores) */
+	err = bcma_bus_scan(bus);
+	if (err) {
+		bcma_err(bus, "Failed to scan: %d\n", err);
+		return err;
+	}
+
+	/* Early init CC core */
+	core = bcma_find_core(bus, bcma_cc_core_id(bus));
+	if (core) {
+		bus->drv_cc.core = core;
+		bcma_core_chipcommon_early_init(&bus->drv_cc);
+	}
+
+	/* Early init PCIE core */
+	core = bcma_find_core(bus, BCMA_CORE_PCIE);
+	if (core) {
+		bus->drv_pci[0].core = core;
+		bcma_core_pci_early_init(&bus->drv_pci[0]);
+	}
+
+	dev = bcma_bus_get_host_dev(bus);
+	if (dev)
+		of_platform_default_populate(dev->of_node, NULL, dev);
+
+	/* Cores providing flash access go before SPROM init */
+	list_for_each_entry(core, &bus->cores, list) {
+		if (bcma_is_core_needed_early(core->id.id))
+			bcma_register_core(bus, core);
+	}
+
+	/* Try to get SPROM */
+	err = bcma_sprom_get(bus);
+	if (err == -ENOENT) {
+		bcma_err(bus, "No SPROM available\n");
+	} else if (err)
+		bcma_err(bus, "Failed to get SPROM: %d\n", err);
+
+	/* Init CC core */
+	core = bcma_find_core(bus, bcma_cc_core_id(bus));
+	if (core) {
+		bus->drv_cc.core = core;
+		bcma_core_chipcommon_init(&bus->drv_cc);
+	}
+
+	/* Init ChipCommon B core */
+	core = bcma_find_core(bus, BCMA_CORE_NS_CHIPCOMMON_B);
+	if (core) {
+		bus->drv_cc_b.core = core;
+		bcma_core_chipcommon_b_init(&bus->drv_cc_b);
+	}
+
+	/* Init MIPS core */
+	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+	if (core) {
+		bus->drv_mips.core = core;
+		bcma_core_mips_init(&bus->drv_mips);
+	}
+
+	/* Init PCIE core */
+	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 0);
+	if (core) {
+		bus->drv_pci[0].core = core;
+		bcma_core_pci_init(&bus->drv_pci[0]);
+	}
+
+	/* Init PCIE core */
+	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE, 1);
+	if (core) {
+		bus->drv_pci[1].core = core;
+		bcma_core_pci_init(&bus->drv_pci[1]);
+	}
+
+	/* Init PCIe Gen 2 core */
+	core = bcma_find_core_unit(bus, BCMA_CORE_PCIE2, 0);
+	if (core) {
+		bus->drv_pcie2.core = core;
+		bcma_core_pcie2_init(&bus->drv_pcie2);
+	}
+
+	/* Init GBIT MAC COMMON core */
+	core = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
+	if (core) {
+		bus->drv_gmac_cmn.core = core;
+		bcma_core_gmac_cmn_init(&bus->drv_gmac_cmn);
+	}
+
+	/* Register found cores */
+	bcma_register_devices(bus);
+
+	bcma_info(bus, "Bus registered\n");
+
+	return 0;
+}
+
+void bcma_bus_unregister(struct bcma_bus *bus)
+{
+	int err;
+
+	err = bcma_gpio_unregister(&bus->drv_cc);
+	if (err == -EBUSY)
+		bcma_err(bus, "Some GPIOs are still in use.\n");
+	else if (err)
+		bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
+
+	bcma_core_chipcommon_b_free(&bus->drv_cc_b);
+
+	bcma_unregister_cores(bus);
+}
+
+/*
+ * This is a special version of the bus registration function, designed for
+ * SoCs. It scans the bus and performs basic initialization of the main cores
+ * only. Note that it requires memory allocation, but it will not try to sleep.
+ */
+int __init bcma_bus_early_register(struct bcma_bus *bus)
+{
+	int err;
+	struct bcma_device *core;
+
+	/* Scan for devices (cores) */
+	err = bcma_bus_scan(bus);
+	if (err) {
+		bcma_err(bus, "Failed to scan bus: %d\n", err);
+		return -1;
+	}
+
+	/* Early init CC core */
+	core = bcma_find_core(bus, bcma_cc_core_id(bus));
+	if (core) {
+		bus->drv_cc.core = core;
+		bcma_core_chipcommon_early_init(&bus->drv_cc);
+	}
+
+	/* Early init MIPS core */
+	core = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
+	if (core) {
+		bus->drv_mips.core = core;
+		bcma_core_mips_early_init(&bus->drv_mips);
+	}
+
+	bcma_info(bus, "Early bus registered\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus)
+{
+	struct bcma_device *core;
+
+	list_for_each_entry(core, &bus->cores, list) {
+		struct device_driver *drv = core->dev.driver;
+		if (drv) {
+			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+			if (adrv->suspend)
+				adrv->suspend(core);
+		}
+	}
+	return 0;
+}
+
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+	struct bcma_device *core;
+
+	/* Init CC core */
+	if (bus->drv_cc.core) {
+		bus->drv_cc.setup_done = false;
+		bcma_core_chipcommon_init(&bus->drv_cc);
+	}
+
+	list_for_each_entry(core, &bus->cores, list) {
+		struct device_driver *drv = core->dev.driver;
+		if (drv) {
+			struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+			if (adrv->resume)
+				adrv->resume(core);
+		}
+	}
+
+	return 0;
+}
+#endif
+
+int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
+{
+	drv->drv.name = drv->name;
+	drv->drv.bus = &bcma_bus_type;
+	drv->drv.owner = owner;
+
+	return driver_register(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(__bcma_driver_register);
+
+void bcma_driver_unregister(struct bcma_driver *drv)
+{
+	driver_unregister(&drv->drv);
+}
+EXPORT_SYMBOL_GPL(bcma_driver_unregister);
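+
+/*
+ * Usage sketch: skeleton of a hypothetical bcma client driver. The my_*
+ * names are placeholders; the id table fields and BCMA_ANY_* wildcards are
+ * the ones matched by bcma_bus_match() below, and bcma_driver_register() is
+ * assumed to be the usual wrapper around __bcma_driver_register() with
+ * THIS_MODULE.
+ */
+#if 0
+static const struct bcma_device_id my_bcma_tbl[] = {
+	{ .manuf = BCMA_MANUF_BCM, .id = BCMA_CORE_80211,
+	  .rev = BCMA_ANY_REV, .class = BCMA_ANY_CLASS },
+	{},
+};
+
+static int my_probe(struct bcma_device *core)
+{
+	/* Device specific setup would go here */
+	return 0;
+}
+
+static void my_remove(struct bcma_device *core)
+{
+	/* Device specific teardown would go here */
+}
+
+static struct bcma_driver my_bcma_driver = {
+	.name		= "my-bcma-driver",
+	.id_table	= my_bcma_tbl,
+	.probe		= my_probe,
+	.remove		= my_remove,
+};
+
+static int __init my_driver_init(void)
+{
+	return bcma_driver_register(&my_bcma_driver);
+}
+#endif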
+
+static int bcma_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+	const struct bcma_device_id *cid = &core->id;
+	const struct bcma_device_id *did;
+
+	for (did = adrv->id_table; did->manuf || did->id || did->rev; did++) {
+		if ((did->manuf == cid->manuf || did->manuf == BCMA_ANY_MANUF) &&
+		    (did->id == cid->id || did->id == BCMA_ANY_ID) &&
+		    (did->rev == cid->rev || did->rev == BCMA_ANY_REV) &&
+		    (did->class == cid->class || did->class == BCMA_ANY_CLASS))
+			return 1;
+	}
+	return 0;
+}
+
+static int bcma_device_probe(struct device *dev)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
+					       drv);
+	int err = 0;
+
+	get_device(dev);
+	if (adrv->probe)
+		err = adrv->probe(core);
+	if (err)
+		put_device(dev);
+
+	return err;
+}
+
+static int bcma_device_remove(struct device *dev)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+	struct bcma_driver *adrv = container_of(dev->driver, struct bcma_driver,
+					       drv);
+
+	if (adrv->remove)
+		adrv->remove(core);
+	put_device(dev);
+
+	return 0;
+}
+
+static int bcma_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct bcma_device *core = container_of(dev, struct bcma_device, dev);
+
+	return add_uevent_var(env,
+			      "MODALIAS=bcma:m%04Xid%04Xrev%02Xcl%02X",
+			      core->id.manuf, core->id.id,
+			      core->id.rev, core->id.class);
+}
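+
+/*
+ * For example (illustrative values), an IEEE 802.11 core made by Broadcom
+ * (manuf 0x4BF, id 0x812, rev 0x17, class 0x0) results in
+ * MODALIAS=bcma:m04BFid0812rev17cl00, which userspace uses to autoload the
+ * matching driver module.
+ */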
+
+static unsigned int bcma_bus_registered;
+
+/*
+ * If built-in, the bus has to be registered early, before any driver calls
+ * bcma_driver_register().
+ * Otherwise registering a driver would trigger a BUG in driver_register().
+ */
+static int __init bcma_init_bus_register(void)
+{
+	int err;
+
+	if (bcma_bus_registered)
+		return 0;
+
+	err = bus_register(&bcma_bus_type);
+	if (!err)
+		bcma_bus_registered = 1;
+
+	return err;
+}
+#ifndef MODULE
+fs_initcall(bcma_init_bus_register);
+#endif
+
+/* Main initialization has to be done with SPI/mtd/NAND/SPROM available */
+static int __init bcma_modinit(void)
+{
+	int err;
+
+	err = bcma_init_bus_register();
+	if (err)
+		return err;
+
+	err = bcma_host_soc_register_driver();
+	if (err) {
+		pr_err("SoC host initialization failed\n");
+		err = 0;
+	}
+#ifdef CONFIG_BCMA_HOST_PCI
+	err = bcma_host_pci_init();
+	if (err) {
+		pr_err("PCI host initialization failed\n");
+		err = 0;
+	}
+#endif
+
+	return err;
+}
+module_init(bcma_modinit);
+
+static void __exit bcma_modexit(void)
+{
+#ifdef CONFIG_BCMA_HOST_PCI
+	bcma_host_pci_exit();
+#endif
+	bcma_host_soc_unregister_driver();
+	bus_unregister(&bcma_bus_type);
+}
+module_exit(bcma_modexit)
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
new file mode 100644
index 0000000..4a2d1b2
--- /dev/null
+++ b/drivers/bcma/scan.c
@@ -0,0 +1,530 @@
+/*
+ * Broadcom specific AMBA
+ * Bus scanning
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "scan.h"
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+struct bcma_device_id_name {
+	u16 id;
+	const char *name;
+};
+
+static const struct bcma_device_id_name bcma_arm_device_names[] = {
+	{ BCMA_CORE_4706_MAC_GBIT_COMMON, "BCM4706 GBit MAC Common" },
+	{ BCMA_CORE_ARM_1176, "ARM 1176" },
+	{ BCMA_CORE_ARM_7TDMI, "ARM 7TDMI" },
+	{ BCMA_CORE_ARM_CM3, "ARM CM3" },
+};
+
+static const struct bcma_device_id_name bcma_bcm_device_names[] = {
+	{ BCMA_CORE_OOB_ROUTER, "OOB Router" },
+	{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
+	{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
+	{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+	{ BCMA_CORE_NS_PCIEG2, "PCIe Gen 2" },
+	{ BCMA_CORE_NS_DMA, "DMA" },
+	{ BCMA_CORE_NS_SDIO3, "SDIO3" },
+	{ BCMA_CORE_NS_USB20, "USB 2.0" },
+	{ BCMA_CORE_NS_USB30, "USB 3.0" },
+	{ BCMA_CORE_NS_A9JTAG, "ARM Cortex A9 JTAG" },
+	{ BCMA_CORE_NS_DDR23, "Denali DDR2/DDR3 memory controller" },
+	{ BCMA_CORE_NS_ROM, "ROM" },
+	{ BCMA_CORE_NS_NAND, "NAND flash controller" },
+	{ BCMA_CORE_NS_QSPI, "SPI flash controller" },
+	{ BCMA_CORE_NS_CHIPCOMMON_B, "Chipcommon B" },
+	{ BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
+	{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
+	{ BCMA_CORE_ALTA, "ALTA (I2S)" },
+	{ BCMA_CORE_INVALID, "Invalid" },
+	{ BCMA_CORE_CHIPCOMMON, "ChipCommon" },
+	{ BCMA_CORE_ILINE20, "ILine 20" },
+	{ BCMA_CORE_SRAM, "SRAM" },
+	{ BCMA_CORE_SDRAM, "SDRAM" },
+	{ BCMA_CORE_PCI, "PCI" },
+	{ BCMA_CORE_ETHERNET, "Fast Ethernet" },
+	{ BCMA_CORE_V90, "V90" },
+	{ BCMA_CORE_USB11_HOSTDEV, "USB 1.1 Hostdev" },
+	{ BCMA_CORE_ADSL, "ADSL" },
+	{ BCMA_CORE_ILINE100, "ILine 100" },
+	{ BCMA_CORE_IPSEC, "IPSEC" },
+	{ BCMA_CORE_UTOPIA, "UTOPIA" },
+	{ BCMA_CORE_PCMCIA, "PCMCIA" },
+	{ BCMA_CORE_INTERNAL_MEM, "Internal Memory" },
+	{ BCMA_CORE_MEMC_SDRAM, "MEMC SDRAM" },
+	{ BCMA_CORE_OFDM, "OFDM" },
+	{ BCMA_CORE_EXTIF, "EXTIF" },
+	{ BCMA_CORE_80211, "IEEE 802.11" },
+	{ BCMA_CORE_PHY_A, "PHY A" },
+	{ BCMA_CORE_PHY_B, "PHY B" },
+	{ BCMA_CORE_PHY_G, "PHY G" },
+	{ BCMA_CORE_USB11_HOST, "USB 1.1 Host" },
+	{ BCMA_CORE_USB11_DEV, "USB 1.1 Device" },
+	{ BCMA_CORE_USB20_HOST, "USB 2.0 Host" },
+	{ BCMA_CORE_USB20_DEV, "USB 2.0 Device" },
+	{ BCMA_CORE_SDIO_HOST, "SDIO Host" },
+	{ BCMA_CORE_ROBOSWITCH, "Roboswitch" },
+	{ BCMA_CORE_PARA_ATA, "PATA" },
+	{ BCMA_CORE_SATA_XORDMA, "SATA XOR-DMA" },
+	{ BCMA_CORE_ETHERNET_GBIT, "GBit Ethernet" },
+	{ BCMA_CORE_PCIE, "PCIe" },
+	{ BCMA_CORE_PHY_N, "PHY N" },
+	{ BCMA_CORE_SRAM_CTL, "SRAM Controller" },
+	{ BCMA_CORE_MINI_MACPHY, "Mini MACPHY" },
+	{ BCMA_CORE_PHY_LP, "PHY LP" },
+	{ BCMA_CORE_PMU, "PMU" },
+	{ BCMA_CORE_PHY_SSN, "PHY SSN" },
+	{ BCMA_CORE_SDIO_DEV, "SDIO Device" },
+	{ BCMA_CORE_PHY_HT, "PHY HT" },
+	{ BCMA_CORE_MAC_GBIT, "GBit MAC" },
+	{ BCMA_CORE_DDR12_MEM_CTL, "DDR1/DDR2 Memory Controller" },
+	{ BCMA_CORE_PCIE_RC, "PCIe Root Complex" },
+	{ BCMA_CORE_OCP_OCP_BRIDGE, "OCP to OCP Bridge" },
+	{ BCMA_CORE_SHARED_COMMON, "Common Shared" },
+	{ BCMA_CORE_OCP_AHB_BRIDGE, "OCP to AHB Bridge" },
+	{ BCMA_CORE_SPI_HOST, "SPI Host" },
+	{ BCMA_CORE_I2S, "I2S" },
+	{ BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" },
+	{ BCMA_CORE_SHIM, "SHIM" },
+	{ BCMA_CORE_PCIE2, "PCIe Gen2" },
+	{ BCMA_CORE_ARM_CR4, "ARM CR4" },
+	{ BCMA_CORE_GCI, "GCI" },
+	{ BCMA_CORE_CMEM, "CNDS DDR2/3 memory controller" },
+	{ BCMA_CORE_ARM_CA7, "ARM CA7" },
+	{ BCMA_CORE_DEFAULT, "Default" },
+};
+
+static const struct bcma_device_id_name bcma_mips_device_names[] = {
+	{ BCMA_CORE_MIPS, "MIPS" },
+	{ BCMA_CORE_MIPS_3302, "MIPS 3302" },
+	{ BCMA_CORE_MIPS_74K, "MIPS 74K" },
+};
+
+static const char *bcma_device_name(const struct bcma_device_id *id)
+{
+	const struct bcma_device_id_name *names;
+	int size, i;
+
+	/* search manufacturer specific names */
+	switch (id->manuf) {
+	case BCMA_MANUF_ARM:
+		names = bcma_arm_device_names;
+		size = ARRAY_SIZE(bcma_arm_device_names);
+		break;
+	case BCMA_MANUF_BCM:
+		names = bcma_bcm_device_names;
+		size = ARRAY_SIZE(bcma_bcm_device_names);
+		break;
+	case BCMA_MANUF_MIPS:
+		names = bcma_mips_device_names;
+		size = ARRAY_SIZE(bcma_mips_device_names);
+		break;
+	default:
+		return "UNKNOWN";
+	}
+
+	for (i = 0; i < size; i++) {
+		if (names[i].id == id->id)
+			return names[i].name;
+	}
+
+	return "UNKNOWN";
+}
+
+static u32 bcma_scan_read32(struct bcma_bus *bus, u8 current_coreidx,
+		       u16 offset)
+{
+	return readl(bus->mmio + offset);
+}
+
+static void bcma_scan_switch_core(struct bcma_bus *bus, u32 addr)
+{
+	if (bus->hosttype == BCMA_HOSTTYPE_PCI)
+		pci_write_config_dword(bus->host_pci, BCMA_PCI_BAR0_WIN,
+				       addr);
+}
+
+static u32 bcma_erom_get_ent(struct bcma_bus *bus, u32 __iomem **eromptr)
+{
+	u32 ent = readl(*eromptr);
+	(*eromptr)++;
+	return ent;
+}
+
+static void bcma_erom_push_ent(u32 __iomem **eromptr)
+{
+	(*eromptr)--;
+}
+
+static s32 bcma_erom_get_ci(struct bcma_bus *bus, u32 __iomem **eromptr)
+{
+	u32 ent = bcma_erom_get_ent(bus, eromptr);
+	if (!(ent & SCAN_ER_VALID))
+		return -ENOENT;
+	if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_CI)
+		return -ENOENT;
+	return ent;
+}
+
+static bool bcma_erom_is_end(struct bcma_bus *bus, u32 __iomem **eromptr)
+{
+	u32 ent = bcma_erom_get_ent(bus, eromptr);
+	bcma_erom_push_ent(eromptr);
+	return (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID));
+}
+
+static bool bcma_erom_is_bridge(struct bcma_bus *bus, u32 __iomem **eromptr)
+{
+	u32 ent = bcma_erom_get_ent(bus, eromptr);
+	bcma_erom_push_ent(eromptr);
+	return (((ent & SCAN_ER_VALID)) &&
+		((ent & SCAN_ER_TAGX) == SCAN_ER_TAG_ADDR) &&
+		((ent & SCAN_ADDR_TYPE) == SCAN_ADDR_TYPE_BRIDGE));
+}
+
+static void bcma_erom_skip_component(struct bcma_bus *bus, u32 __iomem **eromptr)
+{
+	u32 ent;
+	while (1) {
+		ent = bcma_erom_get_ent(bus, eromptr);
+		if ((ent & SCAN_ER_VALID) &&
+		    ((ent & SCAN_ER_TAG) == SCAN_ER_TAG_CI))
+			break;
+		if (ent == (SCAN_ER_TAG_END | SCAN_ER_VALID))
+			break;
+	}
+	bcma_erom_push_ent(eromptr);
+}
+
+static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
+{
+	u32 ent = bcma_erom_get_ent(bus, eromptr);
+	if (!(ent & SCAN_ER_VALID))
+		return -ENOENT;
+	if ((ent & SCAN_ER_TAG) != SCAN_ER_TAG_MP)
+		return -ENOENT;
+	return ent;
+}
+
+static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
+				  u32 type, u8 port)
+{
+	u32 addrl, addrh, sizel, sizeh = 0;
+	u32 size;
+
+	u32 ent = bcma_erom_get_ent(bus, eromptr);
+	if ((!(ent & SCAN_ER_VALID)) ||
+	    ((ent & SCAN_ER_TAGX) != SCAN_ER_TAG_ADDR) ||
+	    ((ent & SCAN_ADDR_TYPE) != type) ||
+	    (((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
+		bcma_erom_push_ent(eromptr);
+		return (u32)-EINVAL;
+	}
+
+	addrl = ent & SCAN_ADDR_ADDR;
+	if (ent & SCAN_ADDR_AG32)
+		addrh = bcma_erom_get_ent(bus, eromptr);
+	else
+		addrh = 0;
+
+	if ((ent & SCAN_ADDR_SZ) == SCAN_ADDR_SZ_SZD) {
+		size = bcma_erom_get_ent(bus, eromptr);
+		sizel = size & SCAN_SIZE_SZ;
+		if (size & SCAN_SIZE_SG32)
+			sizeh = bcma_erom_get_ent(bus, eromptr);
+	} else
+		sizel = SCAN_ADDR_SZ_BASE <<
+				((ent & SCAN_ADDR_SZ) >> SCAN_ADDR_SZ_SHIFT);
+
+	return addrl;
+}
+
+static struct bcma_device *bcma_find_core_by_index(struct bcma_bus *bus,
+						   u16 index)
+{
+	struct bcma_device *core;
+
+	list_for_each_entry(core, &bus->cores, list) {
+		if (core->core_index == index)
+			return core;
+	}
+	return NULL;
+}
+
+static struct bcma_device *bcma_find_core_reverse(struct bcma_bus *bus, u16 coreid)
+{
+	struct bcma_device *core;
+
+	list_for_each_entry_reverse(core, &bus->cores, list) {
+		if (core->id.id == coreid)
+			return core;
+	}
+	return NULL;
+}
+
+#define IS_ERR_VALUE_U32(x) ((x) >= (u32)-MAX_ERRNO)
+
+static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
+			      struct bcma_device_id *match, int core_num,
+			      struct bcma_device *core)
+{
+	u32 tmp;
+	u8 i, j, k;
+	s32 cia, cib;
+	u8 ports[2], wrappers[2];
+
+	/* get CIs */
+	cia = bcma_erom_get_ci(bus, eromptr);
+	if (cia < 0) {
+		bcma_erom_push_ent(eromptr);
+		if (bcma_erom_is_end(bus, eromptr))
+			return -ESPIPE;
+		return -EILSEQ;
+	}
+	cib = bcma_erom_get_ci(bus, eromptr);
+	if (cib < 0)
+		return -EILSEQ;
+
+	/* parse CIs */
+	core->id.class = (cia & SCAN_CIA_CLASS) >> SCAN_CIA_CLASS_SHIFT;
+	core->id.id = (cia & SCAN_CIA_ID) >> SCAN_CIA_ID_SHIFT;
+	core->id.manuf = (cia & SCAN_CIA_MANUF) >> SCAN_CIA_MANUF_SHIFT;
+	ports[0] = (cib & SCAN_CIB_NMP) >> SCAN_CIB_NMP_SHIFT;
+	ports[1] = (cib & SCAN_CIB_NSP) >> SCAN_CIB_NSP_SHIFT;
+	wrappers[0] = (cib & SCAN_CIB_NMW) >> SCAN_CIB_NMW_SHIFT;
+	wrappers[1] = (cib & SCAN_CIB_NSW) >> SCAN_CIB_NSW_SHIFT;
+	core->id.rev = (cib & SCAN_CIB_REV) >> SCAN_CIB_REV_SHIFT;
+
+	if (((core->id.manuf == BCMA_MANUF_ARM) &&
+	     (core->id.id == 0xFFF)) ||
+	    (ports[1] == 0)) {
+		bcma_erom_skip_component(bus, eromptr);
+		return -ENXIO;
+	}
+
+	/* check if component is a core at all */
+	if (wrappers[0] + wrappers[1] == 0) {
+		/* Some specific cores don't need wrappers */
+		switch (core->id.id) {
+		case BCMA_CORE_4706_MAC_GBIT_COMMON:
+		case BCMA_CORE_NS_CHIPCOMMON_B:
+		case BCMA_CORE_PMU:
+		case BCMA_CORE_GCI:
+		/* Not used yet: case BCMA_CORE_OOB_ROUTER: */
+			break;
+		default:
+			bcma_erom_skip_component(bus, eromptr);
+			return -ENXIO;
+		}
+	}
+
+	if (bcma_erom_is_bridge(bus, eromptr)) {
+		bcma_erom_skip_component(bus, eromptr);
+		return -ENXIO;
+	}
+
+	if (bcma_find_core_by_index(bus, core_num)) {
+		bcma_erom_skip_component(bus, eromptr);
+		return -ENODEV;
+	}
+
+	if (match && ((match->manuf != BCMA_ANY_MANUF &&
+	      match->manuf != core->id.manuf) ||
+	     (match->id != BCMA_ANY_ID && match->id != core->id.id) ||
+	     (match->rev != BCMA_ANY_REV && match->rev != core->id.rev) ||
+	     (match->class != BCMA_ANY_CLASS && match->class != core->id.class)
+	    )) {
+		bcma_erom_skip_component(bus, eromptr);
+		return -ENODEV;
+	}
+
+	/* get & parse master ports */
+	for (i = 0; i < ports[0]; i++) {
+		s32 mst_port_d = bcma_erom_get_mst_port(bus, eromptr);
+		if (mst_port_d < 0)
+			return -EILSEQ;
+	}
+
+	/* First Slave Address Descriptor should be port 0:
+	 * the main register space for the core
+	 */
+	tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
+	if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
+		/* Try again to see if it is a bridge */
+		tmp = bcma_erom_get_addr_desc(bus, eromptr,
+					      SCAN_ADDR_TYPE_BRIDGE, 0);
+		if (tmp == 0 || IS_ERR_VALUE_U32(tmp)) {
+			return -EILSEQ;
+		} else {
+			bcma_info(bus, "Bridge found\n");
+			return -ENXIO;
+		}
+	}
+	core->addr = tmp;
+
+	/* get & parse slave ports */
+	k = 0;
+	for (i = 0; i < ports[1]; i++) {
+		for (j = 0; ; j++) {
+			tmp = bcma_erom_get_addr_desc(bus, eromptr,
+				SCAN_ADDR_TYPE_SLAVE, i);
+			if (IS_ERR_VALUE_U32(tmp)) {
+				/* no more entries for port _i_ */
+				/* pr_debug("erom: slave port %d "
+				 * "has %d descriptors\n", i, j); */
+				break;
+			} else if (k < ARRAY_SIZE(core->addr_s)) {
+				core->addr_s[k] = tmp;
+				k++;
+			}
+		}
+	}
+
+	/* get & parse master wrappers */
+	for (i = 0; i < wrappers[0]; i++) {
+		for (j = 0; ; j++) {
+			tmp = bcma_erom_get_addr_desc(bus, eromptr,
+				SCAN_ADDR_TYPE_MWRAP, i);
+			if (IS_ERR_VALUE_U32(tmp)) {
+				/* no more entries for port _i_ */
+				/* pr_debug("erom: master wrapper %d "
+				 * "has %d descriptors\n", i, j); */
+				break;
+			} else {
+				if (i == 0 && j == 0)
+					core->wrap = tmp;
+			}
+		}
+	}
+
+	/* get & parse slave wrappers */
+	for (i = 0; i < wrappers[1]; i++) {
+		u8 hack = (ports[1] == 1) ? 0 : 1;
+		for (j = 0; ; j++) {
+			tmp = bcma_erom_get_addr_desc(bus, eromptr,
+				SCAN_ADDR_TYPE_SWRAP, i + hack);
+			if (IS_ERR_VALUE_U32(tmp)) {
+				/* no more entries for port _i_ */
+				/* pr_debug("erom: slave wrapper %d "
+				 * "has %d descriptors\n", i, j); */
+				break;
+			} else {
+				if (wrappers[0] == 0 && !i && !j)
+					core->wrap = tmp;
+			}
+		}
+	}
+	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+		core->io_addr = ioremap_nocache(core->addr, BCMA_CORE_SIZE);
+		if (!core->io_addr)
+			return -ENOMEM;
+		if (core->wrap) {
+			core->io_wrap = ioremap_nocache(core->wrap,
+							BCMA_CORE_SIZE);
+			if (!core->io_wrap) {
+				iounmap(core->io_addr);
+				return -ENOMEM;
+			}
+		}
+	}
+	return 0;
+}
+
+void bcma_detect_chip(struct bcma_bus *bus)
+{
+	s32 tmp;
+	struct bcma_chipinfo *chipinfo = &(bus->chipinfo);
+	char chip_id[8];
+
+	bcma_scan_switch_core(bus, BCMA_ADDR_BASE);
+
+	tmp = bcma_scan_read32(bus, 0, BCMA_CC_ID);
+	chipinfo->id = (tmp & BCMA_CC_ID_ID) >> BCMA_CC_ID_ID_SHIFT;
+	chipinfo->rev = (tmp & BCMA_CC_ID_REV) >> BCMA_CC_ID_REV_SHIFT;
+	chipinfo->pkg = (tmp & BCMA_CC_ID_PKG) >> BCMA_CC_ID_PKG_SHIFT;
+
+	snprintf(chip_id, ARRAY_SIZE(chip_id),
+		 (chipinfo->id > 0x9999) ? "%d" : "0x%04X", chipinfo->id);
+	bcma_info(bus, "Found chip with id %s, rev 0x%02X and package 0x%02X\n",
+		  chip_id, chipinfo->rev, chipinfo->pkg);
+}
+
+int bcma_bus_scan(struct bcma_bus *bus)
+{
+	u32 erombase;
+	u32 __iomem *eromptr, *eromend;
+
+	int err, core_num = 0;
+
+	/* Skip if bus was already scanned (e.g. during early register) */
+	if (bus->nr_cores)
+		return 0;
+
+	erombase = bcma_scan_read32(bus, 0, BCMA_CC_EROM);
+	if (bus->hosttype == BCMA_HOSTTYPE_SOC) {
+		eromptr = ioremap_nocache(erombase, BCMA_CORE_SIZE);
+		if (!eromptr)
+			return -ENOMEM;
+	} else {
+		eromptr = bus->mmio;
+	}
+
+	eromend = eromptr + BCMA_CORE_SIZE / sizeof(u32);
+
+	bcma_scan_switch_core(bus, erombase);
+
+	while (eromptr < eromend) {
+		struct bcma_device *other_core;
+		struct bcma_device *core = kzalloc(sizeof(*core), GFP_KERNEL);
+		if (!core) {
+			err = -ENOMEM;
+			goto out;
+		}
+		INIT_LIST_HEAD(&core->list);
+		core->bus = bus;
+
+		err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
+		if (err < 0) {
+			kfree(core);
+			if (err == -ENODEV) {
+				core_num++;
+				continue;
+			} else if (err == -ENXIO) {
+				continue;
+			} else if (err == -ESPIPE) {
+				break;
+			}
+			goto out;
+		}
+
+		core->core_index = core_num++;
+		bus->nr_cores++;
+		other_core = bcma_find_core_reverse(bus, core->id.id);
+		core->core_unit = (other_core == NULL) ? 0 : other_core->core_unit + 1;
+		bcma_prepare_core(bus, core);
+
+		bcma_info(bus, "Core %d found: %s (manuf 0x%03X, id 0x%03X, rev 0x%02X, class 0x%X)\n",
+			  core->core_index, bcma_device_name(&core->id),
+			  core->id.manuf, core->id.id, core->id.rev,
+			  core->id.class);
+
+		list_add_tail(&core->list, &bus->cores);
+	}
+
+	err = 0;
+out:
+	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
+		iounmap(eromptr);
+
+	return err;
+}
diff --git a/drivers/bcma/scan.h b/drivers/bcma/scan.h
new file mode 100644
index 0000000..e53079a
--- /dev/null
+++ b/drivers/bcma/scan.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef BCMA_SCAN_H_
+#define BCMA_SCAN_H_
+
+#define BCMA_ADDR_BASE		0x18000000
+#define BCMA_WRAP_BASE		0x18100000
+
+#define SCAN_ER_VALID		0x00000001
+#define SCAN_ER_TAGX		0x00000006 /* we have to ignore 0x8 bit when checking tag for SCAN_ER_TAG_ADDR */
+#define SCAN_ER_TAG		0x0000000E
+#define  SCAN_ER_TAG_CI		0x00000000
+#define  SCAN_ER_TAG_MP		0x00000002
+#define  SCAN_ER_TAG_ADDR	0x00000004
+#define  SCAN_ER_TAG_END	0x0000000E
+#define SCAN_ER_BAD		0xFFFFFFFF
+
+#define SCAN_CIA_CLASS		0x000000F0
+#define SCAN_CIA_CLASS_SHIFT	4
+#define SCAN_CIA_ID		0x000FFF00
+#define SCAN_CIA_ID_SHIFT	8
+#define SCAN_CIA_MANUF		0xFFF00000
+#define SCAN_CIA_MANUF_SHIFT	20
+
+#define SCAN_CIB_NMP		0x000001F0
+#define SCAN_CIB_NMP_SHIFT	4
+#define SCAN_CIB_NSP		0x00003E00
+#define SCAN_CIB_NSP_SHIFT	9
+#define SCAN_CIB_NMW		0x0007C000
+#define SCAN_CIB_NMW_SHIFT	14
+#define SCAN_CIB_NSW		0x00F80000
+#define SCAN_CIB_NSW_SHIFT	19
+#define SCAN_CIB_REV		0xFF000000
+#define SCAN_CIB_REV_SHIFT	24
+
+#define SCAN_ADDR_AG32		0x00000008
+#define SCAN_ADDR_SZ		0x00000030
+#define SCAN_ADDR_SZ_SHIFT	4
+#define  SCAN_ADDR_SZ_4K	0x00000000
+#define  SCAN_ADDR_SZ_8K	0x00000010
+#define  SCAN_ADDR_SZ_16K	0x00000020
+#define  SCAN_ADDR_SZ_SZD	0x00000030
+#define SCAN_ADDR_TYPE		0x000000C0
+#define  SCAN_ADDR_TYPE_SLAVE	0x00000000
+#define  SCAN_ADDR_TYPE_BRIDGE	0x00000040
+#define  SCAN_ADDR_TYPE_SWRAP	0x00000080
+#define  SCAN_ADDR_TYPE_MWRAP	0x000000C0
+#define SCAN_ADDR_PORT		0x00000F00
+#define SCAN_ADDR_PORT_SHIFT	8
+#define SCAN_ADDR_ADDR		0xFFFFF000
+
+#define SCAN_ADDR_SZ_BASE	0x00001000	/* 4KB */
+
+#define SCAN_SIZE_SZ_ALIGN	0x00000FFF
+#define SCAN_SIZE_SZ		0xFFFFF000
+#define SCAN_SIZE_SG32		0x00000008
+
+#endif /* BCMA_SCAN_H_ */
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
new file mode 100644
index 0000000..206edd3
--- /dev/null
+++ b/drivers/bcma/sprom.c
@@ -0,0 +1,646 @@
+/*
+ * Broadcom specific AMBA
+ * SPROM reading
+ *
+ * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include "bcma_private.h"
+
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+static int (*get_fallback_sprom)(struct bcma_bus *dev, struct ssb_sprom *out);
+
+/**
+ * bcma_arch_register_fallback_sprom - Registers a method providing a
+ * fallback SPROM if no SPROM is found.
+ *
+ * @sprom_callback: The callback function.
+ *
+ * With this function the architecture implementation may register a
+ * callback handler which fills the SPROM data structure. The fallback is
+ * used for PCI-based BCMA devices, where no valid SPROM can be found
+ * in the shadow registers, and to provide the SPROM for SoCs where BCMA is
+ * used to control the system bus.
+ *
+ * This function is useful for unusual architectures that have a BCMA
+ * device hardwired to their PCI bus.
+ *
+ * This function is available to architecture code only, so it is not
+ * exported.
+ */
+int bcma_arch_register_fallback_sprom(int (*sprom_callback)(struct bcma_bus *bus,
+				     struct ssb_sprom *out))
+{
+	if (get_fallback_sprom)
+		return -EEXIST;
+	get_fallback_sprom = sprom_callback;
+
+	return 0;
+}
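+
+/*
+ * Usage sketch: hypothetical architecture code could register a fallback
+ * like the one below, typically filling the ssb_sprom from NVRAM or board
+ * data. The my_* names are placeholders; the callback signature is the one
+ * accepted by bcma_arch_register_fallback_sprom() above.
+ */
+#if 0
+static int my_arch_get_sprom(struct bcma_bus *bus, struct ssb_sprom *out)
+{
+	/* Fill *out from board specific storage (NVRAM, DT, ...) */
+	out->revision = 8;
+	/* Remaining fields would be filled here */
+	return 0;
+}
+
+static int __init my_arch_sprom_init(void)
+{
+	return bcma_arch_register_fallback_sprom(my_arch_get_sprom);
+}
+#endif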
+
+static int bcma_fill_sprom_with_fallback(struct bcma_bus *bus,
+					 struct ssb_sprom *out)
+{
+	int err;
+
+	if (!get_fallback_sprom) {
+		err = -ENOENT;
+		goto fail;
+	}
+
+	err = get_fallback_sprom(bus, out);
+	if (err)
+		goto fail;
+
+	bcma_debug(bus, "Using SPROM revision %d provided by platform.\n",
+		   bus->sprom.revision);
+	return 0;
+fail:
+	bcma_warn(bus, "Using fallback SPROM failed (err %d)\n", err);
+	return err;
+}
+
+/**************************************************
+ * R/W ops.
+ **************************************************/
+
+static void bcma_sprom_read(struct bcma_bus *bus, u16 offset, u16 *sprom,
+			    size_t words)
+{
+	int i;
+	for (i = 0; i < words; i++)
+		sprom[i] = bcma_read16(bus->drv_cc.core, offset + (i * 2));
+}
+
+/**************************************************
+ * Validation.
+ **************************************************/
+
+static inline u8 bcma_crc8(u8 crc, u8 data)
+{
+	/* Polynomial:   x^8 + x^7 + x^6 + x^4 + x^2 + 1   */
+	static const u8 t[] = {
+		0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
+		0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
+		0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
+		0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
+		0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
+		0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
+		0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
+		0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
+		0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
+		0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
+		0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
+		0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
+		0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
+		0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
+		0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
+		0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
+		0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
+		0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
+		0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
+		0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
+		0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
+		0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
+		0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
+		0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
+		0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
+		0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
+		0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
+		0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
+		0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
+		0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
+		0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
+		0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F,
+	};
+	return t[crc ^ data];
+}
+
+static u8 bcma_sprom_crc(const u16 *sprom, size_t words)
+{
+	int word;
+	u8 crc = 0xFF;
+
+	for (word = 0; word < words - 1; word++) {
+		crc = bcma_crc8(crc, sprom[word] & 0x00FF);
+		crc = bcma_crc8(crc, (sprom[word] & 0xFF00) >> 8);
+	}
+	crc = bcma_crc8(crc, sprom[words - 1] & 0x00FF);
+	crc ^= 0xFF;
+
+	return crc;
+}
+
+static int bcma_sprom_check_crc(const u16 *sprom, size_t words)
+{
+	u8 crc;
+	u8 expected_crc;
+	u16 tmp;
+
+	crc = bcma_sprom_crc(sprom, words);
+	tmp = sprom[words - 1] & SSB_SPROM_REVISION_CRC;
+	expected_crc = tmp >> SSB_SPROM_REVISION_CRC_SHIFT;
+	if (crc != expected_crc)
+		return -EPROTO;
+
+	return 0;
+}
+
+static int bcma_sprom_valid(struct bcma_bus *bus, const u16 *sprom,
+			    size_t words)
+{
+	u16 revision;
+	int err;
+
+	err = bcma_sprom_check_crc(sprom, words);
+	if (err)
+		return err;
+
+	revision = sprom[words - 1] & SSB_SPROM_REVISION_REV;
+	if (revision != 8 && revision != 9 && revision != 10) {
+		pr_err("Unsupported SPROM revision: %d\n", revision);
+		return -ENOENT;
+	}
+
+	bus->sprom.revision = revision;
+	bcma_debug(bus, "Found SPROM revision %d\n", revision);
+
+	return 0;
+}
+
+/**************************************************
+ * SPROM extraction.
+ **************************************************/
+
+#define SPOFF(offset)	((offset) / sizeof(u16))
+
+#define SPEX(_field, _offset, _mask, _shift)	\
+	bus->sprom._field = ((sprom[SPOFF(_offset)] & (_mask)) >> (_shift))
+
+#define SPEX32(_field, _offset, _mask, _shift)	\
+	bus->sprom._field = ((((u32)sprom[SPOFF((_offset)+2)] << 16 | \
+				sprom[SPOFF(_offset)]) & (_mask)) >> (_shift))
+
+#define SPEX_ARRAY8(_field, _offset, _mask, _shift)	\
+	do {	\
+		SPEX(_field[0], _offset +  0, _mask, _shift);	\
+		SPEX(_field[1], _offset +  2, _mask, _shift);	\
+		SPEX(_field[2], _offset +  4, _mask, _shift);	\
+		SPEX(_field[3], _offset +  6, _mask, _shift);	\
+		SPEX(_field[4], _offset +  8, _mask, _shift);	\
+		SPEX(_field[5], _offset + 10, _mask, _shift);	\
+		SPEX(_field[6], _offset + 12, _mask, _shift);	\
+		SPEX(_field[7], _offset + 14, _mask, _shift);	\
+	} while (0)
+
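+/*
+ * For example, SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0) (used below)
+ * expands to:
+ *
+ *	bus->sprom.board_rev =
+ *		((sprom[SPOFF(SSB_SPROM8_BOARDREV)] & (~0)) >> (0));
+ *
+ * i.e. one 16-bit SPROM word is read, masked and shifted into the
+ * corresponding ssb_sprom field.
+ */
+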
+static s8 sprom_extract_antgain(const u16 *in, u16 offset, u16 mask, u16 shift)
+{
+	u16 v;
+	u8 gain;
+
+	v = in[SPOFF(offset)];
+	gain = (v & mask) >> shift;
+	if (gain == 0xFF) {
+		gain = 8; /* If unset use 2dBm */
+	} else {
+		/* Q5.2 Fractional part is stored in 0xC0 */
+		gain = ((gain & 0xC0) >> 6) | ((gain & 0x3F) << 2);
+	}
+
+	return (s8)gain;
+}
+
+static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
+{
+	u16 v, o;
+	int i;
+	u16 pwr_info_offset[] = {
+		SSB_SROM8_PWR_INFO_CORE0, SSB_SROM8_PWR_INFO_CORE1,
+		SSB_SROM8_PWR_INFO_CORE2, SSB_SROM8_PWR_INFO_CORE3
+	};
+	BUILD_BUG_ON(ARRAY_SIZE(pwr_info_offset) !=
+			ARRAY_SIZE(bus->sprom.core_pwr_info));
+
+	for (i = 0; i < 3; i++) {
+		v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
+		*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
+	}
+
+	SPEX(board_rev, SSB_SPROM8_BOARDREV, ~0, 0);
+	SPEX(board_type, SSB_SPROM1_SPID, ~0, 0);
+
+	SPEX(txpid2g[0], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G0,
+	     SSB_SPROM4_TXPID2G0_SHIFT);
+	SPEX(txpid2g[1], SSB_SPROM4_TXPID2G01, SSB_SPROM4_TXPID2G1,
+	     SSB_SPROM4_TXPID2G1_SHIFT);
+	SPEX(txpid2g[2], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G2,
+	     SSB_SPROM4_TXPID2G2_SHIFT);
+	SPEX(txpid2g[3], SSB_SPROM4_TXPID2G23, SSB_SPROM4_TXPID2G3,
+	     SSB_SPROM4_TXPID2G3_SHIFT);
+
+	SPEX(txpid5gl[0], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL0,
+	     SSB_SPROM4_TXPID5GL0_SHIFT);
+	SPEX(txpid5gl[1], SSB_SPROM4_TXPID5GL01, SSB_SPROM4_TXPID5GL1,
+	     SSB_SPROM4_TXPID5GL1_SHIFT);
+	SPEX(txpid5gl[2], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL2,
+	     SSB_SPROM4_TXPID5GL2_SHIFT);
+	SPEX(txpid5gl[3], SSB_SPROM4_TXPID5GL23, SSB_SPROM4_TXPID5GL3,
+	     SSB_SPROM4_TXPID5GL3_SHIFT);
+
+	SPEX(txpid5g[0], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G0,
+	     SSB_SPROM4_TXPID5G0_SHIFT);
+	SPEX(txpid5g[1], SSB_SPROM4_TXPID5G01, SSB_SPROM4_TXPID5G1,
+	     SSB_SPROM4_TXPID5G1_SHIFT);
+	SPEX(txpid5g[2], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G2,
+	     SSB_SPROM4_TXPID5G2_SHIFT);
+	SPEX(txpid5g[3], SSB_SPROM4_TXPID5G23, SSB_SPROM4_TXPID5G3,
+	     SSB_SPROM4_TXPID5G3_SHIFT);
+
+	SPEX(txpid5gh[0], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH0,
+	     SSB_SPROM4_TXPID5GH0_SHIFT);
+	SPEX(txpid5gh[1], SSB_SPROM4_TXPID5GH01, SSB_SPROM4_TXPID5GH1,
+	     SSB_SPROM4_TXPID5GH1_SHIFT);
+	SPEX(txpid5gh[2], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH2,
+	     SSB_SPROM4_TXPID5GH2_SHIFT);
+	SPEX(txpid5gh[3], SSB_SPROM4_TXPID5GH23, SSB_SPROM4_TXPID5GH3,
+	     SSB_SPROM4_TXPID5GH3_SHIFT);
+
+	SPEX(boardflags_lo, SSB_SPROM8_BFLLO, ~0, 0);
+	SPEX(boardflags_hi, SSB_SPROM8_BFLHI, ~0, 0);
+	SPEX(boardflags2_lo, SSB_SPROM8_BFL2LO, ~0, 0);
+	SPEX(boardflags2_hi, SSB_SPROM8_BFL2HI, ~0, 0);
+
+	SPEX(alpha2[0], SSB_SPROM8_CCODE, 0xff00, 8);
+	SPEX(alpha2[1], SSB_SPROM8_CCODE, 0x00ff, 0);
+
+	/* Extract per-core power info */
+	for (i = 0; i < ARRAY_SIZE(pwr_info_offset); i++) {
+		o = pwr_info_offset[i];
+		SPEX(core_pwr_info[i].itssi_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
+			SSB_SPROM8_2G_ITSSI, SSB_SPROM8_2G_ITSSI_SHIFT);
+		SPEX(core_pwr_info[i].maxpwr_2g, o + SSB_SROM8_2G_MAXP_ITSSI,
+			SSB_SPROM8_2G_MAXP, 0);
+
+		SPEX(core_pwr_info[i].pa_2g[0], o + SSB_SROM8_2G_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_2g[1], o + SSB_SROM8_2G_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_2g[2], o + SSB_SROM8_2G_PA_2, ~0, 0);
+
+		SPEX(core_pwr_info[i].itssi_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
+			SSB_SPROM8_5G_ITSSI, SSB_SPROM8_5G_ITSSI_SHIFT);
+		SPEX(core_pwr_info[i].maxpwr_5g, o + SSB_SROM8_5G_MAXP_ITSSI,
+			SSB_SPROM8_5G_MAXP, 0);
+		SPEX(core_pwr_info[i].maxpwr_5gh, o + SSB_SPROM8_5GHL_MAXP,
+			SSB_SPROM8_5GH_MAXP, 0);
+		SPEX(core_pwr_info[i].maxpwr_5gl, o + SSB_SPROM8_5GHL_MAXP,
+			SSB_SPROM8_5GL_MAXP, SSB_SPROM8_5GL_MAXP_SHIFT);
+
+		SPEX(core_pwr_info[i].pa_5gl[0], o + SSB_SROM8_5GL_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gl[1], o + SSB_SROM8_5GL_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gl[2], o + SSB_SROM8_5GL_PA_2, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[0], o + SSB_SROM8_5G_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[1], o + SSB_SROM8_5G_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5g[2], o + SSB_SROM8_5G_PA_2, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[0], o + SSB_SROM8_5GH_PA_0, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[1], o + SSB_SROM8_5GH_PA_1, ~0, 0);
+		SPEX(core_pwr_info[i].pa_5gh[2], o + SSB_SROM8_5GH_PA_2, ~0, 0);
+	}
+
+	SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TSSIPOS,
+	     SSB_SROM8_FEM_TSSIPOS_SHIFT);
+	SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_EXTPA_GAIN,
+	     SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+	SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_PDET_RANGE,
+	     SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+	SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_TR_ISO,
+	     SSB_SROM8_FEM_TR_ISO_SHIFT);
+	SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G, SSB_SROM8_FEM_ANTSWLUT,
+	     SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+	SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TSSIPOS,
+	     SSB_SROM8_FEM_TSSIPOS_SHIFT);
+	SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_EXTPA_GAIN,
+	     SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+	SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_PDET_RANGE,
+	     SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+	SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_TR_ISO,
+	     SSB_SROM8_FEM_TR_ISO_SHIFT);
+	SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G, SSB_SROM8_FEM_ANTSWLUT,
+	     SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+	SPEX(ant_available_a, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_A,
+	     SSB_SPROM8_ANTAVAIL_A_SHIFT);
+	SPEX(ant_available_bg, SSB_SPROM8_ANTAVAIL, SSB_SPROM8_ANTAVAIL_BG,
+	     SSB_SPROM8_ANTAVAIL_BG_SHIFT);
+	SPEX(maxpwr_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_MAXP_BG_MASK, 0);
+	SPEX(itssi_bg, SSB_SPROM8_MAXP_BG, SSB_SPROM8_ITSSI_BG,
+	     SSB_SPROM8_ITSSI_BG_SHIFT);
+	SPEX(maxpwr_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_MAXP_A_MASK, 0);
+	SPEX(itssi_a, SSB_SPROM8_MAXP_A, SSB_SPROM8_ITSSI_A,
+	     SSB_SPROM8_ITSSI_A_SHIFT);
+	SPEX(maxpwr_ah, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AH_MASK, 0);
+	SPEX(maxpwr_al, SSB_SPROM8_MAXP_AHL, SSB_SPROM8_MAXP_AL_MASK,
+	     SSB_SPROM8_MAXP_AL_SHIFT);
+	SPEX(gpio0, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P0, 0);
+	SPEX(gpio1, SSB_SPROM8_GPIOA, SSB_SPROM8_GPIOA_P1,
+	     SSB_SPROM8_GPIOA_P1_SHIFT);
+	SPEX(gpio2, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P2, 0);
+	SPEX(gpio3, SSB_SPROM8_GPIOB, SSB_SPROM8_GPIOB_P3,
+	     SSB_SPROM8_GPIOB_P3_SHIFT);
+	SPEX(tri2g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI2G, 0);
+	SPEX(tri5g, SSB_SPROM8_TRI25G, SSB_SPROM8_TRI5G,
+	     SSB_SPROM8_TRI5G_SHIFT);
+	SPEX(tri5gl, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GL, 0);
+	SPEX(tri5gh, SSB_SPROM8_TRI5GHL, SSB_SPROM8_TRI5GH,
+	     SSB_SPROM8_TRI5GH_SHIFT);
+	SPEX(rxpo2g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO2G,
+	     SSB_SPROM8_RXPO2G_SHIFT);
+	SPEX(rxpo5g, SSB_SPROM8_RXPO, SSB_SPROM8_RXPO5G,
+	     SSB_SPROM8_RXPO5G_SHIFT);
+	SPEX(rssismf2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMF2G, 0);
+	SPEX(rssismc2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISMC2G,
+	     SSB_SPROM8_RSSISMC2G_SHIFT);
+	SPEX(rssisav2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_RSSISAV2G,
+	     SSB_SPROM8_RSSISAV2G_SHIFT);
+	SPEX(bxa2g, SSB_SPROM8_RSSIPARM2G, SSB_SPROM8_BXA2G,
+	     SSB_SPROM8_BXA2G_SHIFT);
+	SPEX(rssismf5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMF5G, 0);
+	SPEX(rssismc5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISMC5G,
+	     SSB_SPROM8_RSSISMC5G_SHIFT);
+	SPEX(rssisav5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_RSSISAV5G,
+	     SSB_SPROM8_RSSISAV5G_SHIFT);
+	SPEX(bxa5g, SSB_SPROM8_RSSIPARM5G, SSB_SPROM8_BXA5G,
+	     SSB_SPROM8_BXA5G_SHIFT);
+
+	SPEX(pa0b0, SSB_SPROM8_PA0B0, ~0, 0);
+	SPEX(pa0b1, SSB_SPROM8_PA0B1, ~0, 0);
+	SPEX(pa0b2, SSB_SPROM8_PA0B2, ~0, 0);
+	SPEX(pa1b0, SSB_SPROM8_PA1B0, ~0, 0);
+	SPEX(pa1b1, SSB_SPROM8_PA1B1, ~0, 0);
+	SPEX(pa1b2, SSB_SPROM8_PA1B2, ~0, 0);
+	SPEX(pa1lob0, SSB_SPROM8_PA1LOB0, ~0, 0);
+	SPEX(pa1lob1, SSB_SPROM8_PA1LOB1, ~0, 0);
+	SPEX(pa1lob2, SSB_SPROM8_PA1LOB2, ~0, 0);
+	SPEX(pa1hib0, SSB_SPROM8_PA1HIB0, ~0, 0);
+	SPEX(pa1hib1, SSB_SPROM8_PA1HIB1, ~0, 0);
+	SPEX(pa1hib2, SSB_SPROM8_PA1HIB2, ~0, 0);
+	SPEX(cck2gpo, SSB_SPROM8_CCK2GPO, ~0, 0);
+	SPEX32(ofdm2gpo, SSB_SPROM8_OFDM2GPO, ~0, 0);
+	SPEX32(ofdm5glpo, SSB_SPROM8_OFDM5GLPO, ~0, 0);
+	SPEX32(ofdm5gpo, SSB_SPROM8_OFDM5GPO, ~0, 0);
+	SPEX32(ofdm5ghpo, SSB_SPROM8_OFDM5GHPO, ~0, 0);
+
+	/* Extract the antenna gain values. */
+	bus->sprom.antenna_gain.a0 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN01,
+							   SSB_SPROM8_AGAIN0,
+							   SSB_SPROM8_AGAIN0_SHIFT);
+	bus->sprom.antenna_gain.a1 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN01,
+							   SSB_SPROM8_AGAIN1,
+							   SSB_SPROM8_AGAIN1_SHIFT);
+	bus->sprom.antenna_gain.a2 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN23,
+							   SSB_SPROM8_AGAIN2,
+							   SSB_SPROM8_AGAIN2_SHIFT);
+	bus->sprom.antenna_gain.a3 = sprom_extract_antgain(sprom,
+							   SSB_SPROM8_AGAIN23,
+							   SSB_SPROM8_AGAIN3,
+							   SSB_SPROM8_AGAIN3_SHIFT);
+
+	SPEX(leddc_on_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_ON,
+	     SSB_SPROM8_LEDDC_ON_SHIFT);
+	SPEX(leddc_off_time, SSB_SPROM8_LEDDC, SSB_SPROM8_LEDDC_OFF,
+	     SSB_SPROM8_LEDDC_OFF_SHIFT);
+
+	SPEX(txchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_TXCHAIN,
+	     SSB_SPROM8_TXRXC_TXCHAIN_SHIFT);
+	SPEX(rxchain, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_RXCHAIN,
+	     SSB_SPROM8_TXRXC_RXCHAIN_SHIFT);
+	SPEX(antswitch, SSB_SPROM8_TXRXC, SSB_SPROM8_TXRXC_SWITCH,
+	     SSB_SPROM8_TXRXC_SWITCH_SHIFT);
+
+	SPEX(opo, SSB_SPROM8_OFDM2GPO, 0x00ff, 0);
+
+	SPEX_ARRAY8(mcs2gpo, SSB_SPROM8_2G_MCSPO, ~0, 0);
+	SPEX_ARRAY8(mcs5gpo, SSB_SPROM8_5G_MCSPO, ~0, 0);
+	SPEX_ARRAY8(mcs5glpo, SSB_SPROM8_5GL_MCSPO, ~0, 0);
+	SPEX_ARRAY8(mcs5ghpo, SSB_SPROM8_5GH_MCSPO, ~0, 0);
+
+	SPEX(rawtempsense, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_RAWTEMP,
+	     SSB_SPROM8_RAWTS_RAWTEMP_SHIFT);
+	SPEX(measpower, SSB_SPROM8_RAWTS, SSB_SPROM8_RAWTS_MEASPOWER,
+	     SSB_SPROM8_RAWTS_MEASPOWER_SHIFT);
+	SPEX(tempsense_slope, SSB_SPROM8_OPT_CORRX,
+	     SSB_SPROM8_OPT_CORRX_TEMP_SLOPE,
+	     SSB_SPROM8_OPT_CORRX_TEMP_SLOPE_SHIFT);
+	SPEX(tempcorrx, SSB_SPROM8_OPT_CORRX, SSB_SPROM8_OPT_CORRX_TEMPCORRX,
+	     SSB_SPROM8_OPT_CORRX_TEMPCORRX_SHIFT);
+	SPEX(tempsense_option, SSB_SPROM8_OPT_CORRX,
+	     SSB_SPROM8_OPT_CORRX_TEMP_OPTION,
+	     SSB_SPROM8_OPT_CORRX_TEMP_OPTION_SHIFT);
+	SPEX(freqoffset_corr, SSB_SPROM8_HWIQ_IQSWP,
+	     SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR,
+	     SSB_SPROM8_HWIQ_IQSWP_FREQ_CORR_SHIFT);
+	SPEX(iqcal_swp_dis, SSB_SPROM8_HWIQ_IQSWP,
+	     SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP,
+	     SSB_SPROM8_HWIQ_IQSWP_IQCAL_SWP_SHIFT);
+	SPEX(hw_iqcal_en, SSB_SPROM8_HWIQ_IQSWP, SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL,
+	     SSB_SPROM8_HWIQ_IQSWP_HW_IQCAL_SHIFT);
+
+	SPEX(bw40po, SSB_SPROM8_BW40PO, ~0, 0);
+	SPEX(cddpo, SSB_SPROM8_CDDPO, ~0, 0);
+	SPEX(stbcpo, SSB_SPROM8_STBCPO, ~0, 0);
+	SPEX(bwduppo, SSB_SPROM8_BWDUPPO, ~0, 0);
+
+	SPEX(tempthresh, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_TRESH,
+	     SSB_SPROM8_THERMAL_TRESH_SHIFT);
+	SPEX(tempoffset, SSB_SPROM8_THERMAL, SSB_SPROM8_THERMAL_OFFSET,
+	     SSB_SPROM8_THERMAL_OFFSET_SHIFT);
+	SPEX(phycal_tempdelta, SSB_SPROM8_TEMPDELTA,
+	     SSB_SPROM8_TEMPDELTA_PHYCAL,
+	     SSB_SPROM8_TEMPDELTA_PHYCAL_SHIFT);
+	SPEX(temps_period, SSB_SPROM8_TEMPDELTA, SSB_SPROM8_TEMPDELTA_PERIOD,
+	     SSB_SPROM8_TEMPDELTA_PERIOD_SHIFT);
+	SPEX(temps_hysteresis, SSB_SPROM8_TEMPDELTA,
+	     SSB_SPROM8_TEMPDELTA_HYSTERESIS,
+	     SSB_SPROM8_TEMPDELTA_HYSTERESIS_SHIFT);
+}
+
+/*
+ * Indicates the presence of an external SPROM.
+ */
+static bool bcma_sprom_ext_available(struct bcma_bus *bus)
+{
+	u32 chip_status;
+	u32 srom_control;
+	u32 present_mask;
+
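+	/* newer ChipCommon revs report SPROM presence in SROM control */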
+	if (bus->drv_cc.core->id.rev >= 31) {
+		if (!(bus->drv_cc.capabilities & BCMA_CC_CAP_SPROM))
+			return false;
+
+		srom_control = bcma_read32(bus->drv_cc.core,
+					   BCMA_CC_SROM_CONTROL);
+		return srom_control & BCMA_CC_SROM_CONTROL_PRESENT;
+	}
+
+	/* older ChipCommon revisions use the chip status register instead */
+	chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4313:
+		present_mask = BCMA_CC_CHIPST_4313_SPROM_PRESENT;
+		break;
+
+	case BCMA_CHIP_ID_BCM4331:
+		present_mask = BCMA_CC_CHIPST_4331_SPROM_PRESENT;
+		break;
+
+	default:
+		return true;
+	}
+
+	return chip_status & present_mask;
+}
+
+/*
+ * Indicates that on-chip OTP memory is present and enabled.
+ */
+static bool bcma_sprom_onchip_available(struct bcma_bus *bus)
+{
+	u32 chip_status;
+	u32 otpsize = 0;
+	bool present;
+
+	chip_status = bcma_read32(bus->drv_cc.core, BCMA_CC_CHIPSTAT);
+	switch (bus->chipinfo.id) {
+	case BCMA_CHIP_ID_BCM4313:
+		present = chip_status & BCMA_CC_CHIPST_4313_OTP_PRESENT;
+		break;
+
+	case BCMA_CHIP_ID_BCM4331:
+		present = chip_status & BCMA_CC_CHIPST_4331_OTP_PRESENT;
+		break;
+	case BCMA_CHIP_ID_BCM43142:
+	case BCMA_CHIP_ID_BCM43224:
+	case BCMA_CHIP_ID_BCM43225:
+		/* for these chips OTP is always available */
+		present = true;
+		break;
+	case BCMA_CHIP_ID_BCM43131:
+	case BCMA_CHIP_ID_BCM43217:
+	case BCMA_CHIP_ID_BCM43227:
+	case BCMA_CHIP_ID_BCM43228:
+	case BCMA_CHIP_ID_BCM43428:
+		present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT;
+		break;
+	default:
+		present = false;
+		break;
+	}
+
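+	/* OTP is only usable when the capabilities report a non-zero size */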
+	if (present) {
+		otpsize = bus->drv_cc.capabilities & BCMA_CC_CAP_OTPS;
+		otpsize >>= BCMA_CC_CAP_OTPS_SHIFT;
+	}
+
+	return otpsize != 0;
+}
+
+/*
+ * Verify that the OTP is programmed and determine the byte
+ * offset where the SPROM data is located.
+ *
+ * Returns 0 on error, the byte offset otherwise.
+ */
+static int bcma_sprom_onchip_offset(struct bcma_bus *bus)
+{
+	struct bcma_device *cc = bus->drv_cc.core;
+	u32 offset;
+
+	/* verify OTP status */
+	if ((bcma_read32(cc, BCMA_CC_OTPS) & BCMA_CC_OTPS_GU_PROG_HW) == 0)
+		return 0;
+
+	/* obtain bit offset from otplayout register */
+	offset = (bcma_read32(cc, BCMA_CC_OTPL) & BCMA_CC_OTPL_GURGN_OFFSET);
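+	/* the region offset is given in bits; convert it to a byte offset */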
+	return BCMA_CC_SPROM + (offset >> 3);
+}
+
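+/*
+ * Fill bus->sprom for this bus: prefer an external SPROM, fall back to
+ * on-chip OTP, and finally to a fallback SPROM provided by the platform
+ * code. The contents are read and validated against the known revision
+ * 4/10/11 word counts before the revision 8 layout is extracted.
+ */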
+int bcma_sprom_get(struct bcma_bus *bus)
+{
+	u16 offset = BCMA_CC_SPROM;
+	u16 *sprom;
+	size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4,
+				 SSB_SPROMSIZE_WORDS_R10,
+				 SSB_SPROMSIZE_WORDS_R11, };
+	int i, err = 0;
+
+	if (!bus->drv_cc.core)
+		return -EOPNOTSUPP;
+
+	if (!bcma_sprom_ext_available(bus)) {
+		bool sprom_onchip;
+
+		/*
+		 * External SPROM takes precedence so check
+		 * on-chip OTP only when no external SPROM
+		 * is present.
+		 */
+		sprom_onchip = bcma_sprom_onchip_available(bus);
+		if (sprom_onchip) {
+			/* determine offset */
+			offset = bcma_sprom_onchip_offset(bus);
+		}
+		if (!offset || !sprom_onchip) {
+			/*
+			 * There seems to be no SPROM on the device, so
+			 * ask the arch code whether a fallback SPROM is
+			 * available for this device from some other storage.
+			 */
+			err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
+			return err;
+		}
+	}
+
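+	/*
+	 * On BCM4331/43431 the external PA control lines are disabled for
+	 * the duration of the SPROM access and re-enabled afterwards
+	 * (presumably because they share pins with the SPROM interface).
+	 */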
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
+	    bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
+		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
+
+	bcma_debug(bus, "SPROM offset 0x%x\n", offset);
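+	/*
+	 * The SPROM size is not known up front; try each supported word
+	 * count (revision 4, 10 and 11) until the contents validate.
+	 */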
+	for (i = 0; i < ARRAY_SIZE(sprom_sizes); i++) {
+		size_t words = sprom_sizes[i];
+
+		sprom = kcalloc(words, sizeof(u16), GFP_KERNEL);
+		if (!sprom)
+			return -ENOMEM;
+
+		bcma_sprom_read(bus, offset, sprom, words);
+		err = bcma_sprom_valid(bus, sprom, words);
+		if (!err)
+			break;
+
+		kfree(sprom);
+	}
+
+	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4331 ||
+	    bus->chipinfo.id == BCMA_CHIP_ID_BCM43431)
+		bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
+
+	if (err) {
+		bcma_warn(bus, "Invalid SPROM read from the PCIe card, trying to use fallback SPROM\n");
+		err = bcma_fill_sprom_with_fallback(bus, &bus->sprom);
+	} else {
+		bcma_sprom_extract_r8(bus, sprom);
+		kfree(sprom);
+	}
+
+	return err;
+}