Linux v4.19.13 snapshot of arch/arm/mach-sunxi (Allwinner SMP and machine support), presented as new-file diffs against an empty tree.
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
new file mode 100644
index 0000000..d9c8ecf
--- /dev/null
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -0,0 +1,58 @@
+menuconfig ARCH_SUNXI
+	bool "Allwinner SoCs"
+	depends on ARCH_MULTI_V7
+	select ARCH_HAS_RESET_CONTROLLER
+	select CLKSRC_MMIO
+	select GENERIC_IRQ_CHIP
+	select GPIOLIB
+	select PINCTRL
+	select PM_OPP
+	select SUN4I_TIMER
+	select RESET_CONTROLLER
+
+if ARCH_SUNXI
+
+config MACH_SUN4I
+	bool "Allwinner A10 (sun4i) SoCs support"
+	default ARCH_SUNXI
+
+config MACH_SUN5I
+	bool "Allwinner A10s / A13 (sun5i) SoCs support"
+	default ARCH_SUNXI
+	select SUN5I_HSTIMER
+
+config MACH_SUN6I
+	bool "Allwinner A31 (sun6i) SoCs support"
+	default ARCH_SUNXI
+	select ARM_GIC
+	select MFD_SUN6I_PRCM
+	select SUN5I_HSTIMER
+
+config MACH_SUN7I
+	bool "Allwinner A20 (sun7i) SoCs support"
+	default ARCH_SUNXI
+	select ARM_GIC
+	select ARM_PSCI
+	select ARCH_SUPPORTS_BIG_ENDIAN
+	select HAVE_ARM_ARCH_TIMER
+	select SUN5I_HSTIMER
+
+config MACH_SUN8I
+	bool "Allwinner sun8i Family SoCs support"
+	default ARCH_SUNXI
+	select ARM_GIC
+	select MFD_SUN6I_PRCM
+
+config MACH_SUN9I
+	bool "Allwinner (sun9i) SoCs support"
+	default ARCH_SUNXI
+	select ARM_GIC
+
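+# Hidden helper symbol: multi-cluster SMP on the A80/A83T needs CCI-400
+# port control for cluster coherency and the cpu_suspend framework for
+# the boot-cluster trampoline in mc_smp.c.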
+config ARCH_SUNXI_MC_SMP
+	bool
+	depends on SMP
+	default MACH_SUN9I || MACH_SUN8I
+	select ARM_CCI400_PORT_CTRL
+	select ARM_CPU_SUSPEND
+
+endif
diff --git a/arch/arm/mach-sunxi/Makefile b/arch/arm/mach-sunxi/Makefile
new file mode 100644
index 0000000..71429aa
--- /dev/null
+++ b/arch/arm/mach-sunxi/Makefile
@@ -0,0 +1,5 @@
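+# mc_smp.c contains inline assembly (coherency exit code) that needs
+# ARMv7-A regardless of the multi-platform build's baseline.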
+CFLAGS_mc_smp.o	+= -march=armv7-a
+
+obj-$(CONFIG_ARCH_SUNXI) += sunxi.o
+obj-$(CONFIG_ARCH_SUNXI_MC_SMP) += mc_smp.o headsmp.o
+obj-$(CONFIG_SMP) += platsmp.o
diff --git a/arch/arm/mach-sunxi/headsmp.S b/arch/arm/mach-sunxi/headsmp.S
new file mode 100644
index 0000000..32d76be
--- /dev/null
+++ b/arch/arm/mach-sunxi/headsmp.S
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2018 Chen-Yu Tsai
+ * Copyright (c) 2018 Bootlin
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ * Mylène Josserand <mylene.josserand@bootlin.com>
+ *
+ * SMP support for sunxi-based systems with Cortex-A7/A15
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/cputype.h>
+
+ENTRY(sunxi_mc_smp_cluster_cache_enable)
+	.arch	armv7-a
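+	/* allow movw/movt below even if the assembler baseline is older */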
+	/*
+	 * Enable cluster-level coherency, in preparation for turning on the MMU.
+	 *
+	 * Also enable regional clock gating and L2 data latency settings for
+	 * Cortex-A15. These settings are from the vendor kernel.
+	 */
+	mrc	p15, 0, r1, c0, c0, 0
+	movw	r2, #(ARM_CPU_PART_MASK & 0xffff)
+	movt	r2, #(ARM_CPU_PART_MASK >> 16)
+	and	r1, r1, r2
+	movw	r2, #(ARM_CPU_PART_CORTEX_A15 & 0xffff)
+	movt	r2, #(ARM_CPU_PART_CORTEX_A15 >> 16)
+	cmp	r1, r2
+	bne	not_a15
+
+	/* The following is Cortex-A15 specific */
+
+	/* ACTLR2: Enable CPU regional clock gates */
+	mrc p15, 1, r1, c15, c0, 4
+	orr r1, r1, #(0x1 << 31)
+	mcr p15, 1, r1, c15, c0, 4
+
+	/* L2ACTLR */
+	mrc p15, 1, r1, c15, c0, 0
+	/* Enable L2, GIC, and Timer regional clock gates */
+	orr r1, r1, #(0x1 << 26)
+	/* Disable clean/evict from being pushed to external */
+	orr r1, r1, #(0x1 << 3)
+	mcr p15, 1, r1, c15, c0, 0
+
+	/* L2CTLR: L2 data RAM latency */
+	mrc p15, 1, r1, c9, c0, 2
+	bic r1, r1, #(0x7 << 0)
+	orr r1, r1, #(0x3 << 0)
+	mcr p15, 1, r1, c9, c0, 2
+
+	/* End of Cortex-A15 specific setup */
+	not_a15:
+
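+	/*
+	 * "first" (below) stores the link-time offset from itself to
+	 * sunxi_mc_smp_first_comer, so this sequence computes the
+	 * variable's address PC-relatively and works with the MMU off.
+	 */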
+	/* Get value of sunxi_mc_smp_first_comer */
+	adr	r1, first
+	ldr	r0, [r1]
+	ldr	r0, [r1, r0]
+
+	/* Skip cci_enable_port_for_self if not first comer */
+	cmp	r0, #0
+	bxeq	lr
+	b	cci_enable_port_for_self
+
+	.align 2
+	first: .word sunxi_mc_smp_first_comer - .
+ENDPROC(sunxi_mc_smp_cluster_cache_enable)
+
+ENTRY(sunxi_mc_smp_secondary_startup)
+	bl	sunxi_mc_smp_cluster_cache_enable
+	bl	secure_cntvoff_init
+	b	secondary_startup
+ENDPROC(sunxi_mc_smp_secondary_startup)
+
+ENTRY(sunxi_mc_smp_resume)
+	bl	sunxi_mc_smp_cluster_cache_enable
+	b	cpu_resume
+ENDPROC(sunxi_mc_smp_resume)
diff --git a/arch/arm/mach-sunxi/mc_smp.c b/arch/arm/mach-sunxi/mc_smp.c
new file mode 100644
index 0000000..b4037b6
--- /dev/null
+++ b/arch/arm/mach-sunxi/mc_smp.c
@@ -0,0 +1,907 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * arch/arm/mach-sunxi/mc_smp.c
+ *
+ * Based on Allwinner code, arch/arm/mach-exynos/mcpm-exynos.c, and
+ * arch/arm/mach-hisi/platmcpm.c
+ * Cluster cache enable trampoline code adapted from MCPM framework
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/cpu_pm.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/idmap.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+
+#define SUNXI_CPUS_PER_CLUSTER		4
+#define SUNXI_NR_CLUSTERS		2
+
+#define POLL_USEC	100
+#define TIMEOUT_USEC	100000
+
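+/* In the macros below, (c) is a cluster index and (n) a core index */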
+#define CPUCFG_CX_CTRL_REG0(c)		(0x10 * (c))
+#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(n)	BIT(n)
+#define CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL	0xf
+#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7	BIT(4)
+#define CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15	BIT(0)
+#define CPUCFG_CX_CTRL_REG1(c)		(0x10 * (c) + 0x4)
+#define CPUCFG_CX_CTRL_REG1_ACINACTM	BIT(0)
+#define CPUCFG_CX_STATUS(c)		(0x30 + 0x4 * (c))
+#define CPUCFG_CX_STATUS_STANDBYWFI(n)	BIT(16 + (n))
+#define CPUCFG_CX_STATUS_STANDBYWFIL2	BIT(0)
+#define CPUCFG_CX_RST_CTRL(c)		(0x80 + 0x4 * (c))
+#define CPUCFG_CX_RST_CTRL_DBG_SOC_RST	BIT(24)
+#define CPUCFG_CX_RST_CTRL_ETM_RST(n)	BIT(20 + (n))
+#define CPUCFG_CX_RST_CTRL_ETM_RST_ALL	(0xf << 20)
+#define CPUCFG_CX_RST_CTRL_DBG_RST(n)	BIT(16 + (n))
+#define CPUCFG_CX_RST_CTRL_DBG_RST_ALL	(0xf << 16)
+#define CPUCFG_CX_RST_CTRL_H_RST	BIT(12)
+#define CPUCFG_CX_RST_CTRL_L2_RST	BIT(8)
+#define CPUCFG_CX_RST_CTRL_CX_RST(n)	BIT(4 + (n))
+#define CPUCFG_CX_RST_CTRL_CORE_RST(n)	BIT(n)
+#define CPUCFG_CX_RST_CTRL_CORE_RST_ALL	(0xf << 0)
+
+#define PRCM_CPU_PO_RST_CTRL(c)		(0x4 + 0x4 * (c))
+#define PRCM_CPU_PO_RST_CTRL_CORE(n)	BIT(n)
+#define PRCM_CPU_PO_RST_CTRL_CORE_ALL	0xf
+#define PRCM_PWROFF_GATING_REG(c)	(0x100 + 0x4 * (c))
+/* The cluster power-off gating bit differs between the A80 and A83T */
+#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I	BIT(0)
+#define PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I	BIT(4)
+#define PRCM_PWROFF_GATING_REG_CORE(n)	BIT(n)
+#define PRCM_PWR_SWITCH_REG(c, cpu)	(0x140 + 0x10 * (c) + 0x4 * (cpu))
+#define PRCM_CPU_SOFT_ENTRY_REG		0x164
+
+/* R_CPUCFG registers, specific to sun8i-a83t */
+#define R_CPUCFG_CLUSTER_PO_RST_CTRL(c)	(0x30 + (c) * 0x4)
+#define R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(n)	BIT(n)
+#define R_CPUCFG_CPU_SOFT_ENTRY_REG		0x01a4
+
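+/*
+ * Magic values written to secure SRAM. Per the vendor code, the boot
+ * ROM checks them to decide whether a hot-plugged CPU0 should jump to
+ * the soft entry point instead of doing a cold boot.
+ */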
+#define CPU0_SUPPORT_HOTPLUG_MAGIC0	0xFA50392F
+#define CPU0_SUPPORT_HOTPLUG_MAGIC1	0x790DCA3A
+
+static void __iomem *cpucfg_base;
+static void __iomem *prcm_base;
+static void __iomem *sram_b_smp_base;
+static void __iomem *r_cpucfg_base;
+
+extern void sunxi_mc_smp_secondary_startup(void);
+extern void sunxi_mc_smp_resume(void);
+static bool is_a83t;
+
+static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster)
+{
+	struct device_node *node;
+	int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core;
+
+	node = of_cpu_device_node_get(cpu);
+
+	/* In case of_cpu_device_node_get fails */
+	if (!node)
+		node = of_get_cpu_node(cpu, NULL);
+
+	if (!node) {
+		/*
+		 * There's no point in returning an error, since we
+		 * would be midway through a core or cluster power sequence.
+		 */
+		pr_err("%s: Couldn't get CPU cluster %u core %u device node\n",
+		       __func__, cluster, core);
+
+		return false;
+	}
+
+	return of_device_is_compatible(node, "arm,cortex-a15");
+}
+
+static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster,
+				      bool enable)
+{
+	u32 reg;
+
+	/* control sequence from Allwinner A80 user manual v1.2 PRCM section */
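+	/*
+	 * The clamp is released in stages (0xff -> 0x00) with short
+	 * delays, presumably to limit in-rush current.
+	 */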
+	reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+	if (enable) {
+		if (reg == 0x00) {
+			pr_debug("power clamp for cluster %u cpu %u already open\n",
+				 cluster, cpu);
+			return 0;
+		}
+
+		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+		udelay(10);
+		writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+		udelay(10);
+		writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+		udelay(10);
+		writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+		udelay(10);
+		writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+		udelay(10);
+	} else {
+		writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu));
+		udelay(10);
+	}
+
+	return 0;
+}
+
+static void sunxi_cpu0_hotplug_support_set(bool enable)
+{
+	if (enable) {
+		writel(CPU0_SUPPORT_HOTPLUG_MAGIC0, sram_b_smp_base);
+		writel(CPU0_SUPPORT_HOTPLUG_MAGIC1, sram_b_smp_base + 0x4);
+	} else {
+		writel(0x0, sram_b_smp_base);
+		writel(0x0, sram_b_smp_base + 0x4);
+	}
+}
+
+static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
+{
+	u32 reg;
+
+	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
+	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
+		return -EINVAL;
+
+	/* Set hotplug support magic flags for cpu0 */
+	if (cluster == 0 && cpu == 0)
+		sunxi_cpu0_hotplug_support_set(true);
+
+	/* assert processor power-on reset */
+	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
+	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE(cpu);
+	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
+
+	if (is_a83t) {
+		/* assert cpu power-on reset */
+		reg  = readl(r_cpucfg_base +
+			     R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
+		reg &= ~(R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu));
+		writel(reg, r_cpucfg_base +
+		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
+		udelay(10);
+	}
+
+	/* Cortex-A7: hold L1 reset disable signal low */
+	if (!sunxi_core_is_cortex_a15(cpu, cluster)) {
+		reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
+		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE(cpu);
+		writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
+	}
+
+	/* assert processor related resets */
+	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
+
+	/*
+	 * Allwinner code also asserts resets for NEON on A15. According
+	 * to ARM manuals, asserting power-on reset is sufficient.
+	 */
+	if (!sunxi_core_is_cortex_a15(cpu, cluster))
+		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
+
+	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+
+	/* open power switch */
+	sunxi_cpu_power_switch_set(cpu, cluster, true);
+
+	/* Handle the A83T bit swap: core 0's power gate is bit 4 */
+	if (is_a83t) {
+		if (cpu == 0)
+			cpu = 4;
+	}
+
+	/* clear processor power gate */
+	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	reg &= ~PRCM_PWROFF_GATING_REG_CORE(cpu);
+	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	udelay(20);
+
+	/* Undo the A83T bit swap */
+	if (is_a83t) {
+		if (cpu == 4)
+			cpu = 0;
+	}
+
+	/* de-assert processor power-on reset */
+	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
+	reg |= PRCM_CPU_PO_RST_CTRL_CORE(cpu);
+	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
+
+	if (is_a83t) {
+		reg  = readl(r_cpucfg_base +
+			     R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
+		reg |= R_CPUCFG_CLUSTER_PO_RST_CTRL_CORE(cpu);
+		writel(reg, r_cpucfg_base +
+		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
+		udelay(10);
+	}
+
+	/* de-assert all processor resets */
+	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+	reg |= CPUCFG_CX_RST_CTRL_DBG_RST(cpu);
+	reg |= CPUCFG_CX_RST_CTRL_CORE_RST(cpu);
+	if (!sunxi_core_is_cortex_a15(cpu, cluster))
+		reg |= CPUCFG_CX_RST_CTRL_ETM_RST(cpu);
+	else
+		reg |= CPUCFG_CX_RST_CTRL_CX_RST(cpu); /* NEON */
+	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+
+	return 0;
+}
+
+static int sunxi_cluster_powerup(unsigned int cluster)
+{
+	u32 reg;
+
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= SUNXI_NR_CLUSTERS)
+		return -EINVAL;
+
+	/* For A83T, assert cluster cores resets */
+	if (is_a83t) {
+		reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+		reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;   /* Core Reset    */
+		writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+		udelay(10);
+	}
+
+	/* assert ACINACTM */
+	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
+	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+
+	/* assert cluster processor power-on resets */
+	reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
+	reg &= ~PRCM_CPU_PO_RST_CTRL_CORE_ALL;
+	writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster));
+
+	/* assert cluster cores resets */
+	if (is_a83t) {
+		reg  = readl(r_cpucfg_base +
+			     R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
+		reg &= ~CPUCFG_CX_RST_CTRL_CORE_RST_ALL;
+		writel(reg, r_cpucfg_base +
+		       R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster));
+		udelay(10);
+	}
+
+	/* assert cluster resets */
+	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
+	reg &= ~CPUCFG_CX_RST_CTRL_DBG_RST_ALL;
+	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
+	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;
+
+	/*
+	 * Allwinner code also asserts resets for NEON on A15. According
+	 * to ARM manuals, asserting power-on reset is sufficient.
+	 */
+	if (!sunxi_core_is_cortex_a15(0, cluster))
+		reg &= ~CPUCFG_CX_RST_CTRL_ETM_RST_ALL;
+
+	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+
+	/* hold L1/L2 reset disable signals low */
+	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
+	if (sunxi_core_is_cortex_a15(0, cluster)) {
+		/* Cortex-A15: hold L2RSTDISABLE low */
+		reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A15;
+	} else {
+		/* Cortex-A7: hold L1RSTDISABLE and L2RSTDISABLE low */
+		reg &= ~CPUCFG_CX_CTRL_REG0_L1_RST_DISABLE_ALL;
+		reg &= ~CPUCFG_CX_CTRL_REG0_L2_RST_DISABLE_A7;
+	}
+	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster));
+
+	/* clear cluster power gate */
+	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	if (is_a83t)
+		reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
+	else
+		reg &= ~PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
+	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	udelay(20);
+
+	/* de-assert cluster resets */
+	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+	reg |= CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
+	reg |= CPUCFG_CX_RST_CTRL_H_RST;
+	reg |= CPUCFG_CX_RST_CTRL_L2_RST;
+	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+
+	/* de-assert ACINACTM */
+	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+	reg &= ~CPUCFG_CX_CTRL_REG1_ACINACTM;
+	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+
+	return 0;
+}
+
+/*
+ * This code path is shared between the initial nocache_trampoline call
+ * to enable CCI-400 and the proper cluster cache disable before power
+ * down.
+ */
+static void sunxi_cluster_cache_disable_without_axi(void)
+{
+	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A15) {
+		/*
+		 * On the Cortex-A15 we need to disable
+		 * L2 prefetching before flushing the cache.
+		 */
+		asm volatile(
+		"mcr	p15, 1, %0, c15, c0, 3\n"
+		"isb\n"
+		"dsb"
+		: : "r" (0x400));
+	}
+
+	/* Flush all cache levels for this cluster. */
+	v7_exit_coherency_flush(all);
+
+	/*
+	 * Disable cluster-level coherency by masking
+	 * incoming snoops and DVM messages:
+	 */
+	cci_disable_port_by_cpu(read_cpuid_mpidr());
+}
+
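+/*
+ * Per-CPU usage counts: 0 means down, 1 means up; a transient value of
+ * 2 means a power-up request raced with cpu_die (see
+ * sunxi_mc_smp_cpu_die).
+ */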
+static int sunxi_mc_smp_cpu_table[SUNXI_NR_CLUSTERS][SUNXI_CPUS_PER_CLUSTER];
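+/*
+ * Non-static: read by the trampoline in headsmp.S, with MMU and caches
+ * off, to decide whether to enable this cluster's CCI port.
+ */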
+int sunxi_mc_smp_first_comer;
+
+static DEFINE_SPINLOCK(boot_lock);
+
+static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster)
+{
+	int i;
+
+	for (i = 0; i < SUNXI_CPUS_PER_CLUSTER; i++)
+		if (sunxi_mc_smp_cpu_table[cluster][i])
+			return false;
+	return true;
+}
+
+static void sunxi_mc_smp_secondary_init(unsigned int cpu)
+{
+	/* Clear hotplug support magic flags for cpu0 */
+	if (cpu == 0)
+		sunxi_cpu0_hotplug_support_set(false);
+}
+
+static int sunxi_mc_smp_boot_secondary(unsigned int l_cpu,
+				       struct task_struct *idle)
+{
+	unsigned int mpidr, cpu, cluster;
+
+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+	if (!cpucfg_base)
+		return -ENODEV;
+	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER)
+		return -EINVAL;
+
+	spin_lock_irq(&boot_lock);
+
+	if (sunxi_mc_smp_cpu_table[cluster][cpu])
+		goto out;
+
+	if (sunxi_mc_smp_cluster_is_down(cluster)) {
+		sunxi_mc_smp_first_comer = true;
+		sunxi_cluster_powerup(cluster);
+	} else {
+		sunxi_mc_smp_first_comer = false;
+	}
+
+	/* This is read by incoming CPUs with their cache and MMU disabled */
+	sync_cache_w(&sunxi_mc_smp_first_comer);
+	sunxi_cpu_powerup(cpu, cluster);
+
+out:
+	sunxi_mc_smp_cpu_table[cluster][cpu]++;
+	spin_unlock_irq(&boot_lock);
+
+	return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void sunxi_cluster_cache_disable(void)
+{
+	unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
+	u32 reg;
+
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+
+	sunxi_cluster_cache_disable_without_axi();
+
+	/* last man standing, assert ACINACTM */
+	reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+	reg |= CPUCFG_CX_CTRL_REG1_ACINACTM;
+	writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster));
+}
+
+static void sunxi_mc_smp_cpu_die(unsigned int l_cpu)
+{
+	unsigned int mpidr, cpu, cluster;
+	bool last_man;
+
+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
+
+	spin_lock(&boot_lock);
+	sunxi_mc_smp_cpu_table[cluster][cpu]--;
+	if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) {
+		/* A power_up request went ahead of us. */
+		pr_debug("%s: aborting due to a power up request\n",
+			 __func__);
+		spin_unlock(&boot_lock);
+		return;
+	} else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) {
+		pr_err("Cluster %u CPU%u boots multiple times\n",
+		       cluster, cpu);
+		BUG();
+	}
+
+	last_man = sunxi_mc_smp_cluster_is_down(cluster);
+	spin_unlock(&boot_lock);
+
+	gic_cpu_if_down(0);
+	if (last_man)
+		sunxi_cluster_cache_disable();
+	else
+		v7_exit_coherency_flush(louis);
+
+	for (;;)
+		wfi();
+}
+
+static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster)
+{
+	u32 reg;
+
+	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
+	if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS)
+		return -EINVAL;
+
+	/* gate processor power */
+	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	reg |= PRCM_PWROFF_GATING_REG_CORE(cpu);
+	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	udelay(20);
+
+	/* close power switch */
+	sunxi_cpu_power_switch_set(cpu, cluster, false);
+
+	return 0;
+}
+
+static int sunxi_cluster_powerdown(unsigned int cluster)
+{
+	u32 reg;
+
+	pr_debug("%s: cluster %u\n", __func__, cluster);
+	if (cluster >= SUNXI_NR_CLUSTERS)
+		return -EINVAL;
+
+	/* assert cluster resets or system will hang */
+	pr_debug("%s: assert cluster reset\n", __func__);
+	reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+	reg &= ~CPUCFG_CX_RST_CTRL_DBG_SOC_RST;
+	reg &= ~CPUCFG_CX_RST_CTRL_H_RST;
+	reg &= ~CPUCFG_CX_RST_CTRL_L2_RST;
+	writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster));
+
+	/* gate cluster power */
+	pr_debug("%s: gate cluster power\n", __func__);
+	reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	if (is_a83t)
+		reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN8I;
+	else
+		reg |= PRCM_PWROFF_GATING_REG_CLUSTER_SUN9I;
+	writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster));
+	udelay(20);
+
+	return 0;
+}
+
+static int sunxi_mc_smp_cpu_kill(unsigned int l_cpu)
+{
+	unsigned int mpidr, cpu, cluster;
+	unsigned int tries, count;
+	int ret = 0;
+	u32 reg;
+
+	mpidr = cpu_logical_map(l_cpu);
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+	/* This should never happen */
+	if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS ||
+		    cpu >= SUNXI_CPUS_PER_CLUSTER))
+		return 0;
+
+	/* wait for CPU core to die and enter WFI */
+	count = TIMEOUT_USEC / POLL_USEC;
+	spin_lock_irq(&boot_lock);
+	for (tries = 0; tries < count; tries++) {
+		spin_unlock_irq(&boot_lock);
+		usleep_range(POLL_USEC / 2, POLL_USEC);
+		spin_lock_irq(&boot_lock);
+
+		/*
+		 * If the user turns off a bunch of cores at the same
+		 * time, the kernel might call cpu_kill before some of
+		 * them are ready. This is because boot_lock serializes
+		 * both cpu_die and cpu_kill callbacks. Either one could
+		 * run first. We should wait for cpu_die to complete.
+		 */
+		if (sunxi_mc_smp_cpu_table[cluster][cpu])
+			continue;
+
+		reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster));
+		if (reg & CPUCFG_CX_STATUS_STANDBYWFI(cpu))
+			break;
+	}
+
+	if (tries >= count) {
+		ret = ETIMEDOUT;
+		goto out;
+	}
+
+	/* power down CPU core */
+	sunxi_cpu_powerdown(cpu, cluster);
+
+	if (!sunxi_mc_smp_cluster_is_down(cluster))
+		goto out;
+
+	/* wait for cluster L2 WFI */
+	ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg,
+				 reg & CPUCFG_CX_STATUS_STANDBYWFIL2,
+				 POLL_USEC, TIMEOUT_USEC);
+	if (ret) {
+		/*
+		 * Ignore timeout on the cluster. Leaving the cluster on
+		 * will not affect system execution, just use a bit more
+		 * power. But returning an error here will only confuse
+		 * the user, as the CPU has already been shut down.
+		 */
+		ret = 0;
+		goto out;
+	}
+
+	/* Power down cluster */
+	sunxi_cluster_powerdown(cluster);
+
+out:
+	spin_unlock_irq(&boot_lock);
+	pr_debug("%s: cluster %u cpu %u powerdown: %d\n",
+		 __func__, cluster, cpu, ret);
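+	/* the cpu_kill hook returns 1 on success, 0 on failure, hence !ret */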
+	return !ret;
+}
+
+static bool sunxi_mc_smp_cpu_can_disable(unsigned int cpu)
+{
+	/* CPU0 hotplug not handled for sun8i-a83t */
+	if (is_a83t)
+		if (cpu == 0)
+			return false;
+	return true;
+}
+#endif
+
+static const struct smp_operations sunxi_mc_smp_smp_ops __initconst = {
+	.smp_secondary_init	= sunxi_mc_smp_secondary_init,
+	.smp_boot_secondary	= sunxi_mc_smp_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+	.cpu_die		= sunxi_mc_smp_cpu_die,
+	.cpu_kill		= sunxi_mc_smp_cpu_kill,
+	.cpu_can_disable	= sunxi_mc_smp_cpu_can_disable,
+#endif
+};
+
+static bool __init sunxi_mc_smp_cpu_table_init(void)
+{
+	unsigned int mpidr, cpu, cluster;
+
+	mpidr = read_cpuid_mpidr();
+	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+
+	if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) {
+		pr_err("%s: boot CPU is out of bounds!\n", __func__);
+		return false;
+	}
+	sunxi_mc_smp_cpu_table[cluster][cpu] = 1;
+	return true;
+}
+
+/*
+ * Adapted from arch/arm/common/mcpm_entry.c
+ *
+ * We need the trampoline code to enable CCI-400 on the first cluster
+ */
+typedef typeof(cpu_reset) phys_reset_t;
+
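+/*
+ * setup_mm_for_reboot() installs an identity mapping, after which it is
+ * safe to call cpu_reset() through its physical alias: it turns the MMU
+ * off and branches to the physical address passed to it.
+ */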
+static int __init nocache_trampoline(unsigned long __unused)
+{
+	phys_reset_t phys_reset;
+
+	setup_mm_for_reboot();
+	sunxi_cluster_cache_disable_without_axi();
+
+	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
+	phys_reset(__pa_symbol(sunxi_mc_smp_resume), false);
+	BUG();
+}
+
+static int __init sunxi_mc_smp_loopback(void)
+{
+	int ret;
+
+	/*
+	 * We're going to soft-restart the current CPU through the
+	 * low-level MCPM code by leveraging the suspend/resume
+	 * infrastructure. Let's play it safe by using cpu_pm_enter()
+	 * in case the CPU init code path resets the VFP or similar.
+	 */
+	sunxi_mc_smp_first_comer = true;
+	local_irq_disable();
+	local_fiq_disable();
+	ret = cpu_pm_enter();
+	if (!ret) {
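+		/*
+		 * nocache_trampoline() does not return here directly;
+		 * the CPU comes back through sunxi_mc_smp_resume() and
+		 * cpu_resume(), as if waking from a suspend.
+		 */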
+		ret = cpu_suspend(0, nocache_trampoline);
+		cpu_pm_exit();
+	}
+	local_fiq_enable();
+	local_irq_enable();
+	sunxi_mc_smp_first_comer = false;
+
+	return ret;
+}
+
+/*
+ * This holds any device nodes that we requested resources for,
+ * so that we may easily release resources in the error path.
+ */
+struct sunxi_mc_smp_nodes {
+	struct device_node *prcm_node;
+	struct device_node *cpucfg_node;
+	struct device_node *sram_node;
+	struct device_node *r_cpucfg_node;
+};
+
+/* This structure holds SoC-specific bits tied to an enable-method string. */
+struct sunxi_mc_smp_data {
+	const char *enable_method;
+	int (*get_smp_nodes)(struct sunxi_mc_smp_nodes *nodes);
+	bool is_a83t;
+};
+
+static void __init sunxi_mc_smp_put_nodes(struct sunxi_mc_smp_nodes *nodes)
+{
+	of_node_put(nodes->prcm_node);
+	of_node_put(nodes->cpucfg_node);
+	of_node_put(nodes->sram_node);
+	of_node_put(nodes->r_cpucfg_node);
+	memset(nodes, 0, sizeof(*nodes));
+}
+
+static int __init sun9i_a80_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
+{
+	nodes->prcm_node = of_find_compatible_node(NULL, NULL,
+						   "allwinner,sun9i-a80-prcm");
+	if (!nodes->prcm_node) {
+		pr_err("%s: PRCM not available\n", __func__);
+		return -ENODEV;
+	}
+
+	nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
+						     "allwinner,sun9i-a80-cpucfg");
+	if (!nodes->cpucfg_node) {
+		pr_err("%s: CPUCFG not available\n", __func__);
+		return -ENODEV;
+	}
+
+	nodes->sram_node = of_find_compatible_node(NULL, NULL,
+						   "allwinner,sun9i-a80-smp-sram");
+	if (!nodes->sram_node) {
+		pr_err("%s: Secure SRAM not available\n", __func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int __init sun8i_a83t_get_smp_nodes(struct sunxi_mc_smp_nodes *nodes)
+{
+	nodes->prcm_node = of_find_compatible_node(NULL, NULL,
+						   "allwinner,sun8i-a83t-r-ccu");
+	if (!nodes->prcm_node) {
+		pr_err("%s: PRCM not available\n", __func__);
+		return -ENODEV;
+	}
+
+	nodes->cpucfg_node = of_find_compatible_node(NULL, NULL,
+						     "allwinner,sun8i-a83t-cpucfg");
+	if (!nodes->cpucfg_node) {
+		pr_err("%s: CPUCFG not available\n", __func__);
+		return -ENODEV;
+	}
+
+	nodes->r_cpucfg_node = of_find_compatible_node(NULL, NULL,
+						       "allwinner,sun8i-a83t-r-cpucfg");
+	if (!nodes->r_cpucfg_node) {
+		pr_err("%s: RCPUCFG not available\n", __func__);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static const struct sunxi_mc_smp_data sunxi_mc_smp_data[] __initconst = {
+	{
+		.enable_method	= "allwinner,sun9i-a80-smp",
+		.get_smp_nodes	= sun9i_a80_get_smp_nodes,
+	},
+	{
+		.enable_method	= "allwinner,sun8i-a83t-smp",
+		.get_smp_nodes	= sun8i_a83t_get_smp_nodes,
+		.is_a83t	= true,
+	},
+};
+
+static int __init sunxi_mc_smp_init(void)
+{
+	struct sunxi_mc_smp_nodes nodes = { 0 };
+	struct device_node *node;
+	struct resource res;
+	void __iomem *addr;
+	int i, ret;
+
+	/*
+	 * Don't bother checking the "cpus" node, as an enable-method
+	 * property in that node is undocumented.
+	 */
+	node = of_cpu_device_node_get(0);
+	if (!node)
+		return -ENODEV;
+
+	/*
+	 * We can't actually use the enable-method magic in the kernel.
+	 * Our loopback / trampoline code uses the CPU suspend framework,
+	 * which requires the identity mapping be available. It would not
+	 * yet be available if we used the .init_cpus or .prepare_cpus
+	 * callbacks in smp_operations, which we would use if we were to
+	 * use CPU_METHOD_OF_DECLARE.
+	 */
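+	/*
+	 * of_property_match_string() returns the index of the matching
+	 * string, i.e. 0 here since "enable-method" holds one entry on
+	 * these SoCs; any other value (or error) means no match.
+	 */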
+	for (i = 0; i < ARRAY_SIZE(sunxi_mc_smp_data); i++) {
+		ret = of_property_match_string(node, "enable-method",
+					       sunxi_mc_smp_data[i].enable_method);
+		if (!ret)
+			break;
+	}
+
+	is_a83t = sunxi_mc_smp_data[i].is_a83t;
+
+	of_node_put(node);
+	if (ret)
+		return -ENODEV;
+
+	if (!sunxi_mc_smp_cpu_table_init())
+		return -EINVAL;
+
+	if (!cci_probed()) {
+		pr_err("%s: CCI-400 not available\n", __func__);
+		return -ENODEV;
+	}
+
+	/* Get needed device tree nodes */
+	ret = sunxi_mc_smp_data[i].get_smp_nodes(&nodes);
+	if (ret)
+		goto err_put_nodes;
+
+	/*
+	 * Unfortunately we cannot request the I/O region for the PRCM.
+	 * It is shared with the PRCM clock.
+	 */
+	prcm_base = of_iomap(nodes.prcm_node, 0);
+	if (!prcm_base) {
+		pr_err("%s: failed to map PRCM registers\n", __func__);
+		ret = -ENOMEM;
+		goto err_put_nodes;
+	}
+
+	cpucfg_base = of_io_request_and_map(nodes.cpucfg_node, 0,
+					    "sunxi-mc-smp");
+	if (IS_ERR(cpucfg_base)) {
+		ret = PTR_ERR(cpucfg_base);
+		pr_err("%s: failed to map CPUCFG registers: %d\n",
+		       __func__, ret);
+		goto err_unmap_prcm;
+	}
+
+	if (is_a83t) {
+		r_cpucfg_base = of_io_request_and_map(nodes.r_cpucfg_node,
+						      0, "sunxi-mc-smp");
+		if (IS_ERR(r_cpucfg_base)) {
+			ret = PTR_ERR(r_cpucfg_base);
+			pr_err("%s: failed to map R-CPUCFG registers\n",
+			       __func__);
+			goto err_unmap_release_cpucfg;
+		}
+	} else {
+		sram_b_smp_base = of_io_request_and_map(nodes.sram_node, 0,
+							"sunxi-mc-smp");
+		if (IS_ERR(sram_b_smp_base)) {
+			ret = PTR_ERR(sram_b_smp_base);
+			pr_err("%s: failed to map secure SRAM\n", __func__);
+			goto err_unmap_release_cpucfg;
+		}
+	}
+
+	/* Configure CCI-400 for boot cluster */
+	ret = sunxi_mc_smp_loopback();
+	if (ret) {
+		pr_err("%s: failed to configure boot cluster: %d\n",
+		       __func__, ret);
+		goto err_unmap_release_sram_rcpucfg;
+	}
+
+	/* We don't need the device nodes anymore */
+	sunxi_mc_smp_put_nodes(&nodes);
+
+	/* Set the hardware entry point address */
+	if (is_a83t)
+		addr = r_cpucfg_base + R_CPUCFG_CPU_SOFT_ENTRY_REG;
+	else
+		addr = prcm_base + PRCM_CPU_SOFT_ENTRY_REG;
+	writel(__pa_symbol(sunxi_mc_smp_secondary_startup), addr);
+
+	/* Actually enable multi cluster SMP */
+	smp_set_ops(&sunxi_mc_smp_smp_ops);
+
+	pr_info("sunxi multi cluster SMP support installed\n");
+
+	return 0;
+
+err_unmap_release_sram_rcpucfg:
+	if (is_a83t) {
+		iounmap(r_cpucfg_base);
+		of_address_to_resource(nodes.r_cpucfg_node, 0, &res);
+	} else {
+		iounmap(sram_b_smp_base);
+		of_address_to_resource(nodes.sram_node, 0, &res);
+	}
+	release_mem_region(res.start, resource_size(&res));
+err_unmap_release_cpucfg:
+	iounmap(cpucfg_base);
+	of_address_to_resource(nodes.cpucfg_node, 0, &res);
+	release_mem_region(res.start, resource_size(&res));
+err_unmap_prcm:
+	iounmap(prcm_base);
+err_put_nodes:
+	sunxi_mc_smp_put_nodes(&nodes);
+	return ret;
+}
+
+early_initcall(sunxi_mc_smp_init);
diff --git a/arch/arm/mach-sunxi/platsmp.c b/arch/arm/mach-sunxi/platsmp.c
new file mode 100644
index 0000000..8fb5088
--- /dev/null
+++ b/arch/arm/mach-sunxi/platsmp.c
@@ -0,0 +1,192 @@
+/*
+ * SMP support for Allwinner SoCs
+ *
+ * Copyright (C) 2013 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * Based on code
+ *  Copyright (C) 2012-2013 Allwinner Ltd.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memory.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/smp.h>
+
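+/* Per-CPU register blocks start at 0x40 and are spaced 0x40 apart */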
+#define CPUCFG_CPU_PWR_CLAMP_STATUS_REG(cpu)	((cpu) * 0x40 + 0x64)
+#define CPUCFG_CPU_RST_CTRL_REG(cpu)		(((cpu) + 1) * 0x40)
+#define CPUCFG_CPU_CTRL_REG(cpu)		(((cpu) + 1) * 0x40 + 0x04)
+#define CPUCFG_CPU_STATUS_REG(cpu)		(((cpu) + 1) * 0x40 + 0x08)
+#define CPUCFG_GEN_CTRL_REG			0x184
+#define CPUCFG_PRIVATE0_REG			0x1a4
+#define CPUCFG_PRIVATE1_REG			0x1a8
+#define CPUCFG_DBG_CTL0_REG			0x1e0
+#define CPUCFG_DBG_CTL1_REG			0x1e4
+
+#define PRCM_CPU_PWROFF_REG			0x100
+#define PRCM_CPU_PWR_CLAMP_REG(cpu)		(((cpu) * 4) + 0x140)
+
+static void __iomem *cpucfg_membase;
+static void __iomem *prcm_membase;
+
+static DEFINE_SPINLOCK(cpu_lock);
+
+static void __init sun6i_smp_prepare_cpus(unsigned int max_cpus)
+{
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL, "allwinner,sun6i-a31-prcm");
+	if (!node) {
+		pr_err("Missing A31 PRCM node in the device tree\n");
+		return;
+	}
+
+	prcm_membase = of_iomap(node, 0);
+	if (!prcm_membase) {
+		pr_err("Couldn't map A31 PRCM registers\n");
+		return;
+	}
+
+	node = of_find_compatible_node(NULL, NULL,
+				       "allwinner,sun6i-a31-cpuconfig");
+	if (!node) {
+		pr_err("Missing A31 CPU config node in the device tree\n");
+		return;
+	}
+
+	cpucfg_membase = of_iomap(node, 0);
+	if (!cpucfg_membase)
+		pr_err("Couldn't map A31 CPU config registers\n");
+
+}
+
+static int sun6i_smp_boot_secondary(unsigned int cpu,
+				    struct task_struct *idle)
+{
+	u32 reg;
+	int i;
+
+	if (!(prcm_membase && cpucfg_membase))
+		return -EFAULT;
+
+	spin_lock(&cpu_lock);
+
+	/* Set CPU boot address */
+	writel(__pa_symbol(secondary_startup),
+	       cpucfg_membase + CPUCFG_PRIVATE0_REG);
+
+	/* Assert the CPU core in reset */
+	writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
+
+	/* Assert the L1 cache in reset */
+	reg = readl(cpucfg_membase + CPUCFG_GEN_CTRL_REG);
+	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);
+
+	/* Disable external debug access */
+	reg = readl(cpucfg_membase + CPUCFG_DBG_CTL1_REG);
+	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);
+
+	/* Power up the CPU */
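+	/* release the power clamp in steps: 0xff, 0x7f, ..., 0x01, 0x00 */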
+	for (i = 0; i <= 8; i++)
+		writel(0xff >> i, prcm_membase + PRCM_CPU_PWR_CLAMP_REG(cpu));
+	mdelay(10);
+
+	/* Clear CPU power-off gating */
+	reg = readl(prcm_membase + PRCM_CPU_PWROFF_REG);
+	writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
+	mdelay(1);
+
+	/* Deassert the CPU core reset */
+	writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
+
+	/* Enable back the external debug accesses */
+	reg = readl(cpucfg_membase + CPUCFG_DBG_CTL1_REG);
+	writel(reg | BIT(cpu), cpucfg_membase + CPUCFG_DBG_CTL1_REG);
+
+	spin_unlock(&cpu_lock);
+
+	return 0;
+}
+
+static const struct smp_operations sun6i_smp_ops __initconst = {
+	.smp_prepare_cpus	= sun6i_smp_prepare_cpus,
+	.smp_boot_secondary	= sun6i_smp_boot_secondary,
+};
+CPU_METHOD_OF_DECLARE(sun6i_a31_smp, "allwinner,sun6i-a31", &sun6i_smp_ops);
+
+static void __init sun8i_smp_prepare_cpus(unsigned int max_cpus)
+{
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL, "allwinner,sun8i-a23-prcm");
+	if (!node) {
+		pr_err("Missing A23 PRCM node in the device tree\n");
+		return;
+	}
+
+	prcm_membase = of_iomap(node, 0);
+	if (!prcm_membase) {
+		pr_err("Couldn't map A23 PRCM registers\n");
+		return;
+	}
+
+	node = of_find_compatible_node(NULL, NULL,
+				       "allwinner,sun8i-a23-cpuconfig");
+	if (!node) {
+		pr_err("Missing A23 CPU config node in the device tree\n");
+		return;
+	}
+
+	cpucfg_membase = of_iomap(node, 0);
+	if (!cpucfg_membase)
+		pr_err("Couldn't map A23 CPU config registers\n");
+
+}
+
+static int sun8i_smp_boot_secondary(unsigned int cpu,
+				    struct task_struct *idle)
+{
+	u32 reg;
+
+	if (!(prcm_membase && cpucfg_membase))
+		return -EFAULT;
+
+	spin_lock(&cpu_lock);
+
+	/* Set CPU boot address */
+	writel(__pa_symbol(secondary_startup),
+	       cpucfg_membase + CPUCFG_PRIVATE0_REG);
+
+	/* Assert the CPU core in reset */
+	writel(0, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
+
+	/* Assert the L1 cache in reset */
+	reg = readl(cpucfg_membase + CPUCFG_GEN_CTRL_REG);
+	writel(reg & ~BIT(cpu), cpucfg_membase + CPUCFG_GEN_CTRL_REG);
+
+	/* Clear CPU power-off gating */
+	reg = readl(prcm_membase + PRCM_CPU_PWROFF_REG);
+	writel(reg & ~BIT(cpu), prcm_membase + PRCM_CPU_PWROFF_REG);
+	mdelay(1);
+
+	/* Deassert the CPU core reset */
+	writel(3, cpucfg_membase + CPUCFG_CPU_RST_CTRL_REG(cpu));
+
+	spin_unlock(&cpu_lock);
+
+	return 0;
+}
+
+static const struct smp_operations sun8i_smp_ops __initconst = {
+	.smp_prepare_cpus	= sun8i_smp_prepare_cpus,
+	.smp_boot_secondary	= sun8i_smp_boot_secondary,
+};
+CPU_METHOD_OF_DECLARE(sun8i_a23_smp, "allwinner,sun8i-a23", &sun8i_smp_ops);
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
new file mode 100644
index 0000000..de4b0e9
--- /dev/null
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -0,0 +1,103 @@
+/*
+ * Device Tree support for Allwinner A1X SoCs
+ *
+ * Copyright (C) 2012 Maxime Ripard
+ *
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach/arch.h>
+#include <asm/secure_cntvoff.h>
+
+static const char * const sunxi_board_dt_compat[] = {
+	"allwinner,sun4i-a10",
+	"allwinner,sun5i-a10s",
+	"allwinner,sun5i-a13",
+	"allwinner,sun5i-r8",
+	"nextthing,gr8",
+	NULL,
+};
+
+DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families")
+	.dt_compat	= sunxi_board_dt_compat,
+MACHINE_END
+
+static const char * const sun6i_board_dt_compat[] = {
+	"allwinner,sun6i-a31",
+	"allwinner,sun6i-a31s",
+	NULL,
+};
+
+extern void __init sun6i_reset_init(void);
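+/*
+ * Custom init_time hook: clocks are registered first, then the early
+ * reset controller, so that timers sitting behind reset lines (e.g.
+ * the high-speed timer) can be brought up by timer_probe().
+ */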
+static void __init sun6i_timer_init(void)
+{
+	of_clk_init(NULL);
+	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
+		sun6i_reset_init();
+	timer_probe();
+}
+
+DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
+	.init_time	= sun6i_timer_init,
+	.dt_compat	= sun6i_board_dt_compat,
+MACHINE_END
+
+static const char * const sun7i_board_dt_compat[] = {
+	"allwinner,sun7i-a20",
+	NULL,
+};
+
+DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
+	.dt_compat	= sun7i_board_dt_compat,
+MACHINE_END
+
+static const char * const sun8i_board_dt_compat[] = {
+	"allwinner,sun8i-a23",
+	"allwinner,sun8i-a33",
+	"allwinner,sun8i-h2-plus",
+	"allwinner,sun8i-h3",
+	"allwinner,sun8i-r40",
+	"allwinner,sun8i-v3s",
+	NULL,
+};
+
+DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
+	.init_time	= sun6i_timer_init,
+	.dt_compat	= sun8i_board_dt_compat,
+MACHINE_END
+
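+/*
+ * The early boot stages leave the arch timer's CNTVOFF uninitialized
+ * on the A83T; zero it from secure state for the boot CPU here.
+ * Secondaries do the same in sunxi_mc_smp_secondary_startup.
+ */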
+static void __init sun8i_a83t_cntvoff_init(void)
+{
+#ifdef CONFIG_SMP
+	secure_cntvoff_init();
+#endif
+}
+
+static const char * const sun8i_a83t_cntvoff_board_dt_compat[] = {
+	"allwinner,sun8i-a83t",
+	NULL,
+};
+
+DT_MACHINE_START(SUN8I_A83T_CNTVOFF_DT, "Allwinner A83t board")
+	.init_early	= sun8i_a83t_cntvoff_init,
+	.init_time	= sun6i_timer_init,
+	.dt_compat	= sun8i_a83t_cntvoff_board_dt_compat,
+MACHINE_END
+
+static const char * const sun9i_board_dt_compat[] = {
+	"allwinner,sun9i-a80",
+	NULL,
+};
+
+DT_MACHINE_START(SUN9I_DT, "Allwinner sun9i Family")
+	.dt_compat	= sun9i_board_dt_compat,
+MACHINE_END