Linux v4.19.13 snapshot of arch/mips/netlogic/common/, with each file shown as a newly added diff.
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
new file mode 100644
index 0000000..89f6e3f
--- /dev/null
+++ b/arch/mips/netlogic/common/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y				+= irq.o time.o
+obj-y				+= reset.o
+obj-$(CONFIG_SMP)		+= smp.o smpboot.o
+obj-$(CONFIG_EARLY_PRINTK)	+= earlycons.o
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
new file mode 100644
index 0000000..8f5bc15
--- /dev/null
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/serial_reg.h>
+
+#include <asm/mipsregs.h>
+#include <asm/setup.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/uart.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#endif
+
+void prom_putchar(char c)
+{
+	uint64_t uartbase;
+
+#if defined(CONFIG_CPU_XLP)
+	uartbase = nlm_get_uart_regbase(0, 0);
+#elif defined(CONFIG_CPU_XLR)
+	uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
+#endif
+	while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
+		;
+	nlm_write_reg(uartbase, UART_TX, c);
+}
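
(Not part of the patch.) prom_putchar() above is the classic polled 8250 write: spin on LSR.THRE, then store the byte to the transmit register. Below is a minimal user-space sketch of the same pattern against a mock register file so it actually runs; the UART_* offsets and bits match linux/serial_reg.h, while mock_uart and mock_putchar are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define UART_TX		0	/* transmit holding register (write) */
#define UART_LSR	5	/* line status register */
#define UART_LSR_THRE	0x20	/* transmitter holding register empty */

/* stand-in for the memory-mapped UART window the HAL base address points at */
static volatile uint32_t mock_uart[8] = { [UART_LSR] = UART_LSR_THRE };

static void mock_putchar(char c)
{
	/* busy-wait until the transmitter can accept another byte */
	while ((mock_uart[UART_LSR] & UART_LSR_THRE) == 0)
		;
	mock_uart[UART_TX] = (uint8_t)c;	/* write to THR */
	putchar(c);				/* pretend the byte left the FIFO */
}

int main(void)
{
	for (const char *s = "early console test\n"; *s; s++)
		mock_putchar(*s);
	return 0;
}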
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
new file mode 100644
index 0000000..f4961bc
--- /dev/null
+++ b/arch/mips/netlogic/common/irq.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/ptrace.h>
+#include <asm/mipsregs.h>
+#include <asm/thread_info.h>
+
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
+#else
+#error "Unknown CPU"
+#endif
+
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK	((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+				 (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK	0
+#endif
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+				(1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+	void	(*extra_ack)(struct irq_data *);
+	struct	nlm_soc_info *node;
+	int	picirq;
+	int	irt;
+	int	flags;
+};
+
+static void xlp_pic_enable(struct irq_data *d)
+{
+	unsigned long flags;
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+	BUG_ON(!pd);
+	spin_lock_irqsave(&pd->node->piclock, flags);
+	nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+	spin_unlock_irqrestore(&pd->node->piclock, flags);
+}
+
+static void xlp_pic_disable(struct irq_data *d)
+{
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+	unsigned long flags;
+
+	BUG_ON(!pd);
+	spin_lock_irqsave(&pd->node->piclock, flags);
+	nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+	spin_unlock_irqrestore(&pd->node->piclock, flags);
+}
+
+static void xlp_pic_mask_ack(struct irq_data *d)
+{
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+	clear_c0_eimr(pd->picirq);
+	ack_c0_eirr(pd->picirq);
+}
+
+static void xlp_pic_unmask(struct irq_data *d)
+{
+	struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+	BUG_ON(!pd);
+
+	if (pd->extra_ack)
+		pd->extra_ack(d);
+
+	/* re-enable the intr on this cpu */
+	set_c0_eimr(pd->picirq);
+
+	/* Ack is a single write, no need to lock */
+	nlm_pic_ack(pd->node->picbase, pd->irt);
+}
+
+static struct irq_chip xlp_pic = {
+	.name		= "XLP-PIC",
+	.irq_enable	= xlp_pic_enable,
+	.irq_disable	= xlp_pic_disable,
+	.irq_mask_ack	= xlp_pic_mask_ack,
+	.irq_unmask	= xlp_pic_unmask,
+};
+
+static void cpuintr_disable(struct irq_data *d)
+{
+	clear_c0_eimr(d->irq);
+}
+
+static void cpuintr_enable(struct irq_data *d)
+{
+	set_c0_eimr(d->irq);
+}
+
+static void cpuintr_ack(struct irq_data *d)
+{
+	ack_c0_eirr(d->irq);
+}
+
+/*
+ * Chip definition for CPU originated interrupts (timer, msg) and
+ * IPIs
+ */
+struct irq_chip nlm_cpu_intr = {
+	.name		= "XLP-CPU-INTR",
+	.irq_enable	= cpuintr_enable,
+	.irq_disable	= cpuintr_disable,
+	.irq_mask	= cpuintr_disable,
+	.irq_ack	= cpuintr_ack,
+	.irq_eoi	= cpuintr_enable,
+};
+
+static void __init nlm_init_percpu_irqs(void)
+{
+	int i;
+
+	for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
+		irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
+#ifdef CONFIG_SMP
+	irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
+			 nlm_smp_function_ipi_handler);
+	irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
+			 nlm_smp_resched_ipi_handler);
+#endif
+}
+
+
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+	struct nlm_pic_irq *pic_data;
+	int xirq;
+
+	xirq = nlm_irq_to_xirq(node, irq);
+	pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+	BUG_ON(pic_data == NULL);
+	pic_data->irt = irt;
+	pic_data->picirq = picirq;
+	pic_data->node = nlm_get_node(node);
+	irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+	irq_set_chip_data(xirq, pic_data);
+}
+
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
+{
+	struct nlm_pic_irq *pic_data;
+	int xirq;
+
+	xirq = nlm_irq_to_xirq(node, irq);
+	pic_data = irq_get_chip_data(xirq);
+	if (WARN_ON(!pic_data))
+		return;
+	pic_data->extra_ack = xack;
+}
+
+static void nlm_init_node_irqs(int node)
+{
+	struct nlm_soc_info *nodep;
+	int i, irt;
+
+	pr_info("Init IRQ for node %d\n", node);
+	nodep = nlm_get_node(node);
+	nodep->irqmask = PERCPU_IRQ_MASK;
+	for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+		irt = nlm_irq_to_irt(i);
+		if (irt == -1)		/* unused irq */
+			continue;
+		nodep->irqmask |= 1ull << i;
+		if (irt == -2)		/* not a direct PIC irq */
+			continue;
+
+		nlm_pic_init_irt(nodep->picbase, irt, i,
+				node * nlm_threads_per_node(), 0);
+		nlm_setup_pic_irq(node, i, i, irt);
+	}
+}
+
+void nlm_smp_irq_init(int hwtid)
+{
+	int cpu, node;
+
+	cpu = hwtid % nlm_threads_per_node();
+	node = hwtid / nlm_threads_per_node();
+
+	if (cpu == 0 && node != 0)
+		nlm_init_node_irqs(node);
+	write_c0_eimr(nlm_get_node(node)->irqmask);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+	uint64_t eirr;
+	int i, node;
+
+	node = nlm_nodeid();
+	eirr = read_c0_eirr_and_eimr();
+	if (eirr == 0)
+		return;
+
+	i = __ffs64(eirr);
+	/* per-CPU IRQs don't need translation */
+	if (i < PIC_IRQ_BASE) {
+		do_IRQ(i);
+		return;
+	}
+
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_CPU_XLP)
+	/* PCI interrupts need a second level dispatch for MSI bits */
+	if (i >= PIC_PCIE_LINK_MSI_IRQ(0) && i <= PIC_PCIE_LINK_MSI_IRQ(3)) {
+		nlm_dispatch_msi(node, i);
+		return;
+	}
+	if (i >= PIC_PCIE_MSIX_IRQ(0) && i <= PIC_PCIE_MSIX_IRQ(3)) {
+		nlm_dispatch_msix(node, i);
+		return;
+	}
+
+#endif
+	/* top level irq handling */
+	do_IRQ(nlm_irq_to_xirq(node, i));
+}
+
+#ifdef CONFIG_CPU_XLP
+static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init xlp_of_pic_init(struct device_node *node,
+					struct device_node *parent)
+{
+	const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
+	struct irq_domain *xlp_pic_domain;
+	struct resource res;
+	int socid, ret, bus;
+
+	/* we need a hack to get the PIC's SoC chip id */
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret < 0) {
+		pr_err("PIC %s: reg property not found!\n", node->name);
+		return -EINVAL;
+	}
+
+	if (cpu_is_xlp9xx()) {
+		bus = (res.start >> 20) & 0xf;
+		for (socid = 0; socid < NLM_NR_NODES; socid++) {
+			if (!nlm_node_present(socid))
+				continue;
+			if (nlm_get_node(socid)->socbus == bus)
+				break;
+		}
+		if (socid == NLM_NR_NODES) {
+			pr_err("PIC %s: Node mapping for bus %d not found!\n",
+					node->name, bus);
+			return -EINVAL;
+		}
+	} else {
+		socid = (res.start >> 18) & 0x3;
+		if (!nlm_node_present(socid)) {
+			pr_err("PIC %s: node %d does not exist!\n",
+							node->name, socid);
+			return -EINVAL;
+		}
+	}
+
+	xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
+		nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
+		&xlp_pic_irq_domain_ops, NULL);
+	if (xlp_pic_domain == NULL) {
+		pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
+		return -EINVAL;
+	}
+	pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
+	return 0;
+}
+
+static struct of_device_id __initdata xlp_pic_irq_ids[] = {
+	{ .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
+	{},
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+	/* Initialize the irq descriptors */
+	nlm_init_percpu_irqs();
+	nlm_init_node_irqs(0);
+	write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+	nlm_setup_fmn_irq();
+#endif
+#ifdef CONFIG_CPU_XLP
+	of_irq_init(xlp_pic_irq_ids);
+#endif
+}
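
(Not part of the patch.) plat_irq_dispatch() above services the lowest pending EIRR bit first and only translates vectors at or above PIC_IRQ_BASE into per-node "extended" IRQ numbers. A rough user-space model of that selection logic follows; the constants and the xirq formula here are stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

#define FAKE_PIC_IRQ_BASE	8	/* assumed per-CPU/PIC split point */
#define FAKE_IRQS_PER_NODE	64	/* assumed per-node IRQ space */

static int fake_irq_to_xirq(int node, int irq)
{
	return node * FAKE_IRQS_PER_NODE + irq;	/* illustrative mapping only */
}

static void fake_dispatch(int node, uint64_t eirr)
{
	int i;

	if (eirr == 0)
		return;
	i = __builtin_ctzll(eirr);	/* lowest set bit, like __ffs64() */
	if (i < FAKE_PIC_IRQ_BASE)	/* per-CPU vectors need no translation */
		printf("per-CPU vector %d -> do_IRQ(%d)\n", i, i);
	else
		printf("PIC vector %d on node %d -> do_IRQ(%d)\n",
		       i, node, fake_irq_to_xirq(node, i));
}

int main(void)
{
	fake_dispatch(0, (1ULL << 7) | (1ULL << 20));	/* lower vector wins */
	fake_dispatch(1, 1ULL << 20);	/* same vector, different node/xirq */
	return 0;
}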
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
new file mode 100644
index 0000000..c474981
--- /dev/null
+++ b/arch/mips/netlogic/common/reset.S
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2003-2013 Broadcom Corporation.
+ * All Rights Reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpu.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+#define SYS_CPU_COHERENT_BASE	CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+			XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
+			SYS_CPU_NONCOHERENT_MODE * 4
+
+/* Enable XLP features and workarounds in the LSU */
+.macro xlp_config_lsu
+	li	t0, LSU_DEFEATURE
+	mfcr	t1, t0
+
+	lui	t2, 0x4080	/* Enable Unaligned Access, L2HPE */
+	or	t1, t1, t2
+	mtcr	t1, t0
+
+	li	t0, ICU_DEFEATURE
+	mfcr	t1, t0
+	ori	t1, 0x1000	/* Enable Icache partitioning */
+	mtcr	t1, t0
+
+	li	t0, SCHED_DEFEATURE
+	lui	t1, 0x0100	/* Disable BRU accepting ALU ops */
+	mtcr	t1, t0
+.endm
+
+/*
+ * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN
+ * register. This is needed before going to C code since the SP can
+ * be in this region. Called from all HW threads.
+ */
+.macro xlp_early_mmu_init
+	mfc0	t0, CP0_PAGEMASK, 1
+	li	t1, (1 << 29)		/* ELPA bit */
+	or	t0, t1
+	mtc0	t0, CP0_PAGEMASK, 1
+.endm
+
+/*
+ * L1D cache has to be flushed before enabling threads in XLP.
+ * On XLP8xx/XLP3xx, we do a low level flush using processor control
+ * registers. On XLPII CPUs, usual cache instructions work.
+ */
+.macro	xlp_flush_l1_dcache
+	mfc0	t0, CP0_PRID
+	andi	t0, t0, PRID_IMP_MASK
+	slt	t1, t0, 0x1200
+	beqz	t1, 15f
+	nop
+
+	/* XLP8xx low level cache flush */
+	li	t0, LSU_DEBUG_DATA0
+	li	t1, LSU_DEBUG_ADDR
+	li	t2, 0		/* index */
+	li	t3, 0x1000	/* loop count */
+11:
+	sll	v0, t2, 5
+	mtcr	zero, t0
+	ori	v1, v0, 0x3	/* way0 | write_enable | write_active */
+	mtcr	v1, t1
+12:
+	mfcr	v1, t1
+	andi	v1, 0x1		/* wait for write_active == 0 */
+	bnez	v1, 12b
+	nop
+	mtcr	zero, t0
+	ori	v1, v0, 0x7	/* way1 | write_enable | write_active */
+	mtcr	v1, t1
+13:
+	mfcr	v1, t1
+	andi	v1, 0x1		/* wait for write_active == 0 */
+	bnez	v1, 13b
+	nop
+	addi	t2, 1
+	bne	t3, t2, 11b
+	nop
+	b	17f
+	nop
+
+	/* XLPII CPUs, Invalidate all 64k of L1 D-cache */
+15:
+	li	t0, 0x80000000
+	li	t1, 0x80010000
+16:	cache	Index_Writeback_Inv_D, 0(t0)
+	addiu	t0, t0, 32
+	bne	t0, t1, 16b
+	nop
+17:
+.endm
+
+/*
+ * nlm_reset_entry will be copied to the reset entry point for
+ * XLR and XLP. The XLP cores start here when they are woken up. This
+ * is also the NMI entry point.
+ *
+ * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
+ *
+ * The data corresponding to reset/NMI is stored at RESET_DATA_PHYS
+ * location, this will have the thread mask (used when core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
+ *
+ * When a core or thread is newly woken up, it marks itself ready and
+ * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
+ * IPI to it, with the NMI handler set to prom_boot_secondary_cpus
+ */
+	.set	noreorder
+	.set	noat
+	.set	arch=xlr	/* for mfcr/mtcr, XLR is sufficient */
+
+FEXPORT(nlm_reset_entry)
+	dmtc0	k0, $22, 6
+	dmtc0	k1, $22, 7
+	mfc0	k0, CP0_STATUS
+	li	k1, 0x80000
+	and	k1, k0, k1
+	beqz	k1, 1f		/* go to real reset entry */
+	nop
+	li	k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+	ld	k0, BOOT_NMI_HANDLER(k1)
+	jr	k0
+	nop
+
+1:	/* Entry point on core wakeup */
+	mfc0	t0, CP0_PRID		/* processor ID */
+	andi	t0, PRID_IMP_MASK
+	li	t1, 0x1500		/* XLP 9xx */
+	beq	t0, t1, 2f		/* does not need to set coherent */
+	nop
+
+	li	t1, 0x1300		/* XLP 5xx */
+	beq	t0, t1, 2f		/* does not need to set coherent */
+	nop
+
+	/* set bit in SYS coherent register for the core */
+	mfc0	t0, CP0_EBASE
+	mfc0	t1, CP0_EBASE
+	srl	t1, 5
+	andi	t1, 0x3			/* t1 <- node */
+	li	t2, 0x40000
+	mul	t3, t2, t1		/* t3 = node * 0x40000 */
+	srl	t0, t0, 2
+	and	t0, t0, 0x7		/* t0 <- core */
+	li	t1, 0x1
+	sll	t0, t1, t0
+	nor	t0, t0, zero		/* t0 <- ~(1 << core) */
+	li	t2, SYS_CPU_COHERENT_BASE
+	add	t2, t2, t3		/* t2 <- SYS offset for node */
+	lw	t1, 0(t2)
+	and	t1, t1, t0
+	sw	t1, 0(t2)
+
+	/* read back to ensure complete */
+	lw	t1, 0(t2)
+	sync
+
+2:
+	/* Configure LSU on Non-0 Cores. */
+	xlp_config_lsu
+	/* FALL THROUGH */
+
+/*
+ * Wake up sibling threads from the initial thread in a core.
+ */
+EXPORT(nlm_boot_siblings)
+	/* core L1D flush before enable threads */
+	xlp_flush_l1_dcache
+	/* save ra and sp, will be used later (only for boot cpu) */
+	dmtc0	ra, $22, 6
+	dmtc0	sp, $22, 7
+	/* Enable hw threads by writing to MAP_THREADMODE of the core */
+	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
+	lw	t1, BOOT_THREAD_MODE(t0)	/* t1 <- thread mode */
+	li	t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
+	mfcr	t2, t0
+	or	t2, t2, t1
+	mtcr	t2, t0
+
+	/*
+	 * The new hardware thread starts at the next instruction
+	 * For all the cases other than core 0 thread 0, we will
+	 * jump to the secondary wait function.
+	 *
+	 * NOTE: All GPR contents are lost after the mtcr above!
+	 */
+	mfc0	v0, CP0_EBASE
+	andi	v0, 0x3ff		/* v0 <- node/core */
+
+	/*
+	 * Errata: to avoid potential live lock, setup IFU_BRUB_RESERVE
+	 * when running 4 threads per core
+	 */
+	andi	v1, v0, 0x3             /* v1 <- thread id */
+	bnez	v1, 2f
+	nop
+
+	/* thread 0 of each core. */
+	li	t0, CKSEG1ADDR(RESET_DATA_PHYS)
+	lw	t1, BOOT_THREAD_MODE(t0)        /* t1 <- thread mode */
+	subu	t1, 0x3				/* 4-thread per core mode? */
+	bnez	t1, 2f
+	nop
+
+	li	t0, IFU_BRUB_RESERVE
+	li	t1, 0x55
+	mtcr	t1, t0
+	_ehb
+2:
+	beqz	v0, 4f		/* boot cpu (cpuid == 0)? */
+	nop
+
+	/* setup status reg */
+	move	t1, zero
+#ifdef CONFIG_64BIT
+	ori	t1, ST0_KX
+#endif
+	mtc0	t1, CP0_STATUS
+
+	xlp_early_mmu_init
+
+	/* mark CPU ready */
+	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
+	ADDIU	t1, t3, BOOT_CPU_READY
+	sll	v1, v0, 2
+	PTR_ADDU t1, v1
+	li	t2, 1
+	sw	t2, 0(t1)
+	/* Wait until NMI hits */
+3:	wait
+	b	3b
+	nop
+
+	/*
+	 * For the boot CPU, we have to restore ra and sp and return; the
+	 * rest of the registers will be restored by the caller.
+	 */
+4:
+	dmfc0	ra, $22, 6
+	dmfc0	sp, $22, 7
+	jr	ra
+	nop
+EXPORT(nlm_reset_entry_end)
+
+LEAF(nlm_init_boot_cpu)
+#ifdef CONFIG_CPU_XLP
+	xlp_config_lsu
+	xlp_early_mmu_init
+#endif
+	jr	ra
+	nop
+END(nlm_init_boot_cpu)
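
(Not part of the patch.) The "mark CPU ready" step above stores a 1 into a per-hwtid word at BOOT_CPU_READY inside the reset data block; nlm_smp_setup() in smp.c later scans the same array to decide which hardware threads are parked in 'wait'. A small C model of that handshake, with the array size and the scan made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define FAKE_NR_CPUS	32	/* assumed number of hardware-thread slots */

/* stands in for the word array at RESET_DATA_PHYS + BOOT_CPU_READY */
static volatile uint32_t cpu_ready[FAKE_NR_CPUS];

static void mark_cpu_ready(int hwtid)	/* what each woken thread does */
{
	cpu_ready[hwtid] = 1;
}

int main(void)
{
	mark_cpu_ready(1);
	mark_cpu_ready(4);

	/* roughly what nlm_smp_setup() does with the BOOT_CPU_READY data */
	for (int i = 0; i < FAKE_NR_CPUS; i++)
		if (cpu_ready[i])
			printf("hwtid %d is up and waiting for its NMI\n", i);
	return 0;
}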
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
new file mode 100644
index 0000000..39a300b
--- /dev/null
+++ b/arch/mips/netlogic/common/smp.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/mmu_context.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+void nlm_send_ipi_single(int logical_cpu, unsigned int action)
+{
+	unsigned int hwtid;
+	uint64_t picbase;
+
+	/* node id is part of hwtid, and needed for send_ipi */
+	hwtid = cpu_logical_map(logical_cpu);
+	picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
+
+	if (action & SMP_CALL_FUNCTION)
+		nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
+	if (action & SMP_RESCHEDULE_YOURSELF)
+		nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
+}
+
+void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+	int cpu;
+
+	for_each_cpu(cpu, mask) {
+		nlm_send_ipi_single(cpu, action);
+	}
+}
+
+/* IRQ_IPI_SMP_FUNCTION Handler */
+void nlm_smp_function_ipi_handler(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	clear_c0_eimr(irq);
+	ack_c0_eirr(irq);
+	generic_smp_call_function_interrupt();
+	set_c0_eimr(irq);
+}
+
+/* IRQ_IPI_SMP_RESCHEDULE  handler */
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	clear_c0_eimr(irq);
+	ack_c0_eirr(irq);
+	scheduler_ipi();
+	set_c0_eimr(irq);
+}
+
+/*
+ * Called before going into mips code, early cpu init
+ */
+void nlm_early_init_secondary(int cpu)
+{
+	change_c0_config(CONF_CM_CMASK, 0x3);
+#ifdef CONFIG_CPU_XLP
+	xlp_mmu_init();
+#endif
+	write_c0_ebase(nlm_current_node()->ebase);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void nlm_init_secondary(void)
+{
+	int hwtid;
+
+	hwtid = hard_smp_processor_id();
+	cpu_set_core(&current_cpu_data, hwtid / NLM_THREADS_PER_CORE);
+	current_cpu_data.package = nlm_nodeid();
+	nlm_percpu_init(hwtid);
+	nlm_smp_irq_init(hwtid);
+}
+
+void nlm_prepare_cpus(unsigned int max_cpus)
+{
+	/* declare we are SMT capable */
+	smp_num_siblings = nlm_threads_per_core;
+}
+
+void nlm_smp_finish(void)
+{
+	local_irq_enable();
+}
+
+/*
+ * Boot all other cpus in the system, initialize them, and bring them into
+ * the boot function
+ */
+unsigned long nlm_next_gp;
+unsigned long nlm_next_sp;
+static cpumask_t phys_cpu_present_mask;
+
+int nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
+{
+	uint64_t picbase;
+	int hwtid;
+
+	hwtid = cpu_logical_map(logical_cpu);
+	picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
+
+	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+	nlm_next_gp = (unsigned long)task_thread_info(idle);
+
+	/* barrier for sp/gp store above */
+	__sync();
+	nlm_pic_send_ipi(picbase, hwtid, 1, 1);  /* NMI */
+
+	return 0;
+}
+
+void __init nlm_smp_setup(void)
+{
+	unsigned int boot_cpu;
+	int num_cpus, i, ncore, node;
+	volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+
+	boot_cpu = hard_smp_processor_id();
+	cpumask_clear(&phys_cpu_present_mask);
+
+	cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
+	__cpu_number_map[boot_cpu] = 0;
+	__cpu_logical_map[0] = boot_cpu;
+	set_cpu_possible(0, true);
+
+	num_cpus = 1;
+	for (i = 0; i < NR_CPUS; i++) {
+		/*
+		 * cpu_ready array is not set for the boot_cpu,
+		 * it is only set for ASPs (see smpboot.S)
+		 */
+		if (cpu_ready[i]) {
+			cpumask_set_cpu(i, &phys_cpu_present_mask);
+			__cpu_number_map[i] = num_cpus;
+			__cpu_logical_map[num_cpus] = i;
+			set_cpu_possible(num_cpus, true);
+			node = nlm_hwtid_to_node(i);
+			cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
+			++num_cpus;
+		}
+	}
+
+	pr_info("Physical CPU mask: %*pb\n",
+		cpumask_pr_args(&phys_cpu_present_mask));
+	pr_info("Possible CPU mask: %*pb\n",
+		cpumask_pr_args(cpu_possible_mask));
+
+	/* check with the cores we have woken up */
+	for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+		ncore += hweight32(nlm_get_node(i)->coremask);
+
+	pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+		nlm_threads_per_core, num_cpus);
+
+	/* switch NMI handler to boot CPUs */
+	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
+}
+
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
+{
+	uint32_t core0_thr_mask, core_thr_mask;
+	int threadmode, i, j;
+
+	core0_thr_mask = 0;
+	for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+		if (cpumask_test_cpu(i, wakeup_mask))
+			core0_thr_mask |= (1 << i);
+	switch (core0_thr_mask) {
+	case 1:
+		nlm_threads_per_core = 1;
+		threadmode = 0;
+		break;
+	case 3:
+		nlm_threads_per_core = 2;
+		threadmode = 2;
+		break;
+	case 0xf:
+		nlm_threads_per_core = 4;
+		threadmode = 3;
+		break;
+	default:
+		goto unsupp;
+	}
+
+	/* Verify other cores CPU masks */
+	for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+		core_thr_mask = 0;
+		for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+			if (cpumask_test_cpu(i + j, wakeup_mask))
+				core_thr_mask |= (1 << j);
+		if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
+				goto unsupp;
+	}
+	return threadmode;
+
+unsupp:
+	panic("Unsupported CPU mask %*pb", cpumask_pr_args(wakeup_mask));
+	return 0;
+}
+
+int nlm_wakeup_secondary_cpus(void)
+{
+	u32 *reset_data;
+	int threadmode;
+
+	/* verify the mask and setup core config variables */
+	threadmode = nlm_parse_cpumask(&nlm_cpumask);
+
+	/* Setup CPU init parameters */
+	reset_data = nlm_get_boot_data(BOOT_THREAD_MODE);
+	*reset_data = threadmode;
+
+#ifdef CONFIG_CPU_XLP
+	xlp_wakeup_secondary_cpus();
+#else
+	xlr_wakeup_secondary_cpus();
+#endif
+	return 0;
+}
+
+const struct plat_smp_ops nlm_smp_ops = {
+	.send_ipi_single	= nlm_send_ipi_single,
+	.send_ipi_mask		= nlm_send_ipi_mask,
+	.init_secondary		= nlm_init_secondary,
+	.smp_finish		= nlm_smp_finish,
+	.boot_secondary		= nlm_boot_secondary,
+	.smp_setup		= nlm_smp_setup,
+	.prepare_cpus		= nlm_prepare_cpus,
+};
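
(Not part of the patch.) The wakeup-mask handling in nlm_parse_cpumask() boils down to a small table: core 0's thread mask 0x1, 0x3 or 0xf selects 1, 2 or 4 threads per core (threadmode 0, 2 or 3), and every other core must either be absent or use the same mask. A standalone sketch of just that table; the helper name and the demo values are invented.

#include <stdio.h>

/* returns the threadmode, or -1 for an unsupported core-0 thread mask */
static int threadmode_from_mask(unsigned int core0_thr_mask, int *threads)
{
	switch (core0_thr_mask) {
	case 0x1: *threads = 1; return 0;
	case 0x3: *threads = 2; return 2;
	case 0xf: *threads = 4; return 3;
	default:  *threads = 0; return -1;
	}
}

int main(void)
{
	int threads, mode;

	mode = threadmode_from_mask(0xf, &threads);
	printf("mask 0xf -> threadmode %d, %d thread(s)/core\n", mode, threads);

	mode = threadmode_from_mask(0x5, &threads);
	printf("mask 0x5 -> %s\n", mode < 0 ? "unsupported" : "supported");
	return 0;
}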
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
new file mode 100644
index 0000000..509c1a7
--- /dev/null
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+	.set	noreorder
+	.set	noat
+	.set	arch=xlr		/* for mfcr/mtcr, XLR is sufficient */
+
+/* Called by the boot cpu to wake up its sibling threads */
+NESTED(xlp_boot_core0_siblings, PT_SIZE, sp)
+	/* CPU register contents lost when enabling threads, save them first */
+	SAVE_ALL
+	sync
+	/* find the location to which nlm_boot_siblings was relocated */
+	li	t0, CKSEG1ADDR(RESET_VEC_PHYS)
+	PTR_LA	t1, nlm_reset_entry
+	PTR_LA	t2, nlm_boot_siblings
+	dsubu	t2, t1
+	daddu	t2, t0
+	/* call it */
+	jalr	t2
+	nop
+	RESTORE_ALL
+	jr	ra
+	nop
+END(xlp_boot_core0_siblings)
+
+NESTED(nlm_boot_secondary_cpus, 16, sp)
+	/* Initialize CP0 Status */
+	move	t1, zero
+#ifdef CONFIG_64BIT
+	ori	t1, ST0_KX
+#endif
+	mtc0	t1, CP0_STATUS
+	PTR_LA	t1, nlm_next_sp
+	PTR_L	sp, 0(t1)
+	PTR_LA	t1, nlm_next_gp
+	PTR_L	gp, 0(t1)
+
+	/* a0 has the processor id */
+	mfc0	a0, CP0_EBASE
+	andi	a0, 0x3ff		/* a0 <- node/core */
+	PTR_LA	t0, nlm_early_init_secondary
+	jalr	t0
+	nop
+
+	PTR_LA	t0, smp_bootstrap
+	jr	t0
+	nop
+END(nlm_boot_secondary_cpus)
+
+/*
+ * In case of the RMIboot bootloader used on XLR boards, the CPUs will
+ * already be woken up and waiting in bootloader code.
+ * This will get them out of the bootloader code and into Linux. Needed
+ * because the bootloader area will be taken and initialized by Linux.
+ */
+NESTED(nlm_rmiboot_preboot, 16, sp)
+	mfc0	t0, $15, 1	/* read ebase */
+	andi	t0, 0x1f	/* t0 has the processor_id() */
+	andi	t2, t0, 0x3	/* thread num */
+	sll	t0, 2		/* offset in cpu array */
+
+	li	t3, CKSEG1ADDR(RESET_DATA_PHYS)
+	ADDIU	t1, t3, BOOT_CPU_READY
+	ADDU	t1, t0
+	li	t3, 1
+	sw	t3, 0(t1)
+
+	bnez	t2, 1f		/* skip thread programming */
+	nop			/* for thread id != 0 */
+
+	/*
+	 * XLR MMU setup only for first thread in core
+	 */
+	li	t0, 0x400
+	mfcr	t1, t0
+	li	t2, 6		/* XLR thread mode mask */
+	nor	t3, t2, zero
+	and	t2, t1, t2	/* t2 - current thread mode */
+	li	v0, CKSEG1ADDR(RESET_DATA_PHYS)
+	lw	v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
+	sll	v1, 1
+	beq	v1, t2, 1f	/* same as request value */
+	nop			/* nothing to do */
+
+	and	t2, t1, t3	/* mask out old thread mode */
+	or	t1, t2, v1	/* put in new value */
+	mtcr	t1, t0		/* update core control */
+
+	/* wait for NMI to hit */
+1:	wait
+	b	1b
+	nop
+END(nlm_rmiboot_preboot)
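
(Not part of the patch.) xlp_boot_core0_siblings above cannot call nlm_boot_siblings at its link-time address, because the reset code has been copied to the reset vector; it therefore jumps to "reset vector + (nlm_boot_siblings - nlm_reset_entry)". The arithmetic, shown with made-up example addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* all three addresses are illustrative, not the real values */
	uint64_t reset_vec	= 0xffffffffbfc00000ULL; /* CKSEG1 view of the reset vector */
	uint64_t reset_entry	= 0xffffffff80100000ULL; /* nlm_reset_entry, link time */
	uint64_t boot_siblings	= 0xffffffff801000e0ULL; /* nlm_boot_siblings, link time */

	uint64_t target = reset_vec + (boot_siblings - reset_entry);
	printf("relocated nlm_boot_siblings at %#llx\n",
	       (unsigned long long)target);
	return 0;
}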
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
new file mode 100644
index 0000000..cbbf0d4
--- /dev/null
+++ b/arch/mips/netlogic/common/time.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/cpu-features.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+unsigned int get_c0_compare_int(void)
+{
+	return IRQ_TIMER;
+}
+
+static u64 nlm_get_pic_timer(struct clocksource *cs)
+{
+	uint64_t picbase = nlm_get_node(0)->picbase;
+
+	return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static u64 nlm_get_pic_timer32(struct clocksource *cs)
+{
+	uint64_t picbase = nlm_get_node(0)->picbase;
+
+	return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
+
+static struct clocksource csrc_pic = {
+	.name		= "PIC",
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+	uint64_t picbase = nlm_get_node(0)->picbase;
+	u32 picfreq;
+
+	nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
+	if (current_cpu_data.cputype == CPU_XLR) {
+		csrc_pic.mask	= CLOCKSOURCE_MASK(32);
+		csrc_pic.read	= nlm_get_pic_timer32;
+	} else {
+		csrc_pic.mask	= CLOCKSOURCE_MASK(64);
+		csrc_pic.read	= nlm_get_pic_timer;
+	}
+	csrc_pic.rating = 1000;
+	picfreq = pic_timer_freq();
+	clocksource_register_hz(&csrc_pic, picfreq);
+	pr_info("PIC clock source added, frequency %d\n", picfreq);
+}
+
+void __init plat_time_init(void)
+{
+	nlm_init_pic_timer();
+	mips_hpt_frequency = nlm_get_cpu_frequency();
+	if (current_cpu_type() == CPU_XLR)
+		preset_lpj = mips_hpt_frequency / (3 * HZ);
+	else
+		preset_lpj = mips_hpt_frequency / (2 * HZ);
+	pr_info("MIPS counter frequency [%ld]\n",
+			(unsigned long)mips_hpt_frequency);
+}