Snapshot of arch/nds32/kernel from Linux v4.19.13.
diff --git a/arch/nds32/kernel/Makefile b/arch/nds32/kernel/Makefile
new file mode 100644
index 0000000..27cded3
--- /dev/null
+++ b/arch/nds32/kernel/Makefile
@@ -0,0 +1,29 @@
+#
+# Makefile for the linux kernel.
+#
+
+CPPFLAGS_vmlinux.lds	:= -DTEXTADDR=$(TEXTADDR)
+AFLAGS_head.o		:= -DTEXTADDR=$(TEXTADDR)
+
+# Object file lists.
+
+obj-y			:= ex-entry.o ex-exit.o ex-scall.o irq.o \
+			process.o ptrace.o setup.o signal.o \
+			sys_nds32.o time.o traps.o cacheinfo.o \
+			dma.o syscall_table.o vdso.o
+
+obj-$(CONFIG_MODULES)		+= nds32_ksyms.o module.o
+obj-$(CONFIG_STACKTRACE)	+= stacktrace.o
+obj-$(CONFIG_OF)		+= devtree.o
+obj-$(CONFIG_CACHE_L2)		+= atl2c.o
+
+extra-y := head.o vmlinux.lds
+
+
+obj-y				+= vdso/
+
+obj-$(CONFIG_FUNCTION_TRACER)   += ftrace.o
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+endif
diff --git a/arch/nds32/kernel/asm-offsets.c b/arch/nds32/kernel/asm-offsets.c
new file mode 100644
index 0000000..3541d59
--- /dev/null
+++ b/arch/nds32/kernel/asm-offsets.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kbuild.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+int main(void)
+{
+	DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+	DEFINE(TSK_TI_PREEMPT,
+	       offsetof(struct task_struct, thread_info.preempt_count));
+	DEFINE(THREAD_CPU_CONTEXT,
+	       offsetof(struct task_struct, thread.cpu_context));
+	DEFINE(OSP_OFFSET, offsetof(struct pt_regs, osp));
+	DEFINE(SP_OFFSET, offsetof(struct pt_regs, sp));
+	DEFINE(FUCOP_CTL_OFFSET, offsetof(struct pt_regs, fucop_ctl));
+	DEFINE(IPSW_OFFSET, offsetof(struct pt_regs, ipsw));
+	DEFINE(SYSCALLNO_OFFSET, offsetof(struct pt_regs, syscallno));
+	DEFINE(IPC_OFFSET, offsetof(struct pt_regs, ipc));
+	DEFINE(R0_OFFSET, offsetof(struct pt_regs, uregs[0]));
+	DEFINE(R15_OFFSET, offsetof(struct pt_regs, uregs[15]));
+	DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
+	DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
+	return 0;
+}
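
None of the DEFINE() entries above ever run: kbuild compiles asm-offsets.c to assembly and a sed script scrapes the generated "->SYM value" markers into include/generated/asm-offsets.h, which is what lets ex-entry.S and friends address pt_regs fields by name. A minimal standalone sketch of the mechanism (the demo struct is made up; the DEFINE macro mirrors include/linux/kbuild.h):

/* Compile with "gcc -S" and look for "->IPC_OFFSET <n>" in the output;
 * kbuild's sed script rewrites each marker into "#define IPC_OFFSET <n>". */
#include <stddef.h>

struct demo_regs { unsigned long uregs[26]; unsigned long ipc; };

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

int main(void)
{
	DEFINE(IPC_OFFSET, offsetof(struct demo_regs, ipc));
	return 0;
}
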
diff --git a/arch/nds32/kernel/atl2c.c b/arch/nds32/kernel/atl2c.c
new file mode 100644
index 0000000..0c5386e
--- /dev/null
+++ b/arch/nds32/kernel/atl2c.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/compiler.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <asm/l2_cache.h>
+
+void __iomem *atl2c_base;
+static const struct of_device_id atl2c_ids[] __initconst = {
+	{.compatible = "andestech,atl2c",},
+	{}
+};
+
+static int __init atl2c_of_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	unsigned long tmp = 0;
+	unsigned long l2set, l2way, l2clsz;
+
+	if (!(__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C))
+		return -ENODEV;
+
+	np = of_find_matching_node(NULL, atl2c_ids);
+	if (!np)
+		return -ENODEV;
+
+	if (of_address_to_resource(np, 0, &res))
+		return -ENODEV;
+
+	atl2c_base = ioremap(res.start, resource_size(&res));
+	if (!atl2c_base)
+		return -ENOMEM;
+
+	l2set =
+	    64 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2SET) >>
+		   L2_CA_CONF_offL2SET);
+	l2way =
+	    1 +
+	    ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2WAY) >>
+	     L2_CA_CONF_offL2WAY);
+	l2clsz =
+	    4 << ((L2C_R_REG(L2_CA_CONF_OFF) & L2_CA_CONF_mskL2CLSZ) >>
+		  L2_CA_CONF_offL2CLSZ);
+	pr_info("L2:%luKB/%luS/%luW/%luB\n",
+		l2set * l2way * l2clsz / 1024, l2set, l2way, l2clsz);
+
+	tmp = L2C_R_REG(L2CC_PROT_OFF);
+	tmp &= ~L2CC_PROT_mskMRWEN;
+	L2C_W_REG(L2CC_PROT_OFF, tmp);
+
+	tmp = L2C_R_REG(L2CC_SETUP_OFF);
+	tmp &= ~L2CC_SETUP_mskPART;
+	L2C_W_REG(L2CC_SETUP_OFF, tmp);
+
+	tmp = L2C_R_REG(L2CC_CTRL_OFF);
+	tmp |= L2CC_CTRL_mskEN;
+	L2C_W_REG(L2CC_CTRL_OFF, tmp);
+
+	return 0;
+}
+
+subsys_initcall(atl2c_of_init);
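
The L2_CA_CONF fields decoded above are log-encoded, so the geometry math is easy to sanity-check in a standalone program (the three field values below are made up):

#include <stdio.h>

int main(void)
{
	unsigned long set_f = 3, way_f = 7, clsz_f = 4;	/* hypothetical fields */
	unsigned long l2set = 64UL << set_f;		/* 512 sets */
	unsigned long l2way = 1 + way_f;		/* 8 ways */
	unsigned long l2clsz = 4UL << clsz_f;		/* 64-byte lines */

	printf("L2:%luKB/%luS/%luW/%luB\n",
	       l2set * l2way * l2clsz / 1024, l2set, l2way, l2clsz);
	return 0;					/* L2:256KB/512S/8W/64B */
}
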
diff --git a/arch/nds32/kernel/cacheinfo.c b/arch/nds32/kernel/cacheinfo.c
new file mode 100644
index 0000000..0a7bc69
--- /dev/null
+++ b/arch/nds32/kernel/cacheinfo.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+			 enum cache_type type, unsigned int level)
+{
+	char cache_type = (type & CACHE_TYPE_INST ? ICACHE : DCACHE);
+
+	this_leaf->level = level;
+	this_leaf->type = type;
+	this_leaf->coherency_line_size = CACHE_LINE_SIZE(cache_type);
+	this_leaf->number_of_sets = CACHE_SET(cache_type);
+	this_leaf->ways_of_associativity = CACHE_WAY(cache_type);
+	this_leaf->size = this_leaf->number_of_sets *
+	    this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
+#if defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
+	this_leaf->attributes = CACHE_WRITE_THROUGH;
+#else
+	this_leaf->attributes = CACHE_WRITE_BACK;
+#endif
+}
+
+int init_cache_level(unsigned int cpu)
+{
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+	/* Only one cache level, with separate I- and D-caches. */
+	this_cpu_ci->num_levels = 1;
+	this_cpu_ci->num_leaves = 2;
+	return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+	unsigned int level, idx;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+	     idx < this_cpu_ci->num_leaves; idx++, level++) {
+		ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+		ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+	}
+	return 0;
+}
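
The two leaves populated here surface through the generic cacheinfo sysfs tree; a quick userspace check, assuming the standard layout (index0 is the data leaf initialized first above):

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cache/index0/size", "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("L1D size: %s", buf);	/* e.g. "32K" */
	if (f)
		fclose(f);
	return 0;
}
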
diff --git a/arch/nds32/kernel/devtree.c b/arch/nds32/kernel/devtree.c
new file mode 100644
index 0000000..bdce0fe
--- /dev/null
+++ b/arch/nds32/kernel/devtree.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/bug.h>
+#include <linux/printk.h>
+#include <linux/of_fdt.h>
+
+void __init early_init_devtree(void *params)
+{
+	if (!params || !early_init_dt_scan(params)) {
+		pr_crit("\n"
+			"Error: invalid device tree blob at (virtual address 0x%p)\n"
+			"\nPlease check your bootloader.", params);
+
+		BUG_ON(1);
+	}
+
+	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
+}
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
new file mode 100644
index 0000000..d0dbd4f
--- /dev/null
+++ b/arch/nds32/kernel/dma.c
@@ -0,0 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/dma-noncoherent.h>
+#include <linux/io.h>
+#include <linux/cache.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/proc-fns.h>
+
+/*
+ * This is the page table (2MB) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte;
+static DEFINE_RAW_SPINLOCK(consistent_lock);
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vm_region	region;
+ *    unsigned long	flags;
+ *    struct page	**pages;
+ *    unsigned int	nr_pages;
+ *    unsigned long	phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vm_region_alloc with an appropriate
+ * struct vm_region head (eg):
+ *
+ *  struct vm_region vmalloc_head = {
+ *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *	.vm_start	= VMALLOC_START,
+ *	.vm_end		= VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vm_region_alloc().
+ */
+struct arch_vm_region {
+	struct list_head vm_list;
+	unsigned long vm_start;
+	unsigned long vm_end;
+	struct page *vm_pages;
+};
+
+static struct arch_vm_region consistent_head = {
+	.vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
+	.vm_start = CONSISTENT_BASE,
+	.vm_end = CONSISTENT_END,
+};
+
+static struct arch_vm_region *vm_region_alloc(struct arch_vm_region *head,
+					      size_t size, int gfp)
+{
+	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long flags;
+	struct arch_vm_region *c, *new;
+
+	new = kmalloc(sizeof(struct arch_vm_region), gfp);
+	if (!new)
+		goto out;
+
+	raw_spin_lock_irqsave(&consistent_lock, flags);
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if ((addr + size) < addr)
+			goto nospc;
+		if ((addr + size) <= c->vm_start)
+			goto found;
+		addr = c->vm_end;
+		if (addr > end)
+			goto nospc;
+	}
+
+found:
+	/*
+	 * Insert this entry _before_ the one we found.
+	 */
+	list_add_tail(&new->vm_list, &c->vm_list);
+	new->vm_start = addr;
+	new->vm_end = addr + size;
+
+	raw_spin_unlock_irqrestore(&consistent_lock, flags);
+	return new;
+
+nospc:
+	raw_spin_unlock_irqrestore(&consistent_lock, flags);
+	kfree(new);
+out:
+	return NULL;
+}
+
+static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
+					     unsigned long addr)
+{
+	struct arch_vm_region *c;
+
+	list_for_each_entry(c, &head->vm_list, vm_list) {
+		if (c->vm_start == addr)
+			goto out;
+	}
+	c = NULL;
+out:
+	return c;
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
+{
+	struct page *page;
+	struct arch_vm_region *c;
+	unsigned long order;
+	u64 mask = ~0ULL, limit;
+	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
+
+	if (!consistent_pte) {
+		pr_err("%s: not initialized\n", __func__);
+		dump_stack();
+		return NULL;
+	}
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			goto no_page;
+		}
+
+	}
+
+	/*
+	 * Sanity check the allocation size.
+	 */
+	size = PAGE_ALIGN(size);
+	limit = (mask + 1) & ~mask;
+	if ((limit && size >= limit) ||
+	    size >= (CONSISTENT_END - CONSISTENT_BASE)) {
+		pr_warn("coherent allocation too big (requested %#zx mask %#llx)\n",
+			size, mask);
+		goto no_page;
+	}
+
+	order = get_order(size);
+
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+
+	page = alloc_pages(gfp, order);
+	if (!page)
+		goto no_page;
+
+	/*
+	 * Invalidate any data that might be lurking in the
+	 * kernel direct-mapped region for device DMA.
+	 */
+	{
+		unsigned long kaddr = (unsigned long)page_address(page);
+		memset(page_address(page), 0, size);
+		cpu_dma_wbinval_range(kaddr, kaddr + size);
+	}
+
+	/*
+	 * Allocate a virtual address in the consistent mapping region.
+	 */
+	c = vm_region_alloc(&consistent_head, size,
+			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
+	if (c) {
+		pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+		struct page *end = page + (1 << order);
+
+		c->vm_pages = page;
+
+		/*
+		 * Set the "dma handle"
+		 */
+		*handle = page_to_phys(page);
+
+		do {
+			BUG_ON(!pte_none(*pte));
+
+			/*
+			 * x86 does not mark the pages reserved...
+			 */
+			SetPageReserved(page);
+			set_pte(pte, mk_pte(page, prot));
+			page++;
+			pte++;
+		} while (size -= PAGE_SIZE);
+
+		/*
+		 * Free the otherwise unused pages.
+		 */
+		while (page < end) {
+			__free_page(page);
+			page++;
+		}
+
+		return (void *)c->vm_start;
+	}
+
+	if (page)
+		__free_pages(page, order);
+no_page:
+	*handle = ~0;
+	return NULL;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, unsigned long attrs)
+{
+	struct arch_vm_region *c;
+	unsigned long flags, addr;
+	pte_t *ptep;
+
+	size = PAGE_ALIGN(size);
+
+	raw_spin_lock_irqsave(&consistent_lock, flags);
+
+	c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
+	if (!c)
+		goto no_area;
+
+	if ((c->vm_end - c->vm_start) != size) {
+		pr_err("%s: freeing wrong coherent size (%lu != %zu)\n",
+		       __func__, c->vm_end - c->vm_start, size);
+		dump_stack();
+		size = c->vm_end - c->vm_start;
+	}
+
+	ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+	addr = c->vm_start;
+	do {
+		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+		unsigned long pfn;
+
+		ptep++;
+		addr += PAGE_SIZE;
+
+		if (!pte_none(pte) && pte_present(pte)) {
+			pfn = pte_pfn(pte);
+
+			if (pfn_valid(pfn)) {
+				struct page *page = pfn_to_page(pfn);
+
+				/*
+				 * x86 does not mark the pages reserved...
+				 */
+				ClearPageReserved(page);
+
+				__free_page(page);
+				continue;
+			}
+		}
+
+		pr_crit("%s: bad page in kernel page table\n", __func__);
+	} while (size -= PAGE_SIZE);
+
+	flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+	list_del(&c->vm_list);
+
+	raw_spin_unlock_irqrestore(&consistent_lock, flags);
+
+	kfree(c);
+	return;
+
+no_area:
+	raw_spin_unlock_irqrestore(&consistent_lock, flags);
+	pr_err("%s: trying to free invalid coherent area: %p\n",
+	       __func__, cpu_addr);
+	dump_stack();
+}
+
+/*
+ * Initialise the consistent memory allocation.
+ */
+static int __init consistent_init(void)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int ret = 0;
+
+	do {
+		pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
+		pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+		if (!pmd) {
+			pr_err("%s: no pmd tables\n", __func__);
+			ret = -ENOMEM;
+			break;
+		}
+		/* The first-level mapping may already have been created
+		 * elsewhere, so there is no need to warn here. */
+		/* WARN_ON(!pmd_none(*pmd)); */
+
+		pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
+		if (!pte) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		consistent_pte = pte;
+	} while (0);
+
+	return ret;
+}
+
+core_initcall(consistent_init);
+
+static inline void cache_op(phys_addr_t paddr, size_t size,
+		void (*fn)(unsigned long start, unsigned long end))
+{
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned offset = paddr & ~PAGE_MASK;
+	size_t left = size;
+	unsigned long start;
+
+	do {
+		size_t len = left;
+
+		if (PageHighMem(page)) {
+			void *addr;
+
+			if (offset + len > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset >> PAGE_SHIFT;
+					offset &= ~PAGE_MASK;
+				}
+				len = PAGE_SIZE - offset;
+			}
+
+			addr = kmap_atomic(page);
+			start = (unsigned long)(addr + offset);
+			fn(start, start + len);
+			kunmap_atomic(addr);
+		} else {
+			start = (unsigned long)phys_to_virt(paddr);
+			fn(start, start + size);
+		}
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		break;
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(paddr, size, cpu_dma_wb_range);
+		break;
+	default:
+		BUG();
+	}
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cache_op(paddr, size, cpu_dma_inval_range);
+		break;
+	default:
+		BUG();
+	}
+}
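
Nothing calls arch_dma_alloc()/arch_dma_free() directly; drivers reach them through the generic DMA API. A hedged driver-side sketch (the device pointer and ring size are illustrative):

#include <linux/dma-mapping.h>

static int demo_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Lands in arch_dma_alloc(): an uncached VA carved out of
	 * CONSISTENT_BASE..CONSISTENT_END, with the physical address
	 * for the device returned in ring_dma. */
	ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	dma_free_coherent(dev, 4096, ring, ring_dma);
	return 0;
}
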
diff --git a/arch/nds32/kernel/ex-entry.S b/arch/nds32/kernel/ex-entry.S
new file mode 100644
index 0000000..21a1440
--- /dev/null
+++ b/arch/nds32/kernel/ex-entry.S
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/memory.h>
+#include <asm/nds32.h>
+#include <asm/errno.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_HWZOL
+	.macro push_zol
+	mfusr	$r14, $LB
+	mfusr	$r15, $LE
+	mfusr	$r16, $LC
+	.endm
+#endif
+
+	.macro	save_user_regs
+
+	smw.adm $sp, [$sp], $sp, #0x1
+	/* move $SP to the bottom of pt_regs */
+	addi    $sp, $sp, -OSP_OFFSET
+
+	/* push $r0 ~ $r25 */
+	smw.bim $r0, [$sp], $r25
+	/* push $fp, $gp, $lp */
+	smw.bim $sp, [$sp], $sp, #0xe
+
+	mfsr	$r12, $SP_USR
+	mfsr	$r13, $IPC
+#ifdef CONFIG_HWZOL
+	push_zol
+#endif
+	movi	$r17, -1
+	move	$r18, $r0
+	mfsr	$r19, $PSW
+	mfsr	$r20, $IPSW
+	mfsr	$r21, $P_IPSW
+	mfsr	$r22, $P_IPC
+	mfsr	$r23, $P_P0
+	mfsr	$r24, $P_P1
+	smw.bim $r12, [$sp], $r24, #0
+	addi	$sp, $sp, -FUCOP_CTL_OFFSET
+
+	/* Initialize kernel space $fp */
+	andi    $p0, $r20, #PSW_mskPOM
+	movi    $p1, #0x0
+	cmovz   $fp, $p1, $p0
+
+	andi	$r16, $r19, #PSW_mskINTL
+	slti	$r17, $r16, #4
+	bnez	$r17, 1f
+	addi	$r17, $r19, #-2
+	mtsr	$r17, $PSW
+	isb
+1:
+	/* If it was superuser mode, we don't need to update $r25 */
+	bnez	$p0, 2f
+	la	$p0, __entry_task
+	lw	$r25, [$p0]
+2:
+	.endm
+
+	.text
+
+/*
+ * Exception Vector
+ */
+exception_handlers:
+	.long	unhandled_exceptions	!Reset/NMI
+	.long	unhandled_exceptions	!TLB fill
+	.long	do_page_fault		!PTE not present
+	.long	do_dispatch_tlb_misc	!TLB misc
+	.long	unhandled_exceptions	!TLB VLPT
+	.long	unhandled_exceptions	!Machine Error
+	.long	do_debug_trap		!Debug related
+	.long	do_dispatch_general	!General exception
+	.long	eh_syscall		!Syscall
+	.long	asm_do_IRQ		!IRQ
+
+common_exception_handler:
+	save_user_regs
+	mfsr	$p0, $ITYPE
+	andi	$p0, $p0, #ITYPE_mskVECTOR
+	srli	$p0, $p0, #ITYPE_offVECTOR
+	andi	$p1, $p0, #NDS32_VECTOR_mskNONEXCEPTION
+	bnez	$p1, 1f
+	sethi	$lp, hi20(ret_from_exception)
+	ori	$lp, $lp, lo12(ret_from_exception)
+	sethi	$p1, hi20(exception_handlers)
+	ori	$p1, $p1, lo12(exception_handlers)
+	lw	$p1, [$p1+$p0<<2]
+	move	$r0, $p0
+	mfsr	$r1, $EVA
+	mfsr	$r2, $ITYPE
+	move	$r3, $sp
+	mfsr    $r4, $OIPC
+	/* enable gie if it is enabled in IPSW. */
+	mfsr	$r21, $PSW
+	andi	$r20, $r20, #PSW_mskGIE	/* r20 is $IPSW*/
+	or	$r21, $r21, $r20
+	mtsr	$r21, $PSW
+	dsb
+	jr	$p1
+
+	/* syscall */
+1:
+	addi	$p1, $p0, #-NDS32_VECTOR_offEXCEPTION
+	bnez	$p1, 2f
+	sethi	$lp, hi20(ret_from_exception)
+	ori	$lp, $lp, lo12(ret_from_exception)
+	sethi	$p1, hi20(exception_handlers)
+	ori	$p1, $p1, lo12(exception_handlers)
+	lwi	$p1, [$p1+#NDS32_VECTOR_offEXCEPTION<<2]
+	jr	$p1
+
+	/* interrupt */
+2:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	jal     __trace_hardirqs_off
+#endif
+	move	$r0, $sp
+	sethi	$lp, hi20(ret_from_intr)
+	ori	$lp, $lp, lo12(ret_from_intr)
+	sethi	$p0, hi20(exception_handlers)
+	ori	$p0, $p0, lo12(exception_handlers)
+	lwi	$p0, [$p0+#NDS32_VECTOR_offINTERRUPT<<2]
+	jr	$p0
+
+	.macro	EXCEPTION_VECTOR_DEBUG
+	.align 4
+	mfsr     $p0, $EDM_CTL
+	andi     $p0, $p0, EDM_CTL_mskV3_EDM_MODE
+	tnez     $p0, SWID_RAISE_INTERRUPT_LEVEL
+	.endm
+
+	.macro	EXCEPTION_VECTOR
+	.align 4
+	sethi	 $p0, hi20(common_exception_handler)
+	ori	 $p0, $p0, lo12(common_exception_handler)
+	jral.ton $p0, $p0
+	.endm
+
+	.section	".text.init", #alloc, #execinstr
+	.global	exception_vector
+exception_vector:
+.rept 6
+	EXCEPTION_VECTOR
+.endr
+	EXCEPTION_VECTOR_DEBUG
+.rept 121
+	EXCEPTION_VECTOR
+.endr
+	.align 4
+	.global	exception_vector_end
+exception_vector_end:
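
In C terms, common_exception_handler masks the vector number out of $ITYPE and jumps through the exception_handlers table above. A simplified restatement (ITYPE_* as in <asm/nds32.h>; the syscall/IRQ side paths and the $r0-$r4 argument setup are omitted):

typedef void (*exception_fn)(void);
extern exception_fn exception_handlers[];

static void dispatch_exception(unsigned long itype)
{
	unsigned long vector = (itype & ITYPE_mskVECTOR) >> ITYPE_offVECTOR;

	exception_handlers[vector]();	/* e.g. vector 2 -> do_page_fault */
}
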
diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S
new file mode 100644
index 0000000..f00af92
--- /dev/null
+++ b/arch/nds32/kernel/ex-exit.S
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/nds32.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/current.h>
+
+
+
+#ifdef CONFIG_HWZOL
+	.macro pop_zol
+	mtusr	$r14, $LB
+	mtusr	$r15, $LE
+	mtusr	$r16, $LC
+	.endm
+#endif
+
+	.macro	restore_user_regs_first
+	setgie.d
+	isb
+
+	addi	$sp, $sp, FUCOP_CTL_OFFSET
+
+	lmw.adm $r12, [$sp], $r24, #0x0
+	mtsr	$r12, $SP_USR
+	mtsr	$r13, $IPC
+#ifdef CONFIG_HWZOL
+	pop_zol
+#endif
+	mtsr	$r19, $PSW
+	mtsr	$r20, $IPSW
+	mtsr    $r21, $P_IPSW
+	mtsr	$r22, $P_IPC
+	mtsr	$r23, $P_P0
+	mtsr	$r24, $P_P1
+	lmw.adm $sp, [$sp], $sp, #0xe
+	.endm
+
+	.macro	restore_user_regs_last
+	pop	$p0
+	cmovn	$sp, $p0, $p0
+
+	iret
+	nop
+
+	.endm
+
+	.macro	restore_user_regs
+	restore_user_regs_first
+	lmw.adm $r0, [$sp], $r25, #0x0
+	addi	$sp, $sp, OSP_OFFSET
+	restore_user_regs_last
+	.endm
+
+	.macro	fast_restore_user_regs
+	restore_user_regs_first
+	lmw.adm $r1, [$sp], $r25, #0x0
+	addi	$sp, $sp, OSP_OFFSET-4
+	restore_user_regs_last
+	.endm
+
+#ifdef CONFIG_PREEMPT
+	.macro	preempt_stop
+	.endm
+#else
+	.macro	preempt_stop
+	setgie.d
+	isb
+	.endm
+#define	resume_kernel	no_work_pending
+#endif
+
+ENTRY(ret_from_exception)
+	preempt_stop
+ENTRY(ret_from_intr)
+
+/*
+ * Decide whether we are returning to kernel or user mode,
+ * i.e. whether this was taken from a nested (kernel) context.
+ */
+	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
+	andi	$p0, $p0, #PSW_mskINTL
+	bnez	$p0, resume_kernel		! done with iret
+	j	resume_userspace
+
+
+/*
+ * This is the fast syscall return path.  We do as little as
+ * possible here, and this includes saving $r0 back into the SVC
+ * stack.
+ * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8
+ */
+ENTRY(ret_fast_syscall)
+	gie_disable
+	lwi	$r1, [tsk+#TSK_TI_FLAGS]
+	andi	$p1, $r1, #_TIF_WORK_MASK
+	bnez	$p1, fast_work_pending
+	fast_restore_user_regs			! iret
+
+/*
+ * Ok, we need to do extra processing,
+ * enter the slow path returning from syscall, while pending work.
+ */
+fast_work_pending:
+	swi	$r0, [$sp+(#R0_OFFSET)]		! what is different from ret_from_exception
+work_pending:
+	andi	$p1, $r1, #_TIF_NEED_RESCHED
+	bnez	$p1, work_resched
+
+	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
+	beqz	$p1, no_work_pending
+
+	move	$r0, $sp			! 'regs'
+	gie_enable
+	bal	do_notify_resume
+	b       ret_slow_syscall
+work_resched:
+	bal	schedule			! path, return to user mode
+
+/*
+ * "slow" syscall return path.
+ */
+ENTRY(resume_userspace)
+ENTRY(ret_slow_syscall)
+	gie_disable
+	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
+	andi	$p0, $p0, #PSW_mskINTL
+	bnez	$p0, no_work_pending		! done with iret
+	lwi	$r1, [tsk+#TSK_TI_FLAGS]
+	andi	$p1, $r1, #_TIF_WORK_MASK
+	bnez	$p1, work_pending		! handle work_resched, sig_pend
+
+no_work_pending:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	lwi	$p0, [$sp+(#IPSW_OFFSET)]
+	andi	$p0, $p0, #0x1
+	la	$r10, __trace_hardirqs_off
+	la	$r9, __trace_hardirqs_on
+	cmovz	$r9, $p0, $r10
+	jral	$r9
+#endif
+	restore_user_regs			! return from iret
+
+
+/*
+ * preemptive kernel
+ */
+#ifdef CONFIG_PREEMPT
+resume_kernel:
+	gie_disable
+	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
+	bnez	$t0, no_work_pending
+need_resched:
+	lwi	$t0, [tsk+#TSK_TI_FLAGS]
+	andi	$p1, $t0, #_TIF_NEED_RESCHED
+	beqz	$p1, no_work_pending
+
+	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
+	andi	$t0, $t0, #1
+	beqz	$t0, no_work_pending
+
+	jal	preempt_schedule_irq
+	b	need_resched
+#endif
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bal	schedule_tail
+	beqz	$r6, 1f				! r6 stores fn for kernel thread
+	move	$r0, $r7			! prepare kernel thread arg
+	jral	$r6
+1:
+	lwi	$r1, [tsk+#TSK_TI_FLAGS]		! check for syscall tracing
+	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
+	beqz	$p1, ret_slow_syscall
+	move    $r0, $sp
+	bal	syscall_trace_leave
+	b	ret_slow_syscall
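
The CONFIG_PREEMPT resume_kernel path above reads naturally as a loop in C; a sketch, assuming bit 0 of regs->ipsw is GIE (whether the interrupted context had interrupts enabled):

#include <linux/preempt.h>
#include <linux/thread_info.h>

static void resume_kernel_sketch(struct pt_regs *regs)
{
	if (preempt_count())			/* TSK_TI_PREEMPT != 0 */
		return;				/* not preemptible */

	while ((current_thread_info()->flags & _TIF_NEED_RESCHED) &&
	       (regs->ipsw & 0x1))
		preempt_schedule_irq();		/* returns with irqs off */
}
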
diff --git a/arch/nds32/kernel/ex-scall.S b/arch/nds32/kernel/ex-scall.S
new file mode 100644
index 0000000..36aa87e
--- /dev/null
+++ b/arch/nds32/kernel/ex-scall.S
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+#include <asm/assembler.h>
+#include <asm/nds32.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+#include <asm/current.h>
+
+/*
+ * $r0 = previous task_struct,
+ * $r1 = next task_struct,
+ * previous and next are guaranteed not to be the same.
+ */
+
+ENTRY(__switch_to)
+
+	la	$p0, __entry_task
+	sw	$r1, [$p0]
+	move	$p1, $r0
+	addi	$p1, $p1, #THREAD_CPU_CONTEXT
+	smw.bi 	$r6, [$p1], $r14, #0xb		! push r6~r14, fp, lp, sp
+	move	$r25, $r1
+	addi	$r1, $r1, #THREAD_CPU_CONTEXT
+	lmw.bi 	$r6, [$r1], $r14, #0xb		! pop r6~r14, fp, lp, sp
+	ret
+
+
+#define tbl $r8
+
+/*
+ * $r7 will be written with the syscall number
+ */
+	.macro	get_scno
+	lwi	$r7, [$sp + R15_OFFSET]
+	swi	$r7, [$sp + SYSCALLNO_OFFSET]
+	.endm
+
+	.macro	updateipc
+	addi	$r17, $r13, #4				! $r13 is $IPC
+	swi	$r17, [$sp + IPC_OFFSET]
+	.endm
+
+ENTRY(eh_syscall)
+	updateipc
+
+	get_scno
+	gie_enable
+
+	lwi	$p0, [tsk+#TSK_TI_FLAGS]		! check for syscall tracing
+
+	andi	$p1, $p0, #_TIF_WORK_SYSCALL_ENTRY	! are we tracing syscalls?
+	bnez	$p1, __sys_trace
+
+	la	$lp, ret_fast_syscall		! return address
+jmp_systbl:
+	addi	$p1, $r7, #-__NR_syscalls	! syscall number in the syscall insn is guarded by the assembler
+	bgez	$p1, _SCNO_EXCEED		! call sys_* routine
+	la	tbl, sys_call_table		! load syscall table pointer
+	slli	$p1, $r7, #2
+	add	$p1, tbl, $p1
+	lwi	$p1, [$p1]
+	jr	$p1				! no return
+
+_SCNO_EXCEED:
+	ori	$r0, $r7, #0
+	ori	$r1, $sp, #0
+	b	bad_syscall
+
+/*
+ * This is the really slow path.  We're going to be doing
+ * context switches, and waiting for our parent to respond.
+ */
+__sys_trace:
+	move	$r0, $sp
+	bal	syscall_trace_enter
+	move	$r7, $r0
+	la	$lp, __sys_trace_return		! return address
+
+	addi    $p1, $r7, #1
+	beqz    $p1, ret_slow_syscall		! fatal signal is pending
+
+	addi	$p1, $sp, #R0_OFFSET		! pointer to regs
+	lmw.bi	$r0, [$p1], $r5			! have to reload $r0 - $r5
+	b	jmp_systbl
+
+__sys_trace_return:
+	swi	$r0, [$sp+#R0_OFFSET]		! T: save returned $r0
+	move	$r0, $sp			! set pt_regs for syscall_trace_leave
+	bal	syscall_trace_leave
+	b	ret_slow_syscall
+
+ENTRY(sys_rt_sigreturn_wrapper)
+	addi	$r0, $sp, #0
+	b	sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
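
eh_syscall's table dispatch, restated in C (bad_syscall's signature here is assumed from the $r0/$r1 setup in _SCNO_EXCEED):

#include <asm/ptrace.h>
#include <asm/unistd.h>

typedef long (*syscall_fn)(long, long, long, long, long, long);
extern syscall_fn sys_call_table[__NR_syscalls];
extern long bad_syscall(unsigned long nr, struct pt_regs *regs);

static long dispatch_syscall(struct pt_regs *regs, unsigned long nr)
{
	if (nr >= __NR_syscalls)		/* _SCNO_EXCEED path */
		return bad_syscall(nr, regs);

	return sys_call_table[nr](regs->uregs[0], regs->uregs[1],
				  regs->uregs[2], regs->uregs[3],
				  regs->uregs[4], regs->uregs[5]);
}
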
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c
new file mode 100644
index 0000000..8a41372
--- /dev/null
+++ b/arch/nds32/kernel/ftrace.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+
+#ifndef CONFIG_DYNAMIC_FTRACE
+extern void (*ftrace_trace_function)(unsigned long, unsigned long,
+				     struct ftrace_ops*, struct pt_regs*);
+extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace);
+extern void ftrace_graph_caller(void);
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *op, struct pt_regs *regs)
+{
+	__asm__ ("");  /* keep the compiler from treating this as a pure function */
+}
+
+noinline void _mcount(unsigned long parent_ip)
+{
+	/* save all state by the compiler prologue */
+
+	unsigned long ip = (unsigned long)__builtin_return_address(0);
+
+	if (ftrace_trace_function != ftrace_stub)
+		ftrace_trace_function(ip - MCOUNT_INSN_SIZE, parent_ip,
+				      NULL, NULL);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	if (ftrace_graph_return != (trace_func_graph_ret_t)ftrace_stub
+	    || ftrace_graph_entry != ftrace_graph_entry_stub)
+		ftrace_graph_caller();
+#endif
+
+	/* restore all state by the compiler epilogue */
+}
+EXPORT_SYMBOL(_mcount);
+
+#else /* CONFIG_DYNAMIC_FTRACE */
+
+noinline void __naked ftrace_stub(unsigned long ip, unsigned long parent_ip,
+				  struct ftrace_ops *op, struct pt_regs *regs)
+{
+	__asm__ ("");  /* keep the compiler from treating this as a pure function */
+}
+
+noinline void __naked _mcount(unsigned long parent_ip)
+{
+	__asm__ ("");  /* keep the compiler from treating this as a pure function */
+}
+EXPORT_SYMBOL(_mcount);
+
+#define XSTR(s) STR(s)
+#define STR(s) #s
+void _ftrace_caller(unsigned long parent_ip)
+{
+	/* save all state needed by the compiler prologue */
+
+	/*
+	 * prepare arguments for real tracing function
+	 * first  arg : __builtin_return_address(0) - MCOUNT_INSN_SIZE
+	 * second arg : parent_ip
+	 */
+	__asm__ __volatile__ (
+		"move $r1, %0				   \n\t"
+		"addi $r0, %1, #-" XSTR(MCOUNT_INSN_SIZE) "\n\t"
+		:
+		: "r" (parent_ip), "r" (__builtin_return_address(0)));
+
+	/* a placeholder for the call to a real tracing function */
+	__asm__ __volatile__ (
+		"ftrace_call:		\n\t"
+		"nop			\n\t"
+		"nop			\n\t"
+		"nop			\n\t");
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* a placeholder for the call to ftrace_graph_caller */
+	__asm__ __volatile__ (
+		"ftrace_graph_call:	\n\t"
+		"nop			\n\t"
+		"nop			\n\t"
+		"nop			\n\t");
+#endif
+	/* restore all state needed by the compiler epilogue */
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+	return 0;
+}
+
+int ftrace_arch_code_modify_prepare(void)
+{
+	set_all_modules_text_rw();
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+	set_all_modules_text_ro();
+	return 0;
+}
+
+static unsigned long gen_sethi_insn(unsigned long addr)
+{
+	unsigned long opcode = 0x46000000;
+	unsigned long imm = addr >> 12;
+	unsigned long rt_num = 0xf << 20;
+
+	return ENDIAN_CONVERT(opcode | rt_num | imm);
+}
+
+static unsigned long gen_ori_insn(unsigned long addr)
+{
+	unsigned long opcode = 0x58000000;
+	unsigned long imm = addr & 0x0000fff;
+	unsigned long rt_num = 0xf << 20;
+	unsigned long ra_num = 0xf << 15;
+
+	return ENDIAN_CONVERT(opcode | rt_num | ra_num | imm);
+}
+
+static unsigned long gen_jral_insn(unsigned long addr)
+{
+	unsigned long opcode = 0x4a000001;
+	unsigned long rt_num = 0x1e << 20;
+	unsigned long rb_num = 0xf << 10;
+
+	return ENDIAN_CONVERT(opcode | rt_num | rb_num);
+}
+
+static void ftrace_gen_call_insn(unsigned long *call_insns,
+				 unsigned long addr)
+{
+	call_insns[0] = gen_sethi_insn(addr); /* sethi $r15, imm20u       */
+	call_insns[1] = gen_ori_insn(addr);   /* ori   $r15, $r15, imm15u */
+	call_insns[2] = gen_jral_insn(addr);  /* jral  $lp,  $r15         */
+}
+
+static int __ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+				unsigned long *new_insn, bool validate)
+{
+	unsigned long orig_insn[3];
+
+	if (validate) {
+		if (probe_kernel_read(orig_insn, (void *)pc, MCOUNT_INSN_SIZE))
+			return -EFAULT;
+		if (memcmp(orig_insn, old_insn, MCOUNT_INSN_SIZE))
+			return -EINVAL;
+	}
+
+	if (probe_kernel_write((void *)pc, new_insn, MCOUNT_INSN_SIZE))
+		return -EPERM;
+
+	return 0;
+}
+
+static int ftrace_modify_code(unsigned long pc, unsigned long *old_insn,
+			      unsigned long *new_insn, bool validate)
+{
+	int ret;
+
+	ret = __ftrace_modify_code(pc, old_insn, new_insn, validate);
+	if (ret)
+		return ret;
+
+	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);
+
+	return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+	unsigned long pc = (unsigned long)&ftrace_call;
+	unsigned long old_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+	unsigned long new_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+	if (func != ftrace_stub)
+		ftrace_gen_call_insn(new_insn, (unsigned long)func);
+
+	return ftrace_modify_code(pc, old_insn, new_insn, false);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+	ftrace_gen_call_insn(call_insn, addr);
+
+	return ftrace_modify_code(pc, nop_insn, call_insn, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+		    unsigned long addr)
+{
+	unsigned long pc = rec->ip;
+	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+	ftrace_gen_call_insn(call_insn, addr);
+
+	return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
+{
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+	unsigned long old;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	old = *parent;
+
+	if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
+		*parent = return_hooker;
+}
+
+noinline void ftrace_graph_caller(void)
+{
+	unsigned long *parent_ip =
+		(unsigned long *)(__builtin_frame_address(2) - 4);
+
+	unsigned long selfpc =
+		(unsigned long)(__builtin_return_address(1) - MCOUNT_INSN_SIZE);
+
+	unsigned long frame_pointer =
+		(unsigned long)__builtin_frame_address(3);
+
+	prepare_ftrace_return(parent_ip, selfpc, frame_pointer);
+}
+
+extern unsigned long ftrace_return_to_handler(unsigned long frame_pointer);
+void __naked return_to_handler(void)
+{
+	__asm__ __volatile__ (
+		/* save state needed by the ABI     */
+		"smw.adm $r0,[$sp],$r1,#0x0  \n\t"
+
+		/* get original return address      */
+		"move $r0, $fp               \n\t"
+		"bal ftrace_return_to_handler\n\t"
+		"move $lp, $r0               \n\t"
+
+		/* restore state needed by the ABI  */
+		"lmw.bim $r0,[$sp],$r1,#0x0  \n\t");
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern unsigned long ftrace_graph_call;
+
+static int ftrace_modify_graph_caller(bool enable)
+{
+	unsigned long pc = (unsigned long)&ftrace_graph_call;
+	unsigned long nop_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+	unsigned long call_insn[3] = {INSN_NOP, INSN_NOP, INSN_NOP};
+
+	ftrace_gen_call_insn(call_insn, (unsigned long)ftrace_graph_caller);
+
+	if (enable)
+		return ftrace_modify_code(pc, nop_insn, call_insn, true);
+	else
+		return ftrace_modify_code(pc, call_insn, nop_insn, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+	return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+noinline void __trace_hardirqs_off(void)
+{
+	trace_hardirqs_off();
+}
+noinline void __trace_hardirqs_on(void)
+{
+	trace_hardirqs_on();
+}
+#endif /* CONFIG_TRACE_IRQFLAGS */
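
Every patched call site has the same three-instruction shape: sethi loads the upper 20 bits of the target into $r15, ori fills in the lower 12, and jral branches through $r15 leaving the return address in $lp. A standalone check of the immediate split (the target address is made up):

#include <stdio.h>

int main(void)
{
	unsigned long addr = 0x903abffc;	/* hypothetical target */
	unsigned long hi20 = addr >> 12;	/* sethi immediate */
	unsigned long lo12 = addr & 0xfff;	/* ori immediate */

	/* sethi $r15, hi20 ; ori $r15, $r15, lo12  =>  $r15 == addr */
	printf("0x%08lx\n", (hi20 << 12) | lo12);	/* 0x903abffc */
	return 0;
}
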
diff --git a/arch/nds32/kernel/head.S b/arch/nds32/kernel/head.S
new file mode 100644
index 0000000..c5fdae1
--- /dev/null
+++ b/arch/nds32/kernel/head.S
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sizes.h>
+#include <asm/thread_info.h>
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define OF_DT_MAGIC 0xd00dfeed
+#else
+#define OF_DT_MAGIC 0xedfe0dd0
+#endif
+
+	.globl  swapper_pg_dir
+	.equ    swapper_pg_dir, TEXTADDR - 0x4000
+
+/*
+ * Kernel startup entry point.
+ */
+	.section ".head.text", "ax"
+	.type   _stext, %function
+ENTRY(_stext)
+	setgie.d                            ! Disable interrupt
+	isb
+/*
+ * Disable I/D-cache and enable it at a proper time
+ */
+	mfsr    $r0, $mr8
+	li      $r1, #~(CACHE_CTL_mskIC_EN|CACHE_CTL_mskDC_EN)
+	and     $r0, $r0, $r1
+	mtsr    $r0, $mr8
+
+/*
+ * Process device tree blob
+ */
+	andi	$r0, $r2, #0x3
+	li	$r10, 0
+	bne     $r0, $r10, _nodtb
+	lwi	$r0, [$r2]
+	li	$r1, OF_DT_MAGIC
+	bne     $r0, $r1, _nodtb
+	move	$r10, $r2
+_nodtb:
+
+/*
+ * Create a temporary mapping area for booting, before start_kernel
+ */
+	sethi   $r4, hi20(swapper_pg_dir)
+	li      $p0, (PAGE_OFFSET - PHYS_OFFSET)
+	sub     $r4, $r4, $p0
+	tlbop   FlushAll            ! invalidate TLB
+	isb
+	mtsr    $r4, $L1_PPTB       ! load page table pointer
+
+#ifdef CONFIG_CPU_DCACHE_DISABLE
+	#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_NON
+#else
+	#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+		#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WT
+	#else
+		#define MMU_CTL_NTCC MMU_CTL_CACHEABLE_WB
+	#endif
+#endif
+
+/* set NTC cacheability, multiple page sizes in use */
+	mfsr    $r3, $MMU_CTL
+#if CONFIG_MEMORY_START >= 0xc0000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC3)
+#elif CONFIG_MEMORY_START >= 0x80000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC2)
+#elif CONFIG_MEMORY_START >= 0x40000000
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC1)
+#else
+	ori     $r3, $r3, (MMU_CTL_NTCC << MMU_CTL_offNTC0)
+#endif
+
+#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
+	ori     $r3, $r3, #(MMU_CTL_mskMPZIU)
+#else
+	ori     $r3, $r3, #(MMU_CTL_mskMPZIU|MMU_CTL_D8KB)
+#endif
+#ifdef CONFIG_HW_SUPPORT_UNALIGNMENT_ACCESS
+	li      $r0, #MMU_CTL_UNA
+	or      $r3, $r3, $r0
+#endif
+	mtsr    $r3, $MMU_CTL
+	isb
+
+/* set page size and size of kernel image */
+        mfsr    $r0, $MMU_CFG
+        srli    $r3, $r0, MMU_CFG_offfEPSZ
+        zeb     $r3, $r3
+        bnez    $r3, _extra_page_size_support
+#ifdef CONFIG_ANDES_PAGE_SIZE_4KB
+        li      $r5, #SZ_4K                 ! Use 4KB page size
+#else
+        li      $r5, #SZ_8K                 ! Use 8KB page size
+        li      $r3, #1
+#endif
+        mtsr    $r3, $TLB_MISC
+        b       _image_size_check
+
+_extra_page_size_support:                    ! Use EPSZ page size
+        clz     $r6, $r3
+        subri   $r2, $r6, #31
+        li      $r3, #1
+        sll     $r3, $r3, $r2
+        /* MMU_CFG.EPSZ value -> meaning */
+        mul     $r5, $r3, $r3
+        slli    $r5, $r5, #14
+        /* MMU_CFG.EPSZ  -> TLB_MISC.ACC_PSZ */
+        addi    $r3, $r2, #0x2
+        mtsr    $r3, $TLB_MISC
+
+_image_size_check:
+        /* calculate the image maximum size accepted by TLB config */
+        andi    $r6, $r0, MMU_CFG_mskTBW
+        andi    $r0, $r0, MMU_CFG_mskTBS
+        srli    $r6, $r6, MMU_CFG_offTBW
+        srli    $r0, $r0, MMU_CFG_offTBS
+        /*
+         * We map the kernel with at most (ways - 1) of the TLB,
+         * reserving one way for the UART VA mapping; a page fault
+         * results if the UART mapping covers the kernel mapping.
+         *
+         * Direct mapping is not supported for now.
+         */
+        li      $r2, 't'
+        beqz    $r6, __error                 ! MMU_CFG.TBW = 0 is direct mapping
+        addi    $r0, $r0, #0x2               ! MMU_CFG.TBS value -> meaning
+        sll     $r0, $r6, $r0                ! entries = k-way * n-set
+        mul     $r6, $r0, $r5                ! max size = entries * page size
+        /* check kernel image size */
+        la      $r3, (_end - PAGE_OFFSET)
+        li      $r2, 's'
+        bgt     $r3, $r6, __error
+
+	li      $r2, #(PHYS_OFFSET + TLB_DATA_kernel_text_attr)
+        li      $r3, PAGE_OFFSET
+        add     $r6, $r6, $r3
+
+_tlb:
+	mtsr    $r3, $TLB_VPN
+	dsb
+	tlbop   $r2, RWR
+	isb
+	add     $r3, $r3, $r5
+	add     $r2, $r2, $r5
+	bgt     $r6, $r3, _tlb
+	mfsr    $r3, $TLB_MISC      ! setup access page size
+	li      $r2, #~0xf
+	and     $r3, $r3, $r2
+#ifdef CONFIG_ANDES_PAGE_SIZE_8KB
+	ori    $r3, $r3, #0x1
+#endif
+	mtsr    $r3, $TLB_MISC
+
+	mfsr    $r0, $MISC_CTL      ! Enable BTB and RTP and shadow sp
+	ori     $r0, $r0, #MISC_init
+	mtsr    $r0, $MISC_CTL
+
+	mfsr    $p1, $PSW
+	li      $r15, #~PSW_clr             ! clear WBNA|DME|IME|DT|IT|POM|INTL|GIE
+	and     $p1, $p1, $r15
+	ori     $p1, $p1, #PSW_init
+	mtsr    $p1, $IPSW                  ! when iret, it will automatically enable MMU
+	la      $lp, __mmap_switched
+	mtsr    $lp, $IPC
+	iret
+	nop
+
+	.type   __switch_data, %object
+__switch_data:
+	.long   __bss_start                 ! $r6
+	.long   _end                        ! $r7
+	.long	__atags_pointer 	    ! $atag_pointer
+	.long   init_task                   ! $r9, move to $r25
+	.long   init_thread_union + THREAD_SIZE    ! $sp
+
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode,
+ * and uses absolute addresses; this is not position independent.
+ */
+	.align
+	.type   __mmap_switched, %function
+__mmap_switched:
+	la  $r3, __switch_data
+	lmw.bim $r6, [$r3], $r9, #0b0001
+	move	$r25, $r9
+	move    $fp, #0             ! Clear  BSS (and zero $fp)
+	beq $r7, $r6, _RRT
+1:	swi.bi  $fp, [$r6], #4
+	bne $r7, $r6, 1b
+	swi	$r10, [$r8]
+
+_RRT:
+	b   start_kernel
+
+__error:
+	b   __error
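
The _image_size_check arithmetic above: the boot code can pin entries = TBW << (TBS + 2) pages in the TLB, so the kernel image must fit in entries * page_size bytes. A standalone check with made-up MMU_CFG fields:

#include <stdio.h>

int main(void)
{
	unsigned long tbw = 4, tbs = 5;		/* hypothetical fields */
	unsigned long page = 4096;		/* CONFIG_ANDES_PAGE_SIZE_4KB */
	unsigned long entries = tbw << (tbs + 2);
	unsigned long max = entries * page;	/* mappable image bound */

	printf("%lu entries, max image %lu KB\n", entries, max / 1024);
	return 0;				/* 512 entries, 2048 KB */
}
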
diff --git a/arch/nds32/kernel/irq.c b/arch/nds32/kernel/irq.c
new file mode 100644
index 0000000..6ff5a67
--- /dev/null
+++ b/arch/nds32/kernel/irq.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/irqchip.h>
+
+void __init init_IRQ(void)
+{
+	irqchip_init();
+}
diff --git a/arch/nds32/kernel/module.c b/arch/nds32/kernel/module.c
new file mode 100644
index 0000000..1e31829
--- /dev/null
+++ b/arch/nds32/kernel/module.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/moduleloader.h>
+#include <asm/pgtable.h>
+
+void *module_alloc(unsigned long size)
+{
+	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+				    GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+				    __builtin_return_address(0));
+}
+
+void module_free(struct module *module, void *region)
+{
+	vfree(region);
+}
+
+int module_frob_arch_sections(Elf_Ehdr * hdr,
+			      Elf_Shdr * sechdrs,
+			      char *secstrings, struct module *mod)
+{
+	return 0;
+}
+
+void do_reloc16(unsigned int val, unsigned int *loc, unsigned int val_mask,
+		unsigned int val_shift, unsigned int loc_mask,
+		unsigned int partial_in_place, unsigned int swap)
+{
+	unsigned int tmp = 0, tmp2 = 0;
+
+	__asm__ __volatile__("\tlhi.bi\t%0, [%2], 0\n"
+			     "\tbeqz\t%3, 1f\n"
+			     "\twsbh\t%0, %1\n"
+			     "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap)
+	    );
+
+	tmp2 = tmp & loc_mask;
+	if (partial_in_place) {
+		tmp &= (~loc_mask);
+		tmp = tmp2 | ((tmp + ((val & val_mask) >> val_shift)) &
+			      val_mask);
+	} else {
+		tmp = tmp2 | ((val & val_mask) >> val_shift);
+	}
+
+	__asm__ __volatile__("\tbeqz\t%3, 2f\n"
+			     "\twsbh\t%0, %1\n"
+			     "2:\n"
+			     "\tshi.bi\t%0, [%2], 0\n":"=r"(tmp):"0"(tmp),
+			     "r"(loc), "r"(swap)
+	    );
+}
+
+void do_reloc32(unsigned int val, unsigned int *loc, unsigned int val_mask,
+		unsigned int val_shift, unsigned int loc_mask,
+		unsigned int partial_in_place, unsigned int swap)
+{
+	unsigned int tmp = 0, tmp2 = 0;
+
+	__asm__ __volatile__("\tlmw.bi\t%0, [%2], %0, 0\n"
+			     "\tbeqz\t%3, 1f\n"
+			     "\twsbh\t%0, %1\n"
+			     "\trotri\t%0, %1, 16\n"
+			     "1:\n":"=r"(tmp):"0"(tmp), "r"(loc), "r"(swap)
+	    );
+
+	tmp2 = tmp & loc_mask;
+	if (partial_in_place) {
+		tmp &= (~loc_mask);
+		tmp = tmp2 | ((tmp + ((val & val_mask) >> val_shift)) &
+			      val_mask);
+	} else {
+		tmp = tmp2 | ((val & val_mask) >> val_shift);
+	}
+
+	__asm__ __volatile__("\tbeqz\t%3, 2f\n"
+			     "\twsbh\t%0, %1\n"
+			     "\trotri\t%0, %1, 16\n"
+			     "2:\n"
+			     "\tsmw.bi\t%0, [%2], %0, 0\n":"=r"(tmp):"0"(tmp),
+			     "r"(loc), "r"(swap)
+	    );
+}
+
+static inline int exceed_limit(int offset, unsigned int val_mask,
+			       struct module *module, Elf32_Rela * rel,
+			       unsigned int relindex, unsigned int reloc_order)
+{
+	int abs_off = offset < 0 ? ~offset : offset;
+
+	if (abs_off & (~val_mask)) {
+		pr_err("\n%s: relocation type %d out of range.\n"
+		       "please rebuild the kernel module with gcc option \"-Wa,-mno-small-text\".\n",
+		       module->name, ELF32_R_TYPE(rel->r_info));
+		pr_err("section %d reloc %d offset 0x%x relative 0x%x.\n",
+		       relindex, reloc_order, rel->r_offset, offset);
+		return true;
+	}
+	return false;
+}
+
+#ifdef __NDS32_EL__
+#define NEED_SWAP 1
+#else
+#define NEED_SWAP 0
+#endif
+
+int
+apply_relocate_add(Elf32_Shdr * sechdrs, const char *strtab,
+		   unsigned int symindex, unsigned int relindex,
+		   struct module *module)
+{
+	Elf32_Shdr *symsec = sechdrs + symindex;
+	Elf32_Shdr *relsec = sechdrs + relindex;
+	Elf32_Shdr *dstsec = sechdrs + relsec->sh_info;
+	Elf32_Rela *rel = (void *)relsec->sh_addr;
+	unsigned int i;
+
+	for (i = 0; i < relsec->sh_size / sizeof(Elf32_Rela); i++, rel++) {
+		Elf32_Addr *loc;
+		Elf32_Sym *sym;
+		Elf32_Addr v;
+		s32 offset;
+
+		offset = ELF32_R_SYM(rel->r_info);
+		if (offset < 0
+		    || offset > (symsec->sh_size / sizeof(Elf32_Sym))) {
+			pr_err("%s: bad relocation\n", module->name);
+			pr_err("section %d reloc %d\n", relindex, i);
+			return -ENOEXEC;
+		}
+
+		sym = ((Elf32_Sym *) symsec->sh_addr) + offset;
+
+		if (rel->r_offset < 0
+		    || rel->r_offset > dstsec->sh_size - sizeof(u16)) {
+			pr_err("%s: out of bounds relocation\n", module->name);
+			pr_err("section %d reloc %d offset 0x%0x size %d\n",
+			       relindex, i, rel->r_offset, dstsec->sh_size);
+			return -ENOEXEC;
+		}
+
+		loc = (Elf32_Addr *) (dstsec->sh_addr + rel->r_offset);
+		v = sym->st_value + rel->r_addend;
+
+		switch (ELF32_R_TYPE(rel->r_info)) {
+		case R_NDS32_NONE:
+		case R_NDS32_INSN16:
+		case R_NDS32_LABEL:
+		case R_NDS32_LONGCALL1:
+		case R_NDS32_LONGCALL2:
+		case R_NDS32_LONGCALL3:
+		case R_NDS32_LONGCALL4:
+		case R_NDS32_LONGJUMP1:
+		case R_NDS32_LONGJUMP2:
+		case R_NDS32_LONGJUMP3:
+		case R_NDS32_9_FIXED_RELA:
+		case R_NDS32_15_FIXED_RELA:
+		case R_NDS32_17_FIXED_RELA:
+		case R_NDS32_25_FIXED_RELA:
+		case R_NDS32_LOADSTORE:
+		case R_NDS32_DWARF2_OP1_RELA:
+		case R_NDS32_DWARF2_OP2_RELA:
+		case R_NDS32_DWARF2_LEB_RELA:
+		case R_NDS32_RELA_NOP_MIX ... R_NDS32_RELA_NOP_MAX:
+			break;
+
+		case R_NDS32_32_RELA:
+			do_reloc32(v, loc, 0xffffffff, 0, 0, 0, 0);
+			break;
+
+		case R_NDS32_HI20_RELA:
+			do_reloc32(v, loc, 0xfffff000, 12, 0xfff00000, 0,
+				   NEED_SWAP);
+			break;
+
+		case R_NDS32_LO12S3_RELA:
+			do_reloc32(v, loc, 0x00000fff, 3, 0xfffff000, 0,
+				   NEED_SWAP);
+			break;
+
+		case R_NDS32_LO12S2_RELA:
+			do_reloc32(v, loc, 0x00000fff, 2, 0xfffff000, 0,
+				   NEED_SWAP);
+			break;
+
+		case R_NDS32_LO12S1_RELA:
+			do_reloc32(v, loc, 0x00000fff, 1, 0xfffff000, 0,
+				   NEED_SWAP);
+			break;
+
+		case R_NDS32_LO12S0_RELA:
+		case R_NDS32_LO12S0_ORI_RELA:
+			do_reloc32(v, loc, 0x00000fff, 0, 0xfffff000, 0,
+				   NEED_SWAP);
+			break;
+
+		case R_NDS32_9_PCREL_RELA:
+			if (exceed_limit
+			    ((v - (Elf32_Addr) loc), 0x000000ff, module, rel,
+			     relindex, i))
+				return -ENOEXEC;
+			do_reloc16(v - (Elf32_Addr) loc, loc, 0x000001ff, 1,
+				   0xffffff00, 0, NEED_SWAP);
+			break;
+
+		case R_NDS32_15_PCREL_RELA:
+			if (exceed_limit
+			    ((v - (Elf32_Addr) loc), 0x00003fff, module, rel,
+			     relindex, i))
+				return -ENOEXEC;
+			do_reloc32(v - (Elf32_Addr) loc, loc, 0x00007fff, 1,
+				   0xffffc000, 0, NEED_SWAP);
+			break;
+
+		case R_NDS32_17_PCREL_RELA:
+			if (exceed_limit
+			    ((v - (Elf32_Addr) loc), 0x0000ffff, module, rel,
+			     relindex, i))
+				return -ENOEXEC;
+			do_reloc32(v - (Elf32_Addr) loc, loc, 0x0001ffff, 1,
+				   0xffff0000, 0, NEED_SWAP);
+			break;
+
+		case R_NDS32_25_PCREL_RELA:
+			if (exceed_limit
+			    ((v - (Elf32_Addr) loc), 0x00ffffff, module, rel,
+			     relindex, i))
+				return -ENOEXEC;
+			do_reloc32(v - (Elf32_Addr) loc, loc, 0x01ffffff, 1,
+				   0xff000000, 0, NEED_SWAP);
+			break;
+		case R_NDS32_WORD_9_PCREL_RELA:
+			if (exceed_limit
+			    ((v - (Elf32_Addr) loc), 0x000000ff, module, rel,
+			     relindex, i))
+				return -ENOEXEC;
+			do_reloc32(v - (Elf32_Addr) loc, loc, 0x000001ff, 1,
+				   0xffffff00, 0, NEED_SWAP);
+			break;
+
+		case R_NDS32_SDA15S3_RELA:
+		case R_NDS32_SDA15S2_RELA:
+		case R_NDS32_SDA15S1_RELA:
+		case R_NDS32_SDA15S0_RELA:
+			pr_err("%s: unsupported relocation type %d.\n",
+			       module->name, ELF32_R_TYPE(rel->r_info));
+			pr_err("Small data section access does not work in "
+			       "kernel space; please rebuild the kernel "
+			       "module with the gcc option -mcmodel=large.\n");
+			pr_err("section %d reloc %d offset 0x%x size %d\n",
+			       relindex, i, rel->r_offset, dstsec->sh_size);
+			break;
+
+		default:
+			pr_err("%s: unsupported relocation type %d.\n",
+			       module->name, ELF32_R_TYPE(rel->r_info));
+			pr_err("section %d reloc %d offset 0x%x size %d\n",
+			       relindex, i, rel->r_offset, dstsec->sh_size);
+		}
+	}
+	return 0;
+}
+
+int
+module_finalize(const Elf32_Ehdr * hdr, const Elf_Shdr * sechdrs,
+		struct module *module)
+{
+	return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+}
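
A worked example for the PC-relative cases: R_NDS32_25_PCREL_RELA requires the displacement to fit in 25 signed bits (roughly +/-16 MB) and stores bits 24:1 of it, since branch targets are halfword-aligned. Standalone check with made-up addresses:

#include <stdio.h>

int main(void)
{
	unsigned int loc = 0x90001000, v = 0x90200004;	/* hypothetical */
	int off = (int)(v - loc);

	if ((off < 0 ? ~off : off) & ~0x00ffffff)	/* exceed_limit() */
		printf("out of range\n");
	else						/* field = off >> 1 */
		printf("insn field 0x%06x\n", (off & 0x01ffffff) >> 1);
	return 0;					/* insn field 0x0ff802 */
}
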
diff --git a/arch/nds32/kernel/nds32_ksyms.c b/arch/nds32/kernel/nds32_ksyms.c
new file mode 100644
index 0000000..5ecebd0
--- /dev/null
+++ b/arch/nds32/kernel/nds32_ksyms.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/checksum.h>
+#include <asm/io.h>
+#include <asm/ftrace.h>
+#include <asm/proc-fns.h>
+
+/* mem functions */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memzero);
+
+/* user mem (segment) */
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
+EXPORT_SYMBOL(__arch_clear_user);
+
+/* cache handling */
+EXPORT_SYMBOL(cpu_icache_inval_all);
+EXPORT_SYMBOL(cpu_dcache_wbinval_all);
+EXPORT_SYMBOL(cpu_dma_inval_range);
+EXPORT_SYMBOL(cpu_dma_wb_range);
diff --git a/arch/nds32/kernel/process.c b/arch/nds32/kernel/process.c
new file mode 100644
index 0000000..65fda98
--- /dev/null
+++ b/arch/nds32/kernel/process.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/delay.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+#include <asm/elf.h>
+#include <asm/proc-fns.h>
+#include <linux/ptrace.h>
+#include <linux/reboot.h>
+
+extern void setup_mm_for_reboot(char mode);
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry *proc_dir_cpu;
+EXPORT_SYMBOL(proc_dir_cpu);
+#endif
+
+extern inline void arch_reset(char mode)
+{
+	if (mode == 's') {
+		/* Use cpu handler, jump to 0 */
+		cpu_reset(0);
+	}
+}
+
+void (*pm_power_off) (void);
+EXPORT_SYMBOL(pm_power_off);
+
+static char reboot_mode_nds32 = 'h';
+
+int __init reboot_setup(char *str)
+{
+	reboot_mode_nds32 = str[0];
+	return 1;
+}
+
+static int cpub_pwroff(void)
+{
+	return 0;
+}
+
+__setup("reboot=", reboot_setup);
+
+void machine_halt(void)
+{
+	cpub_pwroff();
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void machine_power_off(void)
+{
+	if (pm_power_off)
+		pm_power_off();
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+void machine_restart(char *cmd)
+{
+	/*
+	 * Clean and disable cache, and turn off interrupts
+	 */
+	cpu_proc_fin();
+
+	/*
+	 * Tell the mm system that we are going to reboot -
+	 * we may need it to insert some 1:1 mappings so that
+	 * soft boot works.
+	 */
+	setup_mm_for_reboot(reboot_mode_nds32);
+
+	/* Execute kernel restart handler call chain */
+	do_kernel_restart(cmd);
+
+	/*
+	 * Now call the architecture specific reboot code.
+	 */
+	arch_reset(reboot_mode_nds32);
+
+	/*
+	 * Whoops - the architecture was unable to reboot.
+	 * Tell the user!
+	 */
+	mdelay(1000);
+	pr_info("Reboot failed -- System halted\n");
+	while (1) ;
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void show_regs(struct pt_regs *regs)
+{
+	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
+	printk("LP is at %pS\n", (void *)regs->lp);
+	pr_info("pc : [<%08lx>]    lp : [<%08lx>]    %s\n"
+		"sp : %08lx  fp : %08lx  gp : %08lx\n",
+		instruction_pointer(regs),
+		regs->lp, print_tainted(), regs->sp, regs->fp, regs->gp);
+	pr_info("r25: %08lx  r24: %08lx\n", regs->uregs[25], regs->uregs[24]);
+
+	pr_info("r23: %08lx  r22: %08lx  r21: %08lx  r20: %08lx\n",
+		regs->uregs[23], regs->uregs[22],
+		regs->uregs[21], regs->uregs[20]);
+	pr_info("r19: %08lx  r18: %08lx  r17: %08lx  r16: %08lx\n",
+		regs->uregs[19], regs->uregs[18],
+		regs->uregs[17], regs->uregs[16]);
+	pr_info("r15: %08lx  r14: %08lx  r13: %08lx  r12: %08lx\n",
+		regs->uregs[15], regs->uregs[14],
+		regs->uregs[13], regs->uregs[12]);
+	pr_info("r11: %08lx  r10: %08lx  r9 : %08lx  r8 : %08lx\n",
+		regs->uregs[11], regs->uregs[10],
+		regs->uregs[9], regs->uregs[8]);
+	pr_info("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
+		regs->uregs[7], regs->uregs[6], regs->uregs[5], regs->uregs[4]);
+	pr_info("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
+		regs->uregs[3], regs->uregs[2], regs->uregs[1], regs->uregs[0]);
+	pr_info("  IRQs o%s  Segment %s\n",
+		interrupts_enabled(regs) ? "n" : "ff",
+		segment_eq(get_fs(), get_ds()) ? "kernel" : "user");
+}
+
+EXPORT_SYMBOL(show_regs);
+
+void flush_thread(void)
+{
+}
+
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
+int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+	    unsigned long stk_sz, struct task_struct *p)
+{
+	struct pt_regs *childregs = task_pt_regs(p);
+
+	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		memset(childregs, 0, sizeof(struct pt_regs));
+		/* kernel thread fn */
+		p->thread.cpu_context.r6 = stack_start;
+		/* kernel thread argument */
+		p->thread.cpu_context.r7 = stk_sz;
+	} else {
+		*childregs = *current_pt_regs();
+		if (stack_start)
+			childregs->sp = stack_start;
+		/* child get zero as ret. */
+		childregs->uregs[0] = 0;
+		childregs->osp = 0;
+		if (clone_flags & CLONE_SETTLS)
+			childregs->uregs[25] = childregs->uregs[3];
+	}
+	/* cpu context switching  */
+	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
+	p->thread.cpu_context.sp = (unsigned long)childregs;
+
+#ifdef CONFIG_HWZOL
+	childregs->lb = 0;
+	childregs->le = 0;
+	childregs->lc = 0;
+#endif
+
+	return 0;
+}
+
+/*
+ * fill in the fpe structure for a core dump...
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpu)
+{
+	int fpvalid = 0;
+	return fpvalid;
+}
+
+EXPORT_SYMBOL(dump_fpu);
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long fp, lr;
+	unsigned long stack_start, stack_end;
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	if (IS_ENABLED(CONFIG_FRAME_POINTER)) {
+		stack_start = (unsigned long)end_of_stack(p);
+		stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+
+		fp = thread_saved_fp(p);
+		do {
+			if (fp < stack_start || fp > stack_end)
+				return 0;
+			lr = ((unsigned long *)fp)[0];
+			if (!in_sched_functions(lr))
+				return lr;
+			fp = *(unsigned long *)(fp + 4);
+		} while (count++ < 16);
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(get_wchan);
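
get_wchan()'s loop assumes each frame stores the return address at [fp] and the caller's fp one word above it. A standalone demo of the same walk over a fabricated frame chain:

#include <stdio.h>

int main(void)
{
	unsigned long f2[2] = { 0x300, 0 };	/* outermost frame */
	unsigned long f1[2] = { 0x200, (unsigned long)f2 };
	unsigned long f0[2] = { 0x100, (unsigned long)f1 };
	unsigned long fp = (unsigned long)f0;
	int count = 0;

	do {
		printf("lr = 0x%lx\n", ((unsigned long *)fp)[0]);
		fp = ((unsigned long *)fp)[1];	/* == *(fp + 4) on nds32 */
	} while (fp && count++ < 16);
	return 0;				/* 0x100, 0x200, 0x300 */
}
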
diff --git a/arch/nds32/kernel/ptrace.c b/arch/nds32/kernel/ptrace.c
new file mode 100644
index 0000000..eaaf7a9
--- /dev/null
+++ b/arch/nds32/kernel/ptrace.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+#include <linux/sched/task_stack.h>
+
+enum nds32_regset {
+	REGSET_GPR,
+};
+
+static int gpr_get(struct task_struct *target,
+		   const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   void *kbuf, void __user * ubuf)
+{
+	struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs;
+	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1);
+}
+
+static int gpr_set(struct task_struct *target, const struct user_regset *regset,
+		   unsigned int pos, unsigned int count,
+		   const void *kbuf, const void __user * ubuf)
+{
+	int err;
+	struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
+
+	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
+	if (err)
+		return err;
+
+	task_pt_regs(target)->user_regs = newregs;
+	return 0;
+}
+
+static const struct user_regset nds32_regsets[] = {
+	[REGSET_GPR] = {
+			.core_note_type = NT_PRSTATUS,
+			.n = sizeof(struct user_pt_regs) / sizeof(u32),
+			.size = sizeof(elf_greg_t),
+			.align = sizeof(elf_greg_t),
+			.get = gpr_get,
+			.set = gpr_set}
+};
+
+static const struct user_regset_view nds32_user_view = {
+	.name = "nds32",
+	.e_machine = EM_NDS32,
+	.regsets = nds32_regsets,
+	.n = ARRAY_SIZE(nds32_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+	return &nds32_user_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+	user_disable_single_step(child);
+}
+
+/* arch_ptrace()
+ *
+ * Provide the arch-specific part of the ptrace service.
+ */
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+		 unsigned long data)
+{
+	int ret = -EIO;
+
+	switch (request) {
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+	struct pt_regs *regs;
+	regs = task_pt_regs(child);
+	regs->ipsw |= PSW_mskHSS;
+	set_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *child)
+{
+	struct pt_regs *regs;
+	regs = task_pt_regs(child);
+	regs->ipsw &= ~PSW_mskHSS;
+	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/* syscall_trace_enter() / syscall_trace_leave()
+ *
+ * Syscall tracing handlers, called on syscall entry and exit.
+ */
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+		if (tracehook_report_syscall_entry(regs))
+			forget_syscall(regs);
+	}
+	return regs->syscallno;
+}
+
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+	int step = test_thread_flag(TIF_SINGLESTEP);
+	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(regs, step);
+}
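For context: the REGSET_GPR entry above is what a debugger reaches through PTRACE_GETREGSET with NT_PRSTATUS. A minimal host-side sketch follows (illustrative only, not part of the patch; it assumes a tracee already stopped under ptrace, and the dump_gprs name and 32-word buffer are this note's own):

/* Illustrative sketch: read a stopped tracee's GPRs through the
 * regset interface implemented by gpr_get() above. */
#include <stdio.h>
#include <elf.h>		/* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>		/* struct iovec */

static int dump_gprs(pid_t pid)
{
	unsigned long regs[32];	/* assumed >= sizeof(struct user_pt_regs) */
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) < 0)
		return -1;

	/* gpr_get() copied the registers out; iov.iov_len now holds
	 * the number of bytes the kernel actually filled in. */
	printf("r0 = 0x%lx\n", regs[0]);
	return 0;
}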
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
new file mode 100644
index 0000000..63a1a5e
--- /dev/null
+++ b/arch/nds32/kernel/setup.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/cpu.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/memblock.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/proc-fns.h>
+#include <asm/cache_info.h>
+#include <asm/elf.h>
+#include <nds32_intrinsic.h>
+
+#define HWCAP_MFUSR_PC		0x000001
+#define HWCAP_EXT		0x000002
+#define HWCAP_EXT2		0x000004
+#define HWCAP_FPU		0x000008
+#define HWCAP_AUDIO		0x000010
+#define HWCAP_BASE16		0x000020
+#define HWCAP_STRING		0x000040
+#define HWCAP_REDUCED_REGS	0x000080
+#define HWCAP_VIDEO		0x000100
+#define HWCAP_ENCRYPT		0x000200
+#define HWCAP_EDM		0x000400
+#define HWCAP_LMDMA		0x000800
+#define HWCAP_PFM		0x001000
+#define HWCAP_HSMP		0x002000
+#define HWCAP_TRACE		0x004000
+#define HWCAP_DIV		0x008000
+#define HWCAP_MAC		0x010000
+#define HWCAP_L2C		0x020000
+#define HWCAP_FPU_DP		0x040000
+#define HWCAP_V2		0x080000
+#define HWCAP_DX_REGS		0x100000
+
+unsigned long cpu_id, cpu_rev, cpu_cfgid;
+char cpu_series;
+char *endianness = NULL;
+
+unsigned int __atags_pointer __initdata;
+unsigned int elf_hwcap;
+EXPORT_SYMBOL(elf_hwcap);
+
+/*
+ * The following string table must be kept in sync with the HWCAP_xx
+ * bitmask defined above.
+ */
+static const char *hwcap_str[] = {
+	"mfusr_pc",
+	"perf1",
+	"perf2",
+	"fpu",
+	"audio",
+	"16b",
+	"string",
+	"reduced_regs",
+	"video",
+	"encrypt",
+	"edm",
+	"lmdma",
+	"pfm",
+	"hsmp",
+	"trace",
+	"div",
+	"mac",
+	"l2c",
+	"dx_regs",
+	"v2",
+	NULL,
+};
+
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define WRITE_METHOD "write through"
+#else
+#define WRITE_METHOD "write back"
+#endif
+
+struct cache_info L1_cache_info[2];
+static void __init dump_cpu_info(int cpu)
+{
+	int i, p = 0;
+	char str[sizeof(hwcap_str) + 16];
+
+	for (i = 0; hwcap_str[i]; i++) {
+		if (elf_hwcap & (1 << i)) {
+			sprintf(str + p, "%s ", hwcap_str[i]);
+			p += strlen(hwcap_str[i]) + 1;
+		}
+	}
+
+	pr_info("CPU%d Features: %s\n", cpu, str);
+
+	L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE);
+	L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE);
+	L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE);
+	L1_cache_info[ICACHE].size =
+	    L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size *
+	    L1_cache_info[ICACHE].sets / 1024;
+	pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size,
+		L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways,
+		L1_cache_info[ICACHE].line_size);
+	L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE);
+	L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE);
+	L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE);
+	L1_cache_info[DCACHE].size =
+	    L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size *
+	    L1_cache_info[DCACHE].sets / 1024;
+	pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size,
+		L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways,
+		L1_cache_info[DCACHE].line_size);
+	pr_info("L1 D-Cache is %s\n", WRITE_METHOD);
+	if (L1_cache_info[DCACHE].line_size != L1_CACHE_BYTES)
+		pr_crit
+		    ("The cache line size (%d) of this processor is not the same as L1_CACHE_BYTES (%d).\n",
+		     L1_cache_info[DCACHE].line_size, L1_CACHE_BYTES);
+#ifdef CONFIG_CPU_CACHE_ALIASING
+	{
+		int aliasing_num;
+		aliasing_num =
+		    L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE /
+		    L1_cache_info[ICACHE].ways;
+		L1_cache_info[ICACHE].aliasing_num = aliasing_num;
+		L1_cache_info[ICACHE].aliasing_mask =
+		    (aliasing_num - 1) << PAGE_SHIFT;
+		aliasing_num =
+		    L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE /
+		    L1_cache_info[DCACHE].ways;
+		L1_cache_info[DCACHE].aliasing_num = aliasing_num;
+		L1_cache_info[DCACHE].aliasing_mask =
+		    (aliasing_num - 1) << PAGE_SHIFT;
+	}
+#endif
+}
+
+static void __init setup_cpuinfo(void)
+{
+	unsigned long tmp = 0, cpu_name;
+
+	cpu_dcache_inval_all();
+	cpu_icache_inval_all();
+	__nds32__isb();
+
+	cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
+	cpu_name = ((cpu_id) & 0xf0) >> 4;
+	cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N';
+	cpu_id = cpu_id & 0xf;
+	cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV;
+	cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID;
+
+	pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n",
+		cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER), cpu_id, cpu_rev, cpu_cfgid);
+
+	elf_hwcap |= HWCAP_MFUSR_PC;
+
+	if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) {
+		if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV)
+			elf_hwcap |= HWCAP_DIV;
+
+		if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC)
+		    || (cpu_id == 12 && cpu_rev < 4))
+			elf_hwcap |= HWCAP_MAC;
+	} else {
+		elf_hwcap |= HWCAP_V2;
+		elf_hwcap |= HWCAP_DIV;
+		elf_hwcap |= HWCAP_MAC;
+	}
+
+	if (cpu_cfgid & 0x0001)
+		elf_hwcap |= HWCAP_EXT;
+
+	if (cpu_cfgid & 0x0002)
+		elf_hwcap |= HWCAP_BASE16;
+
+	if (cpu_cfgid & 0x0004)
+		elf_hwcap |= HWCAP_EXT2;
+
+	if (cpu_cfgid & 0x0008)
+		elf_hwcap |= HWCAP_FPU;
+
+	if (cpu_cfgid & 0x0010)
+		elf_hwcap |= HWCAP_STRING;
+
+	if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE)
+		endianness = "MSB";
+	else
+		endianness = "LSB";
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM)
+		elf_hwcap |= HWCAP_EDM;
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA)
+		elf_hwcap |= HWCAP_LMDMA;
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM)
+		elf_hwcap |= HWCAP_PFM;
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP)
+		elf_hwcap |= HWCAP_HSMP;
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE)
+		elf_hwcap |= HWCAP_TRACE;
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO)
+		elf_hwcap |= HWCAP_AUDIO;
+
+	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
+		elf_hwcap |= HWCAP_L2C;
+
+	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
+	if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
+		tmp |= CACHE_CTL_mskDC_EN;
+
+	if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE))
+		tmp |= CACHE_CTL_mskIC_EN;
+	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
+
+	dump_cpu_info(smp_processor_id());
+}
+
+static void __init setup_memory(void)
+{
+	unsigned long ram_start_pfn;
+	unsigned long free_ram_start_pfn;
+	phys_addr_t memory_start, memory_end;
+	struct memblock_region *region;
+
+	memory_end = memory_start = 0;
+
+	/* Find the main memory region where the kernel lives */
+	for_each_memblock(memory, region) {
+		memory_start = region->base;
+		memory_end = region->base + region->size;
+		pr_info("%s: Memory: 0x%x-0x%x\n", __func__,
+			memory_start, memory_end);
+	}
+
+	if (!memory_end)
+		panic("No memory!");
+
+	ram_start_pfn = PFN_UP(memblock_start_of_DRAM());
+	/* free_ram_start_pfn is the first page after the kernel image */
+	free_ram_start_pfn = PFN_UP(__pa(&_end));
+	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
+	/* the following may lower max_pfn when RAM exceeds the lowmem limit */
+	if (max_pfn - ram_start_pfn <= MAXMEM_PFN)
+		max_low_pfn = max_pfn;
+	else {
+		max_low_pfn = MAXMEM_PFN + ram_start_pfn;
+		if (!IS_ENABLED(CONFIG_HIGHMEM))
+			max_pfn = MAXMEM_PFN + ram_start_pfn;
+	}
+	/* high_memory bounds lowmem; the vmalloc area lives above it */
+	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
+	min_low_pfn = free_ram_start_pfn;
+
+	/*
+	 * initialize the boot-time allocator (with low memory only).
+	 *
+	 * This makes the memory from the end of the kernel to the end of
+	 * RAM usable.
+	 */
+	memblock_set_bottom_up(true);
+	memblock_reserve(PFN_PHYS(ram_start_pfn), PFN_PHYS(free_ram_start_pfn - ram_start_pfn));
+
+	early_init_fdt_reserve_self();
+	early_init_fdt_scan_reserved_mem();
+
+	memblock_dump_all();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+	early_init_devtree(__atags_pointer ?
+			   phys_to_virt(__atags_pointer) : __dtb_start);
+
+	setup_cpuinfo();
+
+	init_mm.start_code = (unsigned long)&_stext;
+	init_mm.end_code = (unsigned long)&_etext;
+	init_mm.end_data = (unsigned long)&_edata;
+	init_mm.brk = (unsigned long)&_end;
+
+	/* setup bootmem allocator */
+	setup_memory();
+
+	/* paging_init() sets up the MMU and marks all pages as reserved */
+	paging_init();
+
+	/* invalidate all TLB entries because the new mapping is created */
+	__nds32__tlbop_flua();
+
+	/* use generic way to parse */
+	parse_early_param();
+
+	unflatten_and_copy_device_tree();
+
+	if (IS_ENABLED(CONFIG_VT)) {
+		if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
+			conswitchp = &dummy_con;
+	}
+
+	*cmdline_p = boot_command_line;
+	early_trap_init();
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n",
+		   cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid);
+
+	seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n",
+		   CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) *
+		   CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE),
+		   CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE));
+
+	seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n",
+		   CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) *
+		   CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE),
+		   CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE));
+
+	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+		   loops_per_jiffy / (500000 / HZ),
+		   (loops_per_jiffy / (5000 / HZ)) % 100);
+
+	/* dump out the processor features */
+	seq_puts(m, "Features\t: ");
+
+	for (i = 0; hwcap_str[i]; i++)
+		if (elf_hwcap & (1 << i))
+			seq_printf(m, "%s ", hwcap_str[i]);
+
+	seq_puts(m, "\n\n");
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t * pos)
+{
+	return *pos < 1 ? (void *)1 : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t * pos)
+{
+	++*pos;
+	return NULL;
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+struct seq_operations cpuinfo_op = {
+	.start = c_start,
+	.next = c_next,
+	.stop = c_stop,
+	.show = c_show
+};
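For reference, the two BogoMIPS terms above print the integer part and two fractional digits of loops_per_jiffy * HZ / 500000: with HZ = 100 and loops_per_jiffy = 2493440, for example, c_show() emits "498.68".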
diff --git a/arch/nds32/kernel/signal.c b/arch/nds32/kernel/signal.c
new file mode 100644
index 0000000..5d01f6e
--- /dev/null
+++ b/arch/nds32/kernel/signal.c
@@ -0,0 +1,324 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/personality.h>
+#include <linux/freezer.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/ucontext.h>
+#include <asm/unistd.h>
+
+#include <asm/ptrace.h>
+#include <asm/vdso.h>
+
+struct rt_sigframe {
+	struct siginfo info;
+	struct ucontext uc;
+};
+
+static int restore_sigframe(struct pt_regs *regs,
+			    struct rt_sigframe __user * sf)
+{
+	sigset_t set;
+	int err;
+
+	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+	if (err == 0) {
+		set_current_blocked(&set);
+	}
+
+	__get_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
+	__get_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
+	__get_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
+	__get_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
+	__get_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
+	__get_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
+	__get_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
+	__get_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
+	__get_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
+	__get_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
+	__get_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
+	__get_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
+	__get_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
+	__get_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
+	__get_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
+	__get_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
+	__get_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
+	__get_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
+	__get_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
+	__get_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
+	__get_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
+	__get_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
+	__get_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
+	__get_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
+	__get_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
+	__get_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);
+
+	__get_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
+	__get_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
+	__get_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
+	__get_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
+	__get_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
+#if defined(CONFIG_HWZOL)
+	__get_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
+	__get_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
+	__get_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
+#endif
+
+	/*
+	 * Avoid sys_rt_sigreturn() restarting.
+	 */
+	forget_syscall(regs);
+	return err;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	/*
+	 * Since we stacked the signal on a 64-bit boundary, 'sp' should
+	 * be two-word aligned here.  If it's not, the user is trying to
+	 * mess with us.
+	 */
+	if (regs->sp & 7)
+		goto badframe;
+
+	frame = (struct rt_sigframe __user *)regs->sp;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+
+	if (restore_sigframe(regs, frame))
+		goto badframe;
+
+	if (restore_altstack(&frame->uc.uc_stack))
+		goto badframe;
+
+	return regs->uregs[0];
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+static int
+setup_sigframe(struct rt_sigframe __user * sf, struct pt_regs *regs,
+	       sigset_t * set)
+{
+	int err = 0;
+
+	__put_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
+	__put_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
+	__put_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
+	__put_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
+	__put_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
+	__put_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
+	__put_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
+	__put_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
+	__put_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
+	__put_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
+	__put_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
+	__put_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
+	__put_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
+	__put_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
+	__put_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
+	__put_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
+	__put_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
+	__put_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
+	__put_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
+	__put_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
+	__put_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
+
+	__put_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
+	__put_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
+	__put_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
+	__put_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
+	__put_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);
+	__put_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
+	__put_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
+	__put_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
+	__put_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
+	__put_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
+#if defined(CONFIG_HWZOL)
+	__put_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
+	__put_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
+	__put_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
+#endif
+
+	__put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no,
+			 err);
+	__put_user_error(current->thread.error_code,
+			 &sf->uc.uc_mcontext.error_code, err);
+	__put_user_error(current->thread.address,
+			 &sf->uc.uc_mcontext.fault_address, err);
+	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+
+	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
+
+	return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+					struct pt_regs *regs, int framesize)
+{
+	unsigned long sp;
+
+	/* Default to using normal stack */
+	sp = regs->sp;
+
+	/*
+	 * If we are on the alternate signal stack and would overflow it, don't.
+	 * Return an always-bogus address instead so we will die with SIGSEGV.
+	 */
+	if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+		return (void __user __force *)(-1UL);
+
+	/* This is the X/Open sanctioned signal stack switching. */
+	sp = (sigsp(sp, ksig) - framesize);
+
+	/*
+	 * nds32 mandates 8-byte alignment
+	 */
+	sp &= ~0x7UL;
+
+	return (void __user *)sp;
+}
+
+static int
+setup_return(struct pt_regs *regs, struct ksignal *ksig, void __user * frame)
+{
+	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
+	unsigned long retcode;
+
+	retcode = VDSO_SYMBOL(current->mm->context.vdso, rt_sigtramp);
+	regs->uregs[0] = ksig->sig;
+	regs->sp = (unsigned long)frame;
+	regs->lp = retcode;
+	regs->ipc = handler;
+
+	return 0;
+}
+
+static int
+setup_rt_frame(struct ksignal *ksig, sigset_t * set, struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame =
+	    get_sigframe(ksig, regs, sizeof(*frame));
+	int err = 0;
+
+	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+		return -EFAULT;
+
+	__put_user_error(0, &frame->uc.uc_flags, err);
+	__put_user_error(NULL, &frame->uc.uc_link, err);
+
+	err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+	err |= setup_sigframe(frame, regs, set);
+	if (err == 0) {
+		setup_return(regs, ksig, frame);
+		if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
+			err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+			regs->uregs[1] = (unsigned long)&frame->info;
+			regs->uregs[2] = (unsigned long)&frame->uc;
+		}
+	}
+	return err;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+	int ret;
+	sigset_t *oldset = sigmask_to_save();
+
+	if (in_syscall(regs)) {
+		/* Avoid additional syscall restarting via ret_slow_syscall. */
+		forget_syscall(regs);
+
+		switch (regs->uregs[0]) {
+		case -ERESTART_RESTARTBLOCK:
+		case -ERESTARTNOHAND:
+			regs->uregs[0] = -EINTR;
+			break;
+		case -ERESTARTSYS:
+			if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+				regs->uregs[0] = -EINTR;
+				break;
+			}
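+			/* fall through */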
+		case -ERESTARTNOINTR:
+			regs->uregs[0] = regs->orig_r0;
+			regs->ipc -= 4;
+			break;
+		}
+	}
+	/*
+	 * Set up the stack frame
+	 */
+	ret = setup_rt_frame(ksig, oldset, regs);
+
+	signal_setup_done(ret, ksig, 0);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle.  Thus you cannot kill init with a SIGKILL, even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+	struct ksignal ksig;
+
+	if (get_signal(&ksig)) {
+		handle_signal(&ksig, regs);
+		return;
+	}
+
+	/*
+	 * If we were from a system call, check for system call restarting...
+	 */
+	if (in_syscall(regs)) {
+		/* Restart the system call - no handlers present */
+
+		/* Avoid additional syscall restarting via ret_slow_syscall. */
+		forget_syscall(regs);
+
+		switch (regs->uregs[0]) {
+		case -ERESTART_RESTARTBLOCK:
+			regs->uregs[15] = __NR_restart_syscall;
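+			/* fall through */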
+		case -ERESTARTNOHAND:
+		case -ERESTARTSYS:
+		case -ERESTARTNOINTR:
+			regs->uregs[0] = regs->orig_r0;
+			regs->ipc -= 0x4;
+			break;
+		}
+	}
+	restore_saved_sigmask();
+}
+
+asmlinkage void
+do_notify_resume(struct pt_regs *regs, unsigned int thread_flags)
+{
+	if (thread_flags & _TIF_SIGPENDING)
+		do_signal(regs);
+
+	if (thread_flags & _TIF_NOTIFY_RESUME) {
+		clear_thread_flag(TIF_NOTIFY_RESUME);
+		tracehook_notify_resume(regs);
+	}
+}
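setup_return() above arranges uregs[0..2] so that the handler receives the usual three SA_SIGINFO arguments. A minimal user-space sketch of the other end (illustrative only, not part of the patch; the handler/install_handler names are this note's own, and the printf in the handler is purely for demonstration since it is not async-signal-safe):

/* Illustrative sketch: a three-argument handler matching the frame
 * that setup_rt_frame()/setup_return() build. */
#include <signal.h>
#include <stdio.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	/* sig arrives in r0, info in r1, ucontext in r2 */
	printf("signal %d from pid %d\n", sig, (int)info->si_pid);
}

static int install_handler(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	/* SA_RESTART makes the -ERESTARTSYS case above restart the syscall */
	sa.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&sa.sa_mask);
	return sigaction(SIGUSR1, &sa, NULL);
}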
diff --git a/arch/nds32/kernel/stacktrace.c b/arch/nds32/kernel/stacktrace.c
new file mode 100644
index 0000000..d974c0c
--- /dev/null
+++ b/arch/nds32/kernel/stacktrace.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	unsigned long *fpn;
+	int skip = trace->skip;
+	int savesched;
+	int graph_idx = 0;
+
+	if (tsk == current) {
+		__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
+		savesched = 1;
+	} else {
+		fpn = (unsigned long *)thread_saved_fp(tsk);
+		savesched = 0;
+	}
+
+	while (!kstack_end(fpn) && !((unsigned long)fpn & 0x3)
+	       && (fpn >= (unsigned long *)TASK_SIZE)) {
+		unsigned long lpp, fpp;
+
+		lpp = fpn[LP_OFFSET];
+		fpp = fpn[FP_OFFSET];
+		if (!__kernel_text_address(lpp))
+			break;
+		else
+			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
+
+		if (savesched || !in_sched_functions(lpp)) {
+			if (skip) {
+				skip--;
+			} else {
+				trace->entries[trace->nr_entries++] = lpp;
+				if (trace->nr_entries >= trace->max_entries)
+					break;
+			}
+		}
+		fpn = (unsigned long *)fpp;
+	}
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
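A typical in-kernel caller fills a struct stack_trace and hands it to save_stack_trace(); a minimal sketch (illustrative only, not part of the patch; show_current_stack is a hypothetical caller using the generic <linux/stacktrace.h> helpers of this kernel version):

/* Illustrative sketch: capture and print the current call chain. */
#include <linux/kernel.h>
#include <linux/stacktrace.h>

static void show_current_stack(void)
{
	unsigned long entries[8];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 1,	/* drop this function's own frame */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}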
diff --git a/arch/nds32/kernel/sys_nds32.c b/arch/nds32/kernel/sys_nds32.c
new file mode 100644
index 0000000..9de93ab
--- /dev/null
+++ b/arch/nds32/kernel/sys_nds32.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/cachectl.h>
+#include <asm/proc-fns.h>
+
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+	       unsigned long, prot, unsigned long, flags,
+	       unsigned long, fd, unsigned long, pgoff)
+{
+	if (pgoff & (~PAGE_MASK >> 12))
+		return -EINVAL;
+
+	return sys_mmap_pgoff(addr, len, prot, flags, fd,
+			      pgoff >> (PAGE_SHIFT - 12));
+}
+
+SYSCALL_DEFINE4(fadvise64_64_wrapper, int, fd, int, advice, loff_t, offset,
+		loff_t, len)
+{
+	return sys_fadvise64_64(fd, offset, len, advice);
+}
+
+SYSCALL_DEFINE3(cacheflush, unsigned int, start, unsigned int, end, int, cache)
+{
+	struct vm_area_struct *vma;
+	bool flushi = true, wbd = true;
+
+	vma = find_vma(current->mm, start);
+	if (!vma)
+		return -EFAULT;
+	switch (cache) {
+	case ICACHE:
+		wbd = false;
+		break;
+	case DCACHE:
+		flushi = false;
+		break;
+	case BCACHE:
+		break;
+	default:
+		return -EINVAL;
+	}
+	cpu_cache_wbinval_range_check(vma, start, end, flushi, wbd);
+
+	return 0;
+}
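User space reaches the handler above through the nds32-specific cacheflush syscall; the classic use is making JIT-generated code visible to the I-cache. A minimal sketch (illustrative only, not part of the patch; flush_code_range is a hypothetical wrapper, and __NR_cacheflush plus the cache selectors come from the nds32 uapi headers):

/* Illustrative sketch: synchronize caches over a freshly written
 * code range before jumping to it. */
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>	/* ICACHE, DCACHE, BCACHE */

static long flush_code_range(void *start, void *end)
{
	/* BCACHE: write back the D-cache and invalidate the I-cache */
	return syscall(__NR_cacheflush, (unsigned long)start,
		       (unsigned long)end, BCACHE);
}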
diff --git a/arch/nds32/kernel/syscall_table.c b/arch/nds32/kernel/syscall_table.c
new file mode 100644
index 0000000..7879c06
--- /dev/null
+++ b/arch/nds32/kernel/syscall_table.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
+#define sys_fadvise64_64 sys_fadvise64_64_wrapper
+void *sys_call_table[__NR_syscalls] __aligned(8192) = {
+	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
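The table relies on two GCC extensions: the [first ... last] range designator and the rule that a later designated initializer for the same index overrides an earlier one, which is how the entries generated by __SYSCALL replace the sys_ni_syscall default. A minimal model (illustrative only; tbl is this note's own):

/* Illustrative sketch: range designators with a later override. */
static void *tbl[4] = {
	[0 ... 3] = (void *)0,	/* default every slot */
	[2] = (void *)1,	/* a later designator overrides slot 2 */
};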
diff --git a/arch/nds32/kernel/time.c b/arch/nds32/kernel/time.c
new file mode 100644
index 0000000..ac9d78c
--- /dev/null
+++ b/arch/nds32/kernel/time.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/clocksource.h>
+#include <linux/clk-provider.h>
+
+void __init time_init(void)
+{
+	of_clk_init(NULL);
+	timer_probe();
+}
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
new file mode 100644
index 0000000..1496aab
--- /dev/null
+++ b/arch/nds32/kernel/traps.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/personality.h>
+#include <linux/kallsyms.h>
+#include <linux/hardirq.h>
+#include <linux/kdebug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+
+#include <asm/proc-fns.h>
+#include <asm/unistd.h>
+
+#include <linux/ptrace.h>
+#include <nds32_intrinsic.h>
+
+extern void show_pte(struct mm_struct *mm, unsigned long addr);
+
+/*
+ * Dump out the contents of some memory nicely...
+ */
+void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
+{
+	unsigned long first;
+	mm_segment_t fs;
+	int i;
+
+	/*
+	 * We need to switch to kernel mode so that we can use __get_user
+	 * to safely read from kernel space.  Note that we now dump the
+	 * code first, just in case the backtrace kills us.
+	 */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);
+
+	for (first = bottom & ~31; first < top; first += 32) {
+		unsigned long p;
+		char str[sizeof(" 12345678") * 8 + 1];
+
+		memset(str, ' ', sizeof(str));
+		str[sizeof(str) - 1] = '\0';
+
+		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
+			if (p >= bottom && p < top) {
+				unsigned long val;
+				if (__get_user(val, (unsigned long *)p) == 0)
+					sprintf(str + i * 9, " %08lx", val);
+				else
+					sprintf(str + i * 9, " ????????");
+			}
+		}
+		pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
+	}
+
+	set_fs(fs);
+}
+EXPORT_SYMBOL(dump_mem);
+
+static void dump_instr(struct pt_regs *regs)
+{
+	unsigned long addr = instruction_pointer(regs);
+	mm_segment_t fs;
+	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
+	int i;
+
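+	/* This early return leaves the code dump below disabled. */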
+	return;
+	/*
+	 * We need to switch to kernel mode so that we can use __get_user
+	 * to safely read from kernel space.  Note that we now dump the
+	 * code first, just in case the backtrace kills us.
+	 */
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+
+	pr_emerg("Code: ");
+	for (i = -4; i < 1; i++) {
+		unsigned int val, bad;
+
+		bad = __get_user(val, &((u32 *) addr)[i]);
+
+		if (!bad) {
+			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
+		} else {
+			p += sprintf(p, "bad PC value");
+			break;
+		}
+	}
+	pr_emerg("Code: %s\n", str);
+
+	set_fs(fs);
+}
+
+#define LOOP_TIMES (100)
+static void __dump(struct task_struct *tsk, unsigned long *base_reg)
+{
+	unsigned long ret_addr;
+	int cnt = LOOP_TIMES, graph = 0;
+	pr_emerg("Call Trace:\n");
+	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
+		while (!kstack_end(base_reg)) {
+			ret_addr = *base_reg++;
+			if (__kernel_text_address(ret_addr)) {
+				ret_addr = ftrace_graph_ret_addr(
+						tsk, &graph, ret_addr, NULL);
+				print_ip_sym(ret_addr);
+			}
+			if (--cnt < 0)
+				break;
+		}
+	} else {
+		while (!kstack_end((void *)base_reg) &&
+		       !((unsigned long)base_reg & 0x3) &&
+		       ((unsigned long)base_reg >= TASK_SIZE)) {
+			unsigned long next_fp;
+			ret_addr = base_reg[LP_OFFSET];
+			next_fp = base_reg[FP_OFFSET];
+			if (__kernel_text_address(ret_addr)) {
+
+				ret_addr = ftrace_graph_ret_addr(
+						tsk, &graph, ret_addr, NULL);
+				print_ip_sym(ret_addr);
+			}
+			if (--cnt < 0)
+				break;
+			base_reg = (unsigned long *)next_fp;
+		}
+	}
+	pr_emerg("\n");
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+	unsigned long *base_reg;
+
+	if (!tsk)
+		tsk = current;
+	if (!IS_ENABLED(CONFIG_FRAME_POINTER)) {
+		if (tsk != current)
+			base_reg = (unsigned long *)(tsk->thread.cpu_context.sp);
+		else
+			__asm__ __volatile__("\tori\t%0, $sp, #0\n":"=r"(base_reg));
+	} else {
+		if (tsk != current)
+			base_reg = (unsigned long *)(tsk->thread.cpu_context.fp);
+		else
+			__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(base_reg));
+	}
+	__dump(tsk, base_reg);
+	barrier();
+}
+
+DEFINE_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+	struct task_struct *tsk = current;
+	static int die_counter;
+
+	console_verbose();
+	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+
+	pr_emerg("Internal error: %s: %x [#%d]\n", str, err, ++die_counter);
+	print_modules();
+	pr_emerg("CPU: %i\n", smp_processor_id());
+	show_regs(regs);
+	pr_emerg("Process %s (pid: %d, stack limit = 0x%p)\n",
+		 tsk->comm, tsk->pid, end_of_stack(tsk));
+
+	if (!user_mode(regs) || in_interrupt()) {
+		dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
+		dump_instr(regs);
+		dump_stack();
+	}
+
+	bust_spinlocks(0);
+	spin_unlock_irq(&die_lock);
+	do_exit(SIGSEGV);
+}
+EXPORT_SYMBOL(die);
+
+void die_if_kernel(const char *str, struct pt_regs *regs, int err)
+{
+	if (user_mode(regs))
+		return;
+
+	die(str, regs, err);
+}
+
+int bad_syscall(int n, struct pt_regs *regs)
+{
+	if (current->personality != PER_LINUX) {
+		send_sig(SIGSEGV, current, 1);
+		return regs->uregs[0];
+	}
+
+	force_sig_fault(SIGILL, ILL_ILLTRP,
+			(void __user *)instruction_pointer(regs) - 4, current);
+	die_if_kernel("Oops - bad syscall", regs, n);
+	return regs->uregs[0];
+}
+
+void __pte_error(const char *file, int line, unsigned long val)
+{
+	pr_emerg("%s:%d: bad pte %08lx.\n", file, line, val);
+}
+
+void __pmd_error(const char *file, int line, unsigned long val)
+{
+	pr_emerg("%s:%d: bad pmd %08lx.\n", file, line, val);
+}
+
+void __pgd_error(const char *file, int line, unsigned long val)
+{
+	pr_emerg("%s:%d: bad pgd %08lx.\n", file, line, val);
+}
+
+extern char *exception_vector, *exception_vector_end;
+void __init trap_init(void)
+{
+	return;
+}
+
+void __init early_trap_init(void)
+{
+	unsigned long ivb = 0;
+	unsigned long base = PAGE_OFFSET;
+
+	memcpy((unsigned long *)base, (unsigned long *)&exception_vector,
+	       ((unsigned long)&exception_vector_end -
+		(unsigned long)&exception_vector));
+	ivb = __nds32__mfsr(NDS32_SR_IVB);
+	/* Check platform support. */
+	if (((ivb & IVB_mskNIVIC) >> IVB_offNIVIC) < 2)
+		panic
+		    ("IVIC mode is not allowed on a platform with an interrupt controller\n");
+	__nds32__mtsr((ivb & ~IVB_mskESZ) | (IVB_valESZ16 << IVB_offESZ) |
+		      IVB_BASE, NDS32_SR_IVB);
+	__nds32__mtsr(INT_MASK_INITAIAL_VAL, NDS32_SR_INT_MASK);
+
+	/*
+	 * 0x800 = 128 vectors * 16 bytes each, well within one page,
+	 * so flushing a single page is enough.
+	 */
+	cpu_cache_wbinval_page(base, true);
+}
+
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code)
+{
+	tsk->thread.trap_no = ENTRY_DEBUG_RELATED;
+	tsk->thread.error_code = error_code;
+
+	force_sig_fault(SIGTRAP, si_code,
+			(void __user *)instruction_pointer(regs), tsk);
+}
+
+void do_debug_trap(unsigned long entry, unsigned long addr,
+		   unsigned long type, struct pt_regs *regs)
+{
+	if (notify_die(DIE_OOPS, "Oops", regs, addr, type, SIGTRAP)
+	    == NOTIFY_STOP)
+		return;
+
+	if (user_mode(regs)) {
+		/* trap_signal */
+		send_sigtrap(current, regs, 0, TRAP_BRKPT);
+	} else {
+		/* kernel_trap */
+		if (!fixup_exception(regs))
+			die("unexpected kernel_trap", regs, 0);
+	}
+}
+
+void unhandled_interruption(struct pt_regs *regs)
+{
+	pr_emerg("unhandled_interruption\n");
+	show_regs(regs);
+	if (!user_mode(regs))
+		do_exit(SIGKILL);
+	force_sig(SIGKILL, current);
+}
+
+void unhandled_exceptions(unsigned long entry, unsigned long addr,
+			  unsigned long type, struct pt_regs *regs)
+{
+	pr_emerg("Unhandled Exception: entry: %lx addr:%lx itype:%lx\n", entry,
+		 addr, type);
+	show_regs(regs);
+	if (!user_mode(regs))
+		do_exit(SIGKILL);
+	force_sig(SIGKILL, current);
+}
+
+extern int do_page_fault(unsigned long entry, unsigned long addr,
+			 unsigned int error_code, struct pt_regs *regs);
+
+/*
+ * 2:DEF, dispatch for the TLB MISC exception handler
+ */
+
+void do_dispatch_tlb_misc(unsigned long entry, unsigned long addr,
+			  unsigned long type, struct pt_regs *regs)
+{
+	type = type & (ITYPE_mskINST | ITYPE_mskETYPE);
+	if ((type & ITYPE_mskETYPE) < 5) {
+		/* Permission exceptions */
+		do_page_fault(entry, addr, type, regs);
+	} else
+		unhandled_exceptions(entry, addr, type, regs);
+}
+
+void do_revinsn(struct pt_regs *regs)
+{
+	pr_emerg("Reserved Instruction\n");
+	show_regs(regs);
+	if (!user_mode(regs))
+		do_exit(SIGILL);
+	force_sig(SIGILL, current);
+}
+
+#ifdef CONFIG_ALIGNMENT_TRAP
+extern int unalign_access_mode;
+extern int do_unaligned_access(unsigned long addr, struct pt_regs *regs);
+#endif
+void do_dispatch_general(unsigned long entry, unsigned long addr,
+			 unsigned long itype, struct pt_regs *regs,
+			 unsigned long oipc)
+{
+	unsigned int swid = itype >> ITYPE_offSWID;
+	unsigned long type = itype & (ITYPE_mskINST | ITYPE_mskETYPE);
+	if (type == ETYPE_ALIGNMENT_CHECK) {
+#ifdef CONFIG_ALIGNMENT_TRAP
+		/* Alignment check */
+		if (user_mode(regs) && unalign_access_mode) {
+			int ret;
+			ret = do_unaligned_access(addr, regs);
+
+			if (ret == 0)
+				return;
+
+			if (ret == -EFAULT)
+				pr_emerg
+				    ("Unhandled unaligned access exception\n");
+		}
+#endif
+		do_page_fault(entry, addr, type, regs);
+	} else if (type == ETYPE_RESERVED_INSTRUCTION) {
+		/* Reserved instruction */
+		do_revinsn(regs);
+	} else if (type == ETYPE_TRAP && swid == SWID_RAISE_INTERRUPT_LEVEL) {
+		/* trap: used as a workaround for v3 EDM target debugging */
+		/*
+		 * DIPC (OIPC) is passed as a parameter before interrupts
+		 * are enabled, so DIPC cannot be corrupted by incoming
+		 * interrupts.
+		 */
+		/*
+		 * 1. update ipc
+		 * 2. update pt_regs ipc with oipc
+		 * 3. update pt_regs ipsw (clear DEX)
+		 */
+		__asm__ volatile ("mtsr %0, $IPC\n\t"::"r" (oipc));
+		regs->ipc = oipc;
+		if (regs->pipsw & PSW_mskDEX) {
+			pr_emerg
+			    ("A nested debug exception may have occurred\n");
+			pr_emerg("ipc:%08x pipc:%08x\n",
+				 (unsigned int)regs->ipc,
+				 (unsigned int)regs->pipc);
+		}
+		do_debug_trap(entry, addr, itype, regs);
+		regs->ipsw &= ~PSW_mskDEX;
+	} else
+		unhandled_exceptions(entry, addr, type, regs);
+}
diff --git a/arch/nds32/kernel/vdso.c b/arch/nds32/kernel/vdso.c
new file mode 100644
index 0000000..016f158
--- /dev/null
+++ b/arch/nds32/kernel/vdso.c
@@ -0,0 +1,230 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/cache.h>
+#include <linux/clocksource.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+#include <linux/vmalloc.h>
+#include <linux/random.h>
+
+#include <asm/cacheflush.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+#include <asm/vdso_timer_info.h>
+#include <asm/cache_info.h>
+extern struct cache_info L1_cache_info[2];
+extern char vdso_start[], vdso_end[];
+static unsigned long vdso_pages __ro_after_init;
+static unsigned long timer_mapping_base;
+
+struct timer_info_t timer_info = {
+	.cycle_count_down = true,
+	.mapping_base = EMPTY_TIMER_MAPPING,
+	.cycle_count_reg_offset = EMPTY_REG_OFFSET
+};
+static struct page *no_pages[] = { NULL };
+
+/*
+ * The vDSO data page.
+ */
+static union {
+	struct vdso_data data;
+	u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
+static struct vm_special_mapping vdso_spec[2] __ro_after_init = {
+	{
+	 .name = "[vvar]",
+	 .pages = no_pages,
+	 },
+	{
+	 .name = "[vdso]",
+	 },
+};
+
+static void get_timer_node_info(void)
+{
+	timer_mapping_base = timer_info.mapping_base;
+	vdso_data->cycle_count_offset =
+		timer_info.cycle_count_reg_offset;
+	vdso_data->cycle_count_down =
+		timer_info.cycle_count_down;
+}
+
+static int __init vdso_init(void)
+{
+	int i;
+	struct page **vdso_pagelist;
+
+	if (memcmp(vdso_start, "\177ELF", 4)) {
+		pr_err("vDSO is not a valid ELF object!\n");
+		return -EINVAL;
+	}
+	/* Create a timer I/O mapping to read the clock cycle counter */
+	get_timer_node_info();
+
+	vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+	pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+		vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
+
+	/* Allocate the vDSO pagelist */
+	vdso_pagelist = kcalloc(vdso_pages, sizeof(struct page *), GFP_KERNEL);
+	if (vdso_pagelist == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < vdso_pages; i++)
+		vdso_pagelist[i] = virt_to_page(vdso_start + i * PAGE_SIZE);
+	vdso_spec[1].pages = &vdso_pagelist[0];
+
+	return 0;
+}
+
+arch_initcall(vdso_init);
+
+static unsigned long vdso_random_addr(unsigned long vdso_mapping_len)
+{
+	unsigned long start = current->mm->mmap_base, end, offset, addr;
+	start = PAGE_ALIGN(start);
+
+	/* Round the lowest possible end address up to a PMD boundary. */
+	end = (start + vdso_mapping_len + PMD_SIZE - 1) & PMD_MASK;
+	if (end >= TASK_SIZE)
+		end = TASK_SIZE;
+	end -= vdso_mapping_len;
+
+	if (end > start) {
+		offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+		addr = start + (offset << PAGE_SHIFT);
+	} else {
+		addr = start;
+	}
+	return addr;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+	struct vm_area_struct *vma;
+	unsigned long addr = 0;
+	pgprot_t prot;
+	int ret, vvar_page_num = 2;
+
+	vdso_text_len = vdso_pages << PAGE_SHIFT;
+
+	if (timer_mapping_base == EMPTY_VALUE)
+		vvar_page_num = 1;
+	/* Be sure to map the data page */
+	vdso_mapping_len = vdso_text_len + vvar_page_num * PAGE_SIZE;
+#ifdef CONFIG_CPU_CACHE_ALIASING
+	/* leave room for the page-colour adjustment applied below */
+	vdso_mapping_len += (L1_cache_info[DCACHE].aliasing_num - 1) << PAGE_SHIFT;
+#endif
+
+	if (down_write_killable(&mm->mmap_sem))
+		return -EINTR;
+
+	addr = vdso_random_addr(vdso_mapping_len);
+	vdso_base = get_unmapped_area(NULL, addr, vdso_mapping_len, 0, 0);
+	if (IS_ERR_VALUE(vdso_base)) {
+		ret = vdso_base;
+		goto up_fail;
+	}
+
+#ifdef CONFIG_CPU_CACHE_ALIASING
+	{
+		unsigned int aliasing_mask =
+		    L1_cache_info[DCACHE].aliasing_mask;
+		unsigned int page_colour_ofs;
+		page_colour_ofs = ((unsigned int)vdso_data & aliasing_mask) -
+		    (vdso_base & aliasing_mask);
+		vdso_base += page_colour_ofs & aliasing_mask;
+	}
+#endif
+
+	vma = _install_special_mapping(mm, vdso_base, vvar_page_num * PAGE_SIZE,
+				       VM_READ | VM_MAYREAD, &vdso_spec[0]);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto up_fail;
+	}
+
+	/* Map the vDSO data page into user space */
+	ret = io_remap_pfn_range(vma, vdso_base,
+				 virt_to_phys(vdso_data) >> PAGE_SHIFT,
+				 PAGE_SIZE, vma->vm_page_prot);
+	if (ret)
+		goto up_fail;
+
+	/* Map the timer registers into user space */
+	vdso_base += PAGE_SIZE;
+	prot = __pgprot(_PAGE_V | _PAGE_M_UR_KR | _PAGE_D |  _PAGE_C_DEV);
+	ret = io_remap_pfn_range(vma, vdso_base, timer_mapping_base >> PAGE_SHIFT,
+				 PAGE_SIZE, prot);
+	if (ret)
+		goto up_fail;
+
+	/* Map the vDSO text pages into user space */
+	vdso_base += PAGE_SIZE;
+	mm->context.vdso = (void *)vdso_base;
+	vma = _install_special_mapping(mm, vdso_base, vdso_text_len,
+				       VM_READ | VM_EXEC |
+				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+				       &vdso_spec[1]);
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		goto up_fail;
+	}
+
+	up_write(&mm->mmap_sem);
+	return 0;
+
+up_fail:
+	mm->context.vdso = NULL;
+	up_write(&mm->mmap_sem);
+	return ret;
+}
+
+static void vdso_write_begin(struct vdso_data *vdata)
+{
+	++vdata->seq_count;
+	smp_wmb();		/* Pairs with smp_rmb in vdso_read_retry */
+}
+
+static void vdso_write_end(struct vdso_data *vdata)
+{
+	smp_wmb();		/* Pairs with smp_rmb in vdso_read_begin */
+	++vdata->seq_count;
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+	vdso_write_begin(vdso_data);
+	vdso_data->cs_mask = tk->tkr_mono.mask;
+	vdso_data->cs_mult = tk->tkr_mono.mult;
+	vdso_data->cs_shift = tk->tkr_mono.shift;
+	vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
+	vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
+	vdso_data->xtime_coarse_sec = tk->xtime_sec;
+	vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
+	    tk->tkr_mono.shift;
+	vdso_write_end(vdso_data);
+}
+
+void update_vsyscall_tz(void)
+{
+	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+}
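The writer above and the vDSO reader in gettimeofday.c below agree on a simple sequence-count protocol: an odd seq_count means an update is in progress, and a count that changed across the read means the data may be torn. A minimal model of the read side (illustrative only, not part of the patch; struct sample and read_stable are this sketch's own, and the memory barriers of the real code are omitted):

/* Illustrative sketch of the seqcount read protocol. */
struct sample {
	unsigned int seq_count;
	unsigned long value;
};

static unsigned long read_stable(const volatile struct sample *d)
{
	unsigned int seq;
	unsigned long v;

	do {
		while ((seq = d->seq_count) & 1)
			;			/* writer active: wait */
		v = d->value;			/* may race with a writer... */
	} while (d->seq_count != seq);		/* ...detect and retry */

	return v;
}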
diff --git a/arch/nds32/kernel/vdso/Makefile b/arch/nds32/kernel/vdso/Makefile
new file mode 100644
index 0000000..e6c50a7
--- /dev/null
+++ b/arch/nds32/kernel/vdso/Makefile
@@ -0,0 +1,82 @@
+#
+# Building a vDSO image for nds32.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+# Heavily based on the vDSO Makefiles for other archs.
+#
+
+obj-vdso := note.o datapage.o sigreturn.o gettimeofday.o
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ccflags-y := -shared -fno-common -fno-builtin
+ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ccflags-y += -fPIC -Wl,-shared -g
+
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
+
+obj-y += vdso.o
+extra-y += vdso.lds
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+	$(call if_changed,vdsold)
+
+
+# Strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+	$(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+define cmd_vdsosym
+	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
+endef
+
+include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+	$(call if_changed,vdsosym)
+
+
+
+# Assembly rules for the .S files
+
+sigreturn.o : sigreturn.S
+	$(call if_changed_dep,vdsoas)
+
+note.o : note.S
+	$(call if_changed_dep,vdsoas)
+
+datapage.o : datapage.S
+	$(call if_changed_dep,vdsoas)
+
+gettimeofday.o : gettimeofday.c FORCE
+	$(call if_changed_dep,vdsocc)
+
+# Actual build commands
+quiet_cmd_vdsold = VDSOL   $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
+quiet_cmd_vdsoas = VDSOA   $@
+      cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+quiet_cmd_vdsocc = VDSOC   $@
+      cmd_vdsocc = $(CC) $(c_flags) -c -o $@ $<
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+	@mkdir -p $(MODLIB)/vdso
+	$(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/nds32/kernel/vdso/datapage.S b/arch/nds32/kernel/vdso/datapage.S
new file mode 100644
index 0000000..4a62c3c
--- /dev/null
+++ b/arch/nds32/kernel/vdso/datapage.S
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ENTRY(__get_timerpage)
+	sethi	$r0, hi20(. + PAGE_SIZE + 8)
+	ori	$r0, $r0, lo12(. + PAGE_SIZE + 4)
+	mfusr	$r1, $pc
+	sub	$r0, $r1, $r0
+	ret
+ENDPROC(__get_timerpage)
+
+ENTRY(__get_datapage)
+	sethi	$r0, hi20(. + 2*PAGE_SIZE + 8)
+	ori	$r0, $r0, lo12(. + 2*PAGE_SIZE + 4)
+	mfusr	$r1, $pc
+	sub	$r0, $r1, $r0
+	ret
+ENDPROC(__get_datapage)
diff --git a/arch/nds32/kernel/vdso/gen_vdso_offsets.sh b/arch/nds32/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 0000000..01924ff
--- /dev/null
+++ b/arch/nds32/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
diff --git a/arch/nds32/kernel/vdso/gettimeofday.c b/arch/nds32/kernel/vdso/gettimeofday.c
new file mode 100644
index 0000000..038721a
--- /dev/null
+++ b/arch/nds32/kernel/vdso/gettimeofday.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/compiler.h>
+#include <linux/hrtimer.h>
+#include <linux/time.h>
+#include <asm/io.h>
+#include <asm/barrier.h>
+#include <asm/bug.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
+#include <asm/vdso_datapage.h>
+#include <asm/vdso_timer_info.h>
+#include <asm/asm-offsets.h>
+
+#define X(x) #x
+#define Y(x) X(x)
+
+extern struct vdso_data *__get_datapage(void);
+extern struct vdso_data *__get_timerpage(void);
+
+static notrace unsigned int __vdso_read_begin(const struct vdso_data *vdata)
+{
+	u32 seq;
+repeat:
+	seq = READ_ONCE(vdata->seq_count);
+	if (seq & 1) {
+		cpu_relax();
+		goto repeat;
+	}
+	return seq;
+}
+
+static notrace unsigned int vdso_read_begin(const struct vdso_data *vdata)
+{
+	unsigned int seq;
+
+	seq = __vdso_read_begin(vdata);
+
+	smp_rmb();		/* Pairs with smp_wmb in vdso_write_end */
+	return seq;
+}
+
+static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
+{
+	smp_rmb();		/* Pairs with smp_wmb in vdso_write_begin */
+	return vdata->seq_count != start;
+}
+
+static notrace long clock_gettime_fallback(clockid_t _clkid,
+					   struct timespec *_ts)
+{
+	register struct timespec *ts asm("$r1") = _ts;
+	register clockid_t clkid asm("$r0") = _clkid;
+	register long ret asm("$r0");
+
+	asm volatile ("movi	$r15, %3\n"
+		      "syscall 	0x0\n"
+		      :"=r" (ret)
+		      :"r"(clkid), "r"(ts), "i"(__NR_clock_gettime)
+		      :"$r15", "memory");
+
+	return ret;
+}
+
+static notrace int do_realtime_coarse(struct timespec *ts,
+				      struct vdso_data *vdata)
+{
+	u32 seq;
+
+	do {
+		seq = vdso_read_begin(vdata);
+
+		ts->tv_sec = vdata->xtime_coarse_sec;
+		ts->tv_nsec = vdata->xtime_coarse_nsec;
+
+	} while (vdso_read_retry(vdata, seq));
+	return 0;
+}
+
+static notrace int do_monotonic_coarse(struct timespec *ts,
+				       struct vdso_data *vdata)
+{
+	struct timespec tomono;
+	u32 seq;
+
+	do {
+		seq = vdso_read_begin(vdata);
+
+		ts->tv_sec = vdata->xtime_coarse_sec;
+		ts->tv_nsec = vdata->xtime_coarse_nsec;
+
+		tomono.tv_sec = vdata->wtm_clock_sec;
+		tomono.tv_nsec = vdata->wtm_clock_nsec;
+
+	} while (vdso_read_retry(vdata, seq));
+
+	ts->tv_sec += tomono.tv_sec;
+	timespec_add_ns(ts, tomono.tv_nsec);
+	return 0;
+}
+
+static notrace inline u64 vgetsns(struct vdso_data *vdso)
+{
+	u32 cycle_now;
+	u32 cycle_delta;
+	u32 *timer_cycle_base;
+
+	timer_cycle_base =
+	    (u32 *) ((char *)__get_timerpage() + vdso->cycle_count_offset);
+	cycle_now = readl_relaxed(timer_cycle_base);
+	if (vdso->cycle_count_down)
+		cycle_now = ~cycle_now;
+	cycle_delta = cycle_now - (u32) vdso->cs_cycle_last;
+	return ((u64) cycle_delta & vdso->cs_mask) * vdso->cs_mult;
+}
+
+static notrace int do_realtime(struct timespec *ts, struct vdso_data *vdata)
+{
+	unsigned count;
+	u64 ns;
+	do {
+		count = vdso_read_begin(vdata);
+		ts->tv_sec = vdata->xtime_clock_sec;
+		ns = vdata->xtime_clock_nsec;
+		ns += vgetsns(vdata);
+		ns >>= vdata->cs_shift;
+	} while (vdso_read_retry(vdata, count));
+
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;
+
+	return 0;
+}
+
+static notrace int do_monotonic(struct timespec *ts, struct vdso_data *vdata)
+{
+	struct timespec tomono;
+	u64 nsecs;
+	u32 seq;
+
+	do {
+		seq = vdso_read_begin(vdata);
+
+		ts->tv_sec = vdata->xtime_clock_sec;
+		nsecs = vdata->xtime_clock_nsec;
+		nsecs += vgetsns(vdata);
+		nsecs >>= vdata->cs_shift;
+
+		tomono.tv_sec = vdata->wtm_clock_sec;
+		tomono.tv_nsec = vdata->wtm_clock_nsec;
+
+	} while (vdso_read_retry(vdata, seq));
+
+	ts->tv_sec += tomono.tv_sec;
+	ts->tv_nsec = 0;
+	timespec_add_ns(ts, nsecs + tomono.tv_nsec);
+	return 0;
+}
+
+notrace int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
+{
+	struct vdso_data *vdata;
+	int ret = -1;
+
+	vdata = __get_datapage();
+	if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
+		return clock_gettime_fallback(clkid, ts);
+
+	switch (clkid) {
+	case CLOCK_REALTIME_COARSE:
+		ret = do_realtime_coarse(ts, vdata);
+		break;
+	case CLOCK_MONOTONIC_COARSE:
+		ret = do_monotonic_coarse(ts, vdata);
+		break;
+	case CLOCK_REALTIME:
+		ret = do_realtime(ts, vdata);
+		break;
+	case CLOCK_MONOTONIC:
+		ret = do_monotonic(ts, vdata);
+		break;
+	default:
+		break;
+	}
+
+	if (ret)
+		ret = clock_gettime_fallback(clkid, ts);
+
+	return ret;
+}
+
+static notrace int clock_getres_fallback(clockid_t _clk_id,
+					  struct timespec *_res)
+{
+	register clockid_t clk_id asm("$r0") = _clk_id;
+	register struct timespec *res asm("$r1") = _res;
+	register int ret asm("$r0");
+
+	asm volatile ("movi	$r15, %3\n"
+		      "syscall	0x0\n"
+		      :"=r" (ret)
+		      :"r"(clk_id), "r"(res), "i"(__NR_clock_getres)
+		      :"$r15", "memory");
+
+	return ret;
+}
+
+notrace int __vdso_clock_getres(clockid_t clk_id, struct timespec *res)
+{
+	if (res == NULL)
+		return 0;
+	switch (clk_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		res->tv_sec = 0;
+		res->tv_nsec = CLOCK_REALTIME_RES;
+		break;
+	case CLOCK_REALTIME_COARSE:
+	case CLOCK_MONOTONIC_COARSE:
+		res->tv_sec = 0;
+		res->tv_nsec = CLOCK_COARSE_RES;
+		break;
+	default:
+		return clock_getres_fallback(clk_id, res);
+	}
+	return 0;
+}
+
+static notrace inline int gettimeofday_fallback(struct timeval *_tv,
+						struct timezone *_tz)
+{
+	register struct timeval *tv asm("$r0") = _tv;
+	register struct timezone *tz asm("$r1") = _tz;
+	register int ret asm("$r0");
+
+	asm volatile ("movi	$r15, %3\n"
+		      "syscall	0x0\n"
+		      :"=r" (ret)
+		      :"r"(tv), "r"(tz), "i"(__NR_gettimeofday)
+		      :"$r15", "memory");
+
+	return ret;
+}
+
+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+	struct timespec ts;
+	struct vdso_data *vdata;
+	int ret;
+
+	vdata = __get_datapage();
+
+	if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
+		return gettimeofday_fallback(tv, tz);
+
+	ret = do_realtime(&ts, vdata);
+
+	if (tv) {
+		tv->tv_sec = ts.tv_sec;
+		tv->tv_usec = ts.tv_nsec / 1000;
+	}
+	if (tz) {
+		tz->tz_minuteswest = vdata->tz_minuteswest;
+		tz->tz_dsttime = vdata->tz_dsttime;
+	}
+
+	return ret;
+}
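For reference, do_realtime() above converts a raw cycle delta to nanoseconds as ((delta & cs_mask) * cs_mult) >> cs_shift. With a hypothetical 1 MHz timer, the clocksource layer might pick cs_shift = 22 and cs_mult = 1000 << 22 (just under 2^32), so a delta of 5 cycles becomes (5 * cs_mult) >> 22 = 5000 ns.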
diff --git a/arch/nds32/kernel/vdso/note.S b/arch/nds32/kernel/vdso/note.S
new file mode 100644
index 0000000..0aeaa19
--- /dev/null
+++ b/arch/nds32/kernel/vdso/note.S
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+	.long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/nds32/kernel/vdso/sigreturn.S b/arch/nds32/kernel/vdso/sigreturn.S
new file mode 100644
index 0000000..67e4d1d
--- /dev/null
+++ b/arch/nds32/kernel/vdso/sigreturn.S
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+
+ENTRY(__kernel_rt_sigreturn)
+	.cfi_startproc
+	movi $r15, __NR_rt_sigreturn
+	/*
+	 * The SWID of the syscall instruction must be __NR_rt_sigreturn
+	 * to stay in sync with the unwinding scheme gcc expects.
+	 */
+	syscall	__NR_rt_sigreturn
+	.cfi_endproc
+ENDPROC(__kernel_rt_sigreturn)
diff --git a/arch/nds32/kernel/vdso/vdso.S b/arch/nds32/kernel/vdso/vdso.S
new file mode 100644
index 0000000..16737c1
--- /dev/null
+++ b/arch/nds32/kernel/vdso/vdso.S
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2012 ARM Limited
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+	.globl vdso_start, vdso_end
+	.section .rodata
+	.balign PAGE_SIZE
+vdso_start:
+	.incbin "arch/nds32/kernel/vdso/vdso.so"
+	.balign PAGE_SIZE
+vdso_end:
+
+	.previous
diff --git a/arch/nds32/kernel/vdso/vdso.lds.S b/arch/nds32/kernel/vdso/vdso.lds.S
new file mode 100644
index 0000000..1f2b160
--- /dev/null
+++ b/arch/nds32/kernel/vdso/vdso.lds.S
@@ -0,0 +1,75 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (C) 2005-2017 Andes Technology Corporation
+ */
+
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_ARCH(nds32)
+
+SECTIONS
+{
+	. = SIZEOF_HEADERS;
+
+	.hash		: { *(.hash) }			:text
+	.gnu.hash	: { *(.gnu.hash) }
+	.dynsym		: { *(.dynsym) }
+	.dynstr		: { *(.dynstr) }
+	.gnu.version	: { *(.gnu.version) }
+	.gnu.version_d	: { *(.gnu.version_d) }
+	.gnu.version_r	: { *(.gnu.version_r) }
+
+	.note		: { *(.note.*) }		:text	:note
+
+
+	.text		: { *(.text*) }			:text
+
+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
+	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
+
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
+	.rodata		: { *(.rodata*) }		:text
+
+
+	/DISCARD/	: {
+		*(.note.GNU-stack)
+		*(.data .data.* .gnu.linkonce.d.* .sdata*)
+		*(.bss .sbss .dynbss .dynsbss)
+	}
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
+	note		PT_NOTE		FLAGS(4);		/* PF_R */
+	eh_frame_hdr	PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+	LINUX_4 {
+	global:
+		__kernel_rt_sigreturn;
+		__vdso_gettimeofday;
+		__vdso_clock_getres;
+		__vdso_clock_gettime;
+	local: *;
+	};
+}
+
+/*
+ * Make the rt_sigreturn code visible to the kernel.
+ */
+VDSO_rt_sigtramp	= __kernel_rt_sigreturn;
diff --git a/arch/nds32/kernel/vmlinux.lds.S b/arch/nds32/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..9e90f30
--- /dev/null
+++ b/arch/nds32/kernel/vmlinux.lds.S
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/cache.h>
+#include <asm/memory.h>
+
+#define LOAD_OFFSET	(PAGE_OFFSET - PHYS_OFFSET)
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_ARCH(nds32)
+ENTRY(_stext_lma)
+jiffies = jiffies_64;
+
+#if defined(CONFIG_GCOV_KERNEL)
+#define NDS32_EXIT_KEEP(x)	x
+#else
+#define NDS32_EXIT_KEEP(x)
+#endif
+
+SECTIONS
+{
+	_stext_lma = TEXTADDR - LOAD_OFFSET;
+	. = TEXTADDR;
+	__init_begin = .;
+	HEAD_TEXT_SECTION
+	.exit.text : {
+		NDS32_EXIT_KEEP(EXIT_TEXT)
+	}
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	.exit.data : {
+		NDS32_EXIT_KEEP(EXIT_DATA)
+	}
+	PERCPU_SECTION(L1_CACHE_BYTES)
+	__init_end = .;
+
+	. = ALIGN(PAGE_SIZE);
+	_stext = .;
+	/* Real text segment */
+	.text : AT(ADDR(.text) - LOAD_OFFSET) {
+		_text = .;		/* Text and read-only data	*/
+		TEXT_TEXT
+		SCHED_TEXT
+		CPUIDLE_TEXT
+		LOCK_TEXT
+		KPROBES_TEXT
+		IRQENTRY_TEXT
+		*(.fixup)
+	}
+
+	_etext = .;			/* End of text and rodata section */
+
+	_sdata = .;
+	RO_DATA_SECTION(PAGE_SIZE)
+	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+	_edata  =  .;
+
+	EXCEPTION_TABLE(16)
+	NOTES
+	BSS_SECTION(4, 4, 4)
+	_end = .;
+
+	STABS_DEBUG
+	DWARF_DEBUG
+
+	DISCARDS
+}