v4.19.13 snapshot.
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
new file mode 100644
index 0000000..6b68558
--- /dev/null
+++ b/arch/nds32/mm/Makefile
@@ -0,0 +1,7 @@
+obj-y				:= extable.o tlb.o \
+				   fault.o init.o ioremap.o mmap.o \
+                                   mm-nds32.o cacheflush.o proc.o
+
+obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
+obj-$(CONFIG_HIGHMEM)           += highmem.o
+CFLAGS_proc.o			+= -fomit-frame-pointer
diff --git a/arch/nds32/mm/alignment.c b/arch/nds32/mm/alignment.c
new file mode 100644
index 0000000..e1aed9d
--- /dev/null
+++ b/arch/nds32/mm/alignment.c
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <linux/sysctl.h>
+#include <asm/unaligned.h>
+
+#define DEBUG(enable, tagged, ...)				\
+	do {							\
+		if (enable) {					\
+			if (tagged)				\
+				pr_warn("[ %30s() ] ", __func__); \
+			pr_warn(__VA_ARGS__);			\
+		}						\
+	} while (0)
+
+#define RT(inst)	(((inst) >> 20) & 0x1FUL)
+#define RA(inst)	(((inst) >> 15) & 0x1FUL)
+#define RB(inst)	(((inst) >> 10) & 0x1FUL)
+#define SV(inst)	(((inst) >> 8) & 0x3UL)
+#define IMM(inst)	(((inst) >> 0) & 0x7FFFUL)
+
+#define RA3(inst)	(((inst) >> 3) & 0x7UL)
+#define RT3(inst)	(((inst) >> 6) & 0x7UL)
+#define IMM3U(inst)	(((inst) >> 0) & 0x7UL)
+
+#define RA5(inst)	(((inst) >> 0) & 0x1FUL)
+#define RT4(inst)	(((inst) >> 5) & 0xFUL)
+
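+/*
+ * GET_IMMSVAL sign-extends the 15-bit immediate field, e.g. 0x7FFF
+ * (bit 14 set) becomes 0x7FFF - 0x8000 = -1, while 0x0010 stays 16.
+ */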
+#define GET_IMMSVAL(imm_value) \
+	(((imm_value >> 14) & 0x1) ? (imm_value - 0x8000) : imm_value)
+
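+/*
+ * The helpers below touch the unaligned address one byte at a time so that
+ * only naturally aligned accesses reach the bus.  Every access carries an
+ * __ex_table fixup that sets 'err' on a fault, which makes get16/get32 and
+ * set16/set32 jump to the caller's 'fault' label instead of oopsing.
+ */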
+#define __get8_data(val,addr,err)	\
+	__asm__(					\
+	"1:	lbi.bi	%1, [%2], #1\n"			\
+	"2:\n"						\
+	"	.pushsection .text.fixup,\"ax\"\n"	\
+	"	.align	2\n"				\
+	"3:	movi	%0, #1\n"			\
+	"	j	2b\n"				\
+	"	.popsection\n"				\
+	"	.pushsection __ex_table,\"a\"\n"	\
+	"	.align	3\n"				\
+	"	.long	1b, 3b\n"			\
+	"	.popsection\n"				\
+	: "=r" (err), "=&r" (val), "=r" (addr)		\
+	: "0" (err), "2" (addr))
+
+#define get16_data(addr, val_ptr)				\
+	do {							\
+		unsigned int err = 0, v, a = addr;		\
+		__get8_data(v,a,err);				\
+		*val_ptr =  v << 0;				\
+		__get8_data(v,a,err);				\
+		*val_ptr |= v << 8;				\
+		if (err)					\
+			goto fault;				\
+		*val_ptr = le16_to_cpu(*val_ptr);		\
+	} while (0)
+
+#define get32_data(addr, val_ptr)				\
+	do {							\
+		unsigned int err = 0, v, a = addr;		\
+		__get8_data(v,a,err);				\
+		*val_ptr =  v << 0;				\
+		__get8_data(v,a,err);				\
+		*val_ptr |= v << 8;				\
+		__get8_data(v,a,err);				\
+		*val_ptr |= v << 16;				\
+		__get8_data(v,a,err);				\
+		*val_ptr |= v << 24;				\
+		if (err)					\
+			goto fault;				\
+		*val_ptr = le32_to_cpu(*val_ptr);		\
+	} while (0)
+
+#define get_data(addr, val_ptr, len)				\
+	do {							\
+		if ((len) == 2)					\
+			get16_data(addr, val_ptr);		\
+		else						\
+			get32_data(addr, val_ptr);		\
+	} while (0)
+
+#define set16_data(addr, val)					\
+	do {							\
+		unsigned int err = 0, *ptr = addr;		\
+		val = le32_to_cpu(val);				\
+		__asm__(					\
+                "1:	sbi.bi 	%2, [%1], #1\n"			\
+                "	srli 	%2, %2, #8\n"			\
+                "2:	sbi	%2, [%1]\n"			\
+		"3:\n"						\
+		"	.pushsection .text.fixup,\"ax\"\n"	\
+		"	.align	2\n"				\
+		"4:	movi	%0, #1\n"			\
+		"	j	3b\n"				\
+		"	.popsection\n"				\
+		"	.pushsection __ex_table,\"a\"\n"	\
+		"	.align	3\n"				\
+		"	.long	1b, 4b\n"			\
+		"	.long	2b, 4b\n"			\
+		"	.popsection\n"				\
+		: "=r" (err), "+r" (ptr), "+r" (val)		\
+		: "0" (err)					\
+		);						\
+		if (err)					\
+			goto fault;				\
+	} while (0)
+
+#define set32_data(addr, val)					\
+	do {							\
+		unsigned int err = 0, *ptr = addr;		\
+		val = le32_to_cpu(val);				\
+		__asm__(					\
+                "1:	sbi.bi 	%2, [%1], #1\n"			\
+                "	srli 	%2, %2, #8\n"			\
+                "2:	sbi.bi 	%2, [%1], #1\n"			\
+                "	srli 	%2, %2, #8\n"			\
+                "3:	sbi.bi 	%2, [%1], #1\n"			\
+                "	srli 	%2, %2, #8\n"			\
+                "4:	sbi 	%2, [%1]\n"			\
+		"5:\n"						\
+		"	.pushsection .text.fixup,\"ax\"\n"	\
+		"	.align	2\n"				\
+		"6:	movi	%0, #1\n"			\
+		"	j	5b\n"				\
+		"	.popsection\n"				\
+		"	.pushsection __ex_table,\"a\"\n"	\
+		"	.align	3\n"				\
+		"	.long	1b, 6b\n"			\
+		"	.long	2b, 6b\n"			\
+		"	.long	3b, 6b\n"			\
+		"	.long	4b, 6b\n"			\
+		"	.popsection\n"				\
+		: "=r" (err), "+r" (ptr), "+r" (val)		\
+		: "0" (err)					\
+		);						\
+		if (err)					\
+			goto fault;				\
+	} while (0)
+
+#define set_data(addr, val, len)				\
+	do {							\
+		if ((len) == 2)					\
+			set16_data(addr, val);			\
+		else						\
+			set32_data(addr, val);			\
+	} while (0)
+
+#define NDS32_16BIT_INSTRUCTION	0x80000000
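+/*
+ * do_unaligned_access() treats bit 31 of the word returned by get_inst()
+ * as the 16-bit-instruction flag and then hands only the upper halfword
+ * to do_16(); everything else is decoded by do_32().
+ */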
+
+extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
+extern pte_t va_kernel_present(unsigned long addr);
+extern int va_readable(struct pt_regs *regs, unsigned long addr);
+extern int va_writable(struct pt_regs *regs, unsigned long addr);
+
+int unalign_access_mode = 0, unalign_access_debug = 0;
+
+static inline unsigned long *idx_to_addr(struct pt_regs *regs, int idx)
+{
+	/* this should be consistent with ptrace.h */
+	if (idx >= 0 && idx <= 25)	/* R0-R25 */
+		return &regs->uregs[0] + idx;
+	else if (idx >= 28 && idx <= 30)	/* FP, GP, LP */
+		return &regs->fp + (idx - 28);
+	else if (idx == 31)	/* SP */
+		return &regs->sp;
+	else
+		return NULL;	/* cause a segfault */
+}
+
+static inline unsigned long get_inst(unsigned long addr)
+{
+	return be32_to_cpu(get_unaligned((u32 *) addr));
+}
+
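+/*
+ * Copy the low 'len' bytes of 'val' and propagate the sign bit of the last
+ * copied byte, e.g. sign_extend(0x8001, 2) == 0xffff8001 while
+ * sign_extend(0x0001, 2) == 0x00000001.
+ */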
+static inline unsigned long sign_extend(unsigned long val, int len)
+{
+	unsigned long ret = 0;
+	unsigned char *s, *t;
+	int i = 0;
+
+	val = cpu_to_le32(val);
+
+	s = (void *)&val;
+	t = (void *)&ret;
+
+	while (i++ < len)
+		*t++ = *s++;
+
+	if (((*(t - 1)) & 0x80) && (i < 4)) {
+
+		while (i++ <= 4)
+			*t++ = 0xff;
+	}
+
+	return le32_to_cpu(ret);
+}
+
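+/*
+ * Decode a 16-bit load/store: 'imm' means the offset comes from the IMM3U
+ * field scaled by the access length, 'regular' means the base register is
+ * left unchanged (the .bi forms instead access the base address and write
+ * base + offset back), and addr_mode/idx_mode pick the RA3/RA5 and RT3/RT4
+ * register fields of the 333 and 450 formats.
+ */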
+static inline int do_16(unsigned long inst, struct pt_regs *regs)
+{
+	int imm, regular, load, len, addr_mode, idx_mode;
+	unsigned long unaligned_addr, target_val, source_idx, target_idx,
+	    shift = 0;
+	switch ((inst >> 9) & 0x3F) {
+
+	case 0x12:		/* LHI333    */
+		imm = 1;
+		regular = 1;
+		load = 1;
+		len = 2;
+		addr_mode = 3;
+		idx_mode = 3;
+		break;
+	case 0x10:		/* LWI333    */
+		imm = 1;
+		regular = 1;
+		load = 1;
+		len = 4;
+		addr_mode = 3;
+		idx_mode = 3;
+		break;
+	case 0x11:		/* LWI333.bi */
+		imm = 1;
+		regular = 0;
+		load = 1;
+		len = 4;
+		addr_mode = 3;
+		idx_mode = 3;
+		break;
+	case 0x1A:		/* LWI450    */
+		imm = 0;
+		regular = 1;
+		load = 1;
+		len = 4;
+		addr_mode = 5;
+		idx_mode = 4;
+		break;
+	case 0x16:		/* SHI333    */
+		imm = 1;
+		regular = 1;
+		load = 0;
+		len = 2;
+		addr_mode = 3;
+		idx_mode = 3;
+		break;
+	case 0x14:		/* SWI333    */
+		imm = 1;
+		regular = 1;
+		load = 0;
+		len = 4;
+		addr_mode = 3;
+		idx_mode = 3;
+		break;
+	case 0x15:		/* SWI333.bi */
+		imm = 1;
+		regular = 0;
+		load = 0;
+		len = 4;
+		addr_mode = 3;
+		idx_mode = 3;
+		break;
+	case 0x1B:		/* SWI450    */
+		imm = 0;
+		regular = 1;
+		load = 0;
+		len = 4;
+		addr_mode = 5;
+		idx_mode = 4;
+		break;
+
+	default:
+		return -EFAULT;
+	}
+
+	if (addr_mode == 3) {
+		unaligned_addr = *idx_to_addr(regs, RA3(inst));
+		source_idx = RA3(inst);
+	} else {
+		unaligned_addr = *idx_to_addr(regs, RA5(inst));
+		source_idx = RA5(inst);
+	}
+
+	if (idx_mode == 3)
+		target_idx = RT3(inst);
+	else
+		target_idx = RT4(inst);
+
+	if (imm)
+		shift = IMM3U(inst) * len;
+
+	if (regular)
+		unaligned_addr += shift;
+
+	if (load) {
+		if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len))
+			return -EACCES;
+
+		get_data(unaligned_addr, &target_val, len);
+		*idx_to_addr(regs, target_idx) = target_val;
+	} else {
+		if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len))
+			return -EACCES;
+		target_val = *idx_to_addr(regs, target_idx);
+		set_data((void *)unaligned_addr, target_val, len);
+	}
+
+	if (!regular)
+		*idx_to_addr(regs, source_idx) = unaligned_addr + shift;
+	regs->ipc += 2;
+
+	return 0;
+fault:
+	return -EACCES;
+}
+
+static inline int do_32(unsigned long inst, struct pt_regs *regs)
+{
+	int imm, regular, load, len, sign_ext;
+	unsigned long unaligned_addr, target_val, shift;
+
+	unaligned_addr = *idx_to_addr(regs, RA(inst));
+
+	switch ((inst >> 25) << 1) {
+
+	case 0x02:		/* LHI       */
+		imm = 1;
+		regular = 1;
+		load = 1;
+		len = 2;
+		sign_ext = 0;
+		break;
+	case 0x0A:		/* LHI.bi    */
+		imm = 1;
+		regular = 0;
+		load = 1;
+		len = 2;
+		sign_ext = 0;
+		break;
+	case 0x22:		/* LHSI      */
+		imm = 1;
+		regular = 1;
+		load = 1;
+		len = 2;
+		sign_ext = 1;
+		break;
+	case 0x2A:		/* LHSI.bi   */
+		imm = 1;
+		regular = 0;
+		load = 1;
+		len = 2;
+		sign_ext = 1;
+		break;
+	case 0x04:		/* LWI       */
+		imm = 1;
+		regular = 1;
+		load = 1;
+		len = 4;
+		sign_ext = 0;
+		break;
+	case 0x0C:		/* LWI.bi    */
+		imm = 1;
+		regular = 0;
+		load = 1;
+		len = 4;
+		sign_ext = 0;
+		break;
+	case 0x12:		/* SHI       */
+		imm = 1;
+		regular = 1;
+		load = 0;
+		len = 2;
+		sign_ext = 0;
+		break;
+	case 0x1A:		/* SHI.bi    */
+		imm = 1;
+		regular = 0;
+		load = 0;
+		len = 2;
+		sign_ext = 0;
+		break;
+	case 0x14:		/* SWI       */
+		imm = 1;
+		regular = 1;
+		load = 0;
+		len = 4;
+		sign_ext = 0;
+		break;
+	case 0x1C:		/* SWI.bi    */
+		imm = 1;
+		regular = 0;
+		load = 0;
+		len = 4;
+		sign_ext = 0;
+		break;
+
+	default:
+		switch (inst & 0xff) {
+
+		case 0x01:	/* LH        */
+			imm = 0;
+			regular = 1;
+			load = 1;
+			len = 2;
+			sign_ext = 0;
+			break;
+		case 0x05:	/* LH.bi     */
+			imm = 0;
+			regular = 0;
+			load = 1;
+			len = 2;
+			sign_ext = 0;
+			break;
+		case 0x11:	/* LHS       */
+			imm = 0;
+			regular = 1;
+			load = 1;
+			len = 2;
+			sign_ext = 1;
+			break;
+		case 0x15:	/* LHS.bi    */
+			imm = 0;
+			regular = 0;
+			load = 1;
+			len = 2;
+			sign_ext = 1;
+			break;
+		case 0x02:	/* LW        */
+			imm = 0;
+			regular = 1;
+			load = 1;
+			len = 4;
+			sign_ext = 0;
+			break;
+		case 0x06:	/* LW.bi     */
+			imm = 0;
+			regular = 0;
+			load = 1;
+			len = 4;
+			sign_ext = 0;
+			break;
+		case 0x09:	/* SH        */
+			imm = 0;
+			regular = 1;
+			load = 0;
+			len = 2;
+			sign_ext = 0;
+			break;
+		case 0x0D:	/* SH.bi     */
+			imm = 0;
+			regular = 0;
+			load = 0;
+			len = 2;
+			sign_ext = 0;
+			break;
+		case 0x0A:	/* SW        */
+			imm = 0;
+			regular = 1;
+			load = 0;
+			len = 4;
+			sign_ext = 0;
+			break;
+		case 0x0E:	/* SW.bi     */
+			imm = 0;
+			regular = 0;
+			load = 0;
+			len = 4;
+			sign_ext = 0;
+			break;
+
+		default:
+			return -EFAULT;
+		}
+	}
+
+	if (imm)
+		shift = GET_IMMSVAL(IMM(inst)) * len;
+	else
+		shift = *idx_to_addr(regs, RB(inst)) << SV(inst);
+
+	if (regular)
+		unaligned_addr += shift;
+
+	if (load) {
+
+		if (!access_ok(VERIFY_READ, (void *)unaligned_addr, len))
+			return -EACCES;
+
+		get_data(unaligned_addr, &target_val, len);
+
+		if (sign_ext)
+			*idx_to_addr(regs, RT(inst)) =
+			    sign_extend(target_val, len);
+		else
+			*idx_to_addr(regs, RT(inst)) = target_val;
+	} else {
+
+		if (!access_ok(VERIFY_WRITE, (void *)unaligned_addr, len))
+			return -EACCES;
+
+		target_val = *idx_to_addr(regs, RT(inst));
+		set_data((void *)unaligned_addr, target_val, len);
+	}
+
+	if (!regular)
+		*idx_to_addr(regs, RA(inst)) = unaligned_addr + shift;
+
+	regs->ipc += 4;
+
+	return 0;
+fault:
+	return -EACCES;
+}
+
+int do_unaligned_access(unsigned long addr, struct pt_regs *regs)
+{
+	unsigned long inst;
+	int ret = -EFAULT;
+	mm_segment_t seg = get_fs();
+
+	inst = get_inst(regs->ipc);
+
+	DEBUG((unalign_access_debug > 0), 1,
+	      "Faulting addr: 0x%08lx, pc: 0x%08lx [inst: 0x%08lx ]\n", addr,
+	      regs->ipc, inst);
+
+	set_fs(USER_DS);
+
+	if (inst & NDS32_16BIT_INSTRUCTION)
+		ret = do_16((inst >> 16) & 0xffff, regs);
+	else
+		ret = do_32(inst, regs);
+	set_fs(seg);
+
+	return ret;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static struct ctl_table alignment_tbl[3] = {
+	{
+		.procname = "enable",
+		.data = &unalign_access_mode,
+		.maxlen = sizeof(unalign_access_mode),
+		.mode = 0666,
+		.proc_handler = &proc_dointvec
+	},
+	{
+		.procname = "debug_info",
+		.data = &unalign_access_debug,
+		.maxlen = sizeof(unalign_access_debug),
+		.mode = 0644,
+		.proc_handler = &proc_dointvec
+	},
+	{}
+};
+
+static struct ctl_table nds32_sysctl_table[2] = {
+	{
+		.procname = "unaligned_access",
+		.mode = 0555,
+		.child = alignment_tbl
+	},
+	{}
+};
+
+static struct ctl_path nds32_path[2] = {
+	{.procname = "nds32"},
+	{}
+};
+
+/*
+ * Initialize nds32 alignment-correction interface
+ */
+static int __init nds32_sysctl_init(void)
+{
+	register_sysctl_paths(nds32_path, nds32_sysctl_table);
+	return 0;
+}
+
+__initcall(nds32_sysctl_init);
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
new file mode 100644
index 0000000..2547036
--- /dev/null
+++ b/arch/nds32/mm/cacheflush.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/proc-fns.h>
+#include <asm/shmparam.h>
+#include <asm/cache_info.h>
+
+extern struct cache_info L1_cache_info[2];
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size, flags;
+	line_size = L1_cache_info[DCACHE].line_size;
+	start = start & ~(line_size - 1);
+	end = (end + line_size - 1) & ~(line_size - 1);
+	local_irq_save(flags);
+	cpu_cache_wbinval_range(start, end, 1);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	unsigned long flags;
+	unsigned long kaddr;
+	local_irq_save(flags);
+	kaddr = (unsigned long)kmap_atomic(page);
+	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+	kunmap_atomic((void *)kaddr);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+	                     unsigned long addr, int len)
+{
+	unsigned long kaddr;
+	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
+	flush_icache_range(kaddr, kaddr + len);
+	kunmap_atomic((void *)kaddr);
+}
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+		      pte_t * pte)
+{
+	struct page *page;
+	unsigned long pfn = pte_pfn(*pte);
+	unsigned long flags;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	if (vma->vm_mm == current->active_mm) {
+		local_irq_save(flags);
+		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
+		__nds32__tlbop_rwr(*pte);
+		__nds32__isb();
+		local_irq_restore(flags);
+	}
+	page = pfn_to_page(pfn);
+
+	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
+	    (vma->vm_flags & VM_EXEC)) {
+		unsigned long kaddr;
+		local_irq_save(flags);
+		kaddr = (unsigned long)kmap_atomic(page);
+		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+		kunmap_atomic((void *)kaddr);
+		local_irq_restore(flags);
+	}
+}
+#ifdef CONFIG_CPU_CACHE_ALIASING
+extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
+
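+/*
+ * Two mappings alias in the VIPT D-cache when their addresses differ within
+ * SHMLBA: aliasing() is non-zero when 'addr' and 'page' would land on
+ * different cache colours.
+ */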
+static inline unsigned long aliasing(unsigned long addr, unsigned long page)
+{
+	return ((addr & PAGE_MASK) ^ page) & (SHMLBA - 1);
+}
+
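+/*
+ * kremap0()/kremap1() install a temporary, TLB-locked kernel mapping of the
+ * target page at a fixed window (BASE_ADDR0/BASE_ADDR1) with the same cache
+ * colour as the user address; kunmap01() unlocks and invalidates it again.
+ */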
+static inline unsigned long kremap0(unsigned long uaddr, unsigned long pa)
+{
+	unsigned long kaddr, pte;
+
+#define BASE_ADDR0 0xffffc000
+	kaddr = BASE_ADDR0 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
+	pte = (pa | PAGE_KERNEL);
+	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
+	__nds32__tlbop_rwlk(pte);
+	__nds32__isb();
+	return kaddr;
+}
+
+static inline void kunmap01(unsigned long kaddr)
+{
+	__nds32__tlbop_unlk(kaddr);
+	__nds32__tlbop_inv(kaddr);
+	__nds32__isb();
+}
+
+static inline unsigned long kremap1(unsigned long uaddr, unsigned long pa)
+{
+	unsigned long kaddr, pte;
+
+#define BASE_ADDR1 0xffff8000
+	kaddr = BASE_ADDR1 | (uaddr & L1_cache_info[DCACHE].aliasing_mask);
+	pte = (pa | PAGE_KERNEL);
+	__nds32__mtsr_dsb(kaddr, NDS32_SR_TLB_VPN);
+	__nds32__tlbop_rwlk(pte);
+	__nds32__isb();
+	return kaddr;
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	cpu_dcache_wbinval_all();
+	cpu_icache_inval_all();
+	local_irq_restore(flags);
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+void flush_cache_range(struct vm_area_struct *vma,
+		       unsigned long start, unsigned long end)
+{
+	unsigned long flags;
+
+	if ((end - start) > 8 * PAGE_SIZE) {
+		cpu_dcache_wbinval_all();
+		if (vma->vm_flags & VM_EXEC)
+			cpu_icache_inval_all();
+		return;
+	}
+	local_irq_save(flags);
+	while (start < end) {
+		if (va_present(vma->vm_mm, start))
+			cpu_cache_wbinval_page(start, vma->vm_flags & VM_EXEC);
+		start += PAGE_SIZE;
+	}
+	local_irq_restore(flags);
+	return;
+}
+
+void flush_cache_page(struct vm_area_struct *vma,
+		      unsigned long addr, unsigned long pfn)
+{
+	unsigned long vto, flags;
+
+	local_irq_save(flags);
+	vto = kremap0(addr, pfn << PAGE_SHIFT);
+	cpu_cache_wbinval_page(vto, vma->vm_flags & VM_EXEC);
+	kunmap01(vto);
+	local_irq_restore(flags);
+}
+
+void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	cpu_dcache_wbinval_all();
+	cpu_icache_inval_all();
+}
+
+void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	cpu_dcache_wbinval_all();
+	cpu_icache_inval_all();
+}
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *to)
+{
+	cpu_dcache_wbinval_page((unsigned long)vaddr);
+	cpu_icache_inval_page((unsigned long)vaddr);
+	copy_page(vto, vfrom);
+	cpu_dcache_wbinval_page((unsigned long)vto);
+	cpu_icache_inval_page((unsigned long)vto);
+}
+
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+	cpu_dcache_wbinval_page((unsigned long)vaddr);
+	cpu_icache_inval_page((unsigned long)vaddr);
+	clear_page(addr);
+	cpu_dcache_wbinval_page((unsigned long)addr);
+	cpu_icache_inval_page((unsigned long)addr);
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	unsigned long vto, vfrom, flags, kto, kfrom, pfrom, pto;
+	kto = ((unsigned long)page_address(to) & PAGE_MASK);
+	kfrom = ((unsigned long)page_address(from) & PAGE_MASK);
+	pto = page_to_phys(to);
+	pfrom = page_to_phys(from);
+
+	local_irq_save(flags);
+	if (aliasing(vaddr, (unsigned long)kfrom))
+		cpu_dcache_wb_page((unsigned long)kfrom);
+	vto = kremap0(vaddr, pto);
+	vfrom = kremap1(vaddr, pfrom);
+	copy_page((void *)vto, (void *)vfrom);
+	kunmap01(vfrom);
+	kunmap01(vto);
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	unsigned long vto, flags, kto;
+
+	kto = ((unsigned long)page_address(page) & PAGE_MASK);
+
+	local_irq_save(flags);
+	if (aliasing(kto, vaddr) && kto != 0) {
+		cpu_dcache_inval_page(kto);
+		cpu_icache_inval_page(kto);
+	}
+	vto = kremap0(vaddr, page_to_phys(page));
+	clear_page((void *)vto);
+	kunmap01(vto);
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(clear_user_highpage);
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	mapping = page_mapping(page);
+	if (mapping && !mapping_mapped(mapping))
+		set_bit(PG_dcache_dirty, &page->flags);
+	else {
+		unsigned long kaddr, flags;
+
+		kaddr = (unsigned long)page_address(page);
+		local_irq_save(flags);
+		cpu_dcache_wbinval_page(kaddr);
+		if (mapping) {
+			unsigned long vaddr, kto;
+
+			vaddr = page->index << PAGE_SHIFT;
+			if (aliasing(vaddr, kaddr)) {
+				kto = kremap0(vaddr, page_to_phys(page));
+				cpu_dcache_wbinval_page(kto);
+				kunmap01(kto);
+			}
+		}
+		local_irq_restore(flags);
+	}
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+		       unsigned long vaddr, void *dst, void *src, int len)
+{
+	unsigned long line_size, start, end, vto, flags;
+
+	local_irq_save(flags);
+	vto = kremap0(vaddr, page_to_phys(page));
+	dst = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
+	memcpy(dst, src, len);
+	if (vma->vm_flags & VM_EXEC) {
+		line_size = L1_cache_info[DCACHE].line_size;
+		start = (unsigned long)dst & ~(line_size - 1);
+		end = ((unsigned long)dst + len + line_size - 1) &
+		      ~(line_size - 1);
+		cpu_cache_wbinval_range(start, end, 1);
+	}
+	kunmap01(vto);
+	local_irq_restore(flags);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			 unsigned long vaddr, void *dst, void *src, int len)
+{
+	unsigned long vto, flags;
+
+	local_irq_save(flags);
+	vto = kremap0(vaddr, page_to_phys(page));
+	src = (void *)(vto | (vaddr & (PAGE_SIZE - 1)));
+	memcpy(dst, src, len);
+	kunmap01(vto);
+	local_irq_restore(flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma,
+		     struct page *page, unsigned long vaddr)
+{
+	unsigned long kaddr, flags, ktmp;
+	if (!PageAnon(page))
+		return;
+
+	if (vma->vm_mm != current->active_mm)
+		return;
+
+	local_irq_save(flags);
+	if (vma->vm_flags & VM_EXEC)
+		cpu_icache_inval_page(vaddr & PAGE_MASK);
+	kaddr = (unsigned long)page_address(page);
+	if (aliasing(vaddr, kaddr)) {
+		ktmp = kremap0(vaddr, page_to_phys(page));
+		cpu_dcache_wbinval_page(ktmp);
+		kunmap01(ktmp);
+	}
+	local_irq_restore(flags);
+}
+
+void flush_kernel_dcache_page(struct page *page)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	cpu_dcache_wbinval_page((unsigned long)page_address(page));
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+void flush_kernel_vmap_range(void *addr, int size)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	cpu_dcache_wb_range((unsigned long)addr, (unsigned long)addr + size);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *addr, int size)
+{
+	unsigned long flags;
+	local_irq_save(flags);
+	cpu_dcache_inval_range((unsigned long)addr, (unsigned long)addr + size);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
+#endif
diff --git a/arch/nds32/mm/extable.c b/arch/nds32/mm/extable.c
new file mode 100644
index 0000000..db7f0a7
--- /dev/null
+++ b/arch/nds32/mm/extable.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	fixup = search_exception_tables(instruction_pointer(regs));
+	if (fixup)
+		regs->ipc = fixup->fixup;
+
+	return fixup != NULL;
+}
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
new file mode 100644
index 0000000..b740534
--- /dev/null
+++ b/arch/nds32/mm/fault.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/extable.h>
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/ptrace.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/hardirq.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+extern void die(const char *str, struct pt_regs *regs, long err);
+
+/*
+ * This is useful to dump out the page tables associated with
+ * 'addr' in mm 'mm'.
+ */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	if (!mm)
+		mm = &init_mm;
+
+	pr_alert("pgd = %p\n", mm->pgd);
+	pgd = pgd_offset(mm, addr);
+	pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd));
+
+	do {
+		pmd_t *pmd;
+
+		if (pgd_none(*pgd))
+			break;
+
+		if (pgd_bad(*pgd)) {
+			pr_alert("(bad)");
+			break;
+		}
+
+		pmd = pmd_offset(pgd, addr);
+#if PTRS_PER_PMD != 1
+		pr_alert(", *pmd=%08lx", pmd_val(*pmd));
+#endif
+
+		if (pmd_none(*pmd))
+			break;
+
+		if (pmd_bad(*pmd)) {
+			pr_alert("(bad)");
+			break;
+		}
+
+		if (IS_ENABLED(CONFIG_HIGHMEM)) {
+			pte_t *pte;
+			/* We must not map this if we have highmem enabled */
+			pte = pte_offset_map(pmd, addr);
+			pr_alert(", *pte=%08lx", pte_val(*pte));
+			pte_unmap(pte);
+		}
+	} while (0);
+
+	pr_alert("\n");
+}
+
+void do_page_fault(unsigned long entry, unsigned long addr,
+		   unsigned int error_code, struct pt_regs *regs)
+{
+	struct task_struct *tsk;
+	struct mm_struct *mm;
+	struct vm_area_struct *vma;
+	int si_code;
+	vm_fault_t fault;
+	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	error_code = error_code & (ITYPE_mskINST | ITYPE_mskETYPE);
+	tsk = current;
+	mm = tsk->mm;
+	si_code = SEGV_MAPERR;
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 */
+	if (addr >= TASK_SIZE) {
+		if (user_mode(regs))
+			goto bad_area_nosemaphore;
+
+		if (addr >= TASK_SIZE && addr < VMALLOC_END
+		    && (entry == ENTRY_PTE_NOT_PRESENT))
+			goto vmalloc_fault;
+		else
+			goto no_context;
+	}
+
+	/* Send a signal to the task for handling the unaligned access. */
+	if (entry == ENTRY_GENERAL_EXCPETION
+	    && error_code == ETYPE_ALIGNMENT_CHECK) {
+		if (user_mode(regs))
+			goto bad_area_nosemaphore;
+		else
+			goto no_context;
+	}
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (unlikely(faulthandler_disabled() || !mm))
+		goto no_context;
+
+	/*
+	 * As per x86, we may deadlock here. However, since the kernel only
+	 * validly references user space from well defined areas of the code,
+	 * we can bug out early if this is from code which shouldn't.
+	 */
+	if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+		if (!user_mode(regs) &&
+		    !search_exception_tables(instruction_pointer(regs)))
+			goto no_context;
+retry:
+		down_read(&mm->mmap_sem);
+	} else {
+		/*
+		 * The above down_read_trylock() might have succeeded in which
+		 * case, we'll have missed the might_sleep() from down_read().
+		 */
+		might_sleep();
+		if (IS_ENABLED(CONFIG_DEBUG_VM)) {
+			if (!user_mode(regs) &&
+			    !search_exception_tables(instruction_pointer(regs)))
+				goto no_context;
+		}
+	}
+
+	vma = find_vma(mm, addr);
+
+	if (unlikely(!vma))
+		goto bad_area;
+
+	if (vma->vm_start <= addr)
+		goto good_area;
+
+	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
+		goto bad_area;
+
+	if (unlikely(expand_stack(vma, addr)))
+		goto bad_area;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+
+good_area:
+	si_code = SEGV_ACCERR;
+
+	/* first do some preliminary protection checks */
+	if (entry == ENTRY_PTE_NOT_PRESENT) {
+		if (error_code & ITYPE_mskINST)
+			mask = VM_EXEC;
+		else {
+			mask = VM_READ | VM_WRITE;
+			if (vma->vm_flags & VM_WRITE)
+				flags |= FAULT_FLAG_WRITE;
+		}
+	} else if (entry == ENTRY_TLB_MISC) {
+		switch (error_code & ITYPE_mskETYPE) {
+		case RD_PROT:
+			mask = VM_READ;
+			break;
+		case WRT_PROT:
+			mask = VM_WRITE;
+			flags |= FAULT_FLAG_WRITE;
+			break;
+		case NOEXEC:
+			mask = VM_EXEC;
+			break;
+		case PAGE_MODIFY:
+			mask = VM_WRITE;
+			flags |= FAULT_FLAG_WRITE;
+			break;
+		case ACC_BIT:
+			BUG();
+		default:
+			break;
+		}
+
+	}
+	if (!(vma->vm_flags & mask))
+		goto bad_area;
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+
+	fault = handle_mm_fault(vma, addr, flags);
+
+	/*
+	 * If we need to retry but a fatal signal is pending, handle the
+	 * signal first. We do not need to release the mmap_sem because it
+	 * would already be released in __lock_page_or_retry in mm/filemap.c.
+	 */
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+		if (!user_mode(regs))
+			goto no_context;
+		return;
+	}
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		else
+			goto bad_area;
+	}
+
+	/*
+	 * Major/minor page fault accounting is only done on the initial
+	 * attempt. If we go through a retry, it is extremely likely that the
+	 * page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			tsk->maj_flt++;
+		else
+			tsk->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
+	return;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+
+	/* User mode accesses just cause a SIGSEGV */
+
+	if (user_mode(regs)) {
+		tsk->thread.address = addr;
+		tsk->thread.error_code = error_code;
+		tsk->thread.trap_no = entry;
+		force_sig_fault(SIGSEGV, si_code, (void __user *)addr, tsk);
+		return;
+	}
+
+no_context:
+
+	/* Are we prepared to handle this kernel fault?
+	 *
+	 * (The kernel has valid exception-points in the source
+	 *  when it accesses user memory. When it fails in one
+	 *  of those points, we find it in a table and do a jump
+	 *  to some fixup code that loads an appropriate error
+	 *  code)
+	 */
+
+	{
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(instruction_pointer(regs));
+		if (entry != NULL) {
+			/* Adjust the instruction pointer in the stackframe */
+			instruction_pointer(regs) = entry->fixup;
+			return;
+		}
+	}
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+
+	bust_spinlocks(1);
+	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
+		 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+		 "paging request", addr);
+
+	show_pte(mm, addr);
+	die("Oops", regs, error_code);
+	bust_spinlocks(0);
+	do_exit(SIGKILL);
+
+	return;
+
+	/*
+	 * We ran out of memory, or some other thing happened to us that made
+	 * us unable to handle the page fault gracefully.
+	 */
+
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!user_mode(regs))
+		goto no_context;
+
+	/*
+	 * Send a sigbus
+	 */
+	tsk->thread.address = addr;
+	tsk->thread.error_code = error_code;
+	tsk->thread.trap_no = entry;
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, tsk);
+
+	return;
+
+vmalloc_fault:
+	{
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Use current_pgd instead of tsk->active_mm->pgd
+		 * since the latter might be unavailable if this
+		 * code is executed in an inopportunely timed irq
+		 * (like inside schedule() between switch_mm and
+		 *  switch_to...).
+		 */
+
+		unsigned int index = pgd_index(addr);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+		pte_t *pte_k;
+
+		pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index;
+		pgd_k = init_mm.pgd + index;
+
+		if (!pgd_present(*pgd_k))
+			goto no_context;
+
+		pud = pud_offset(pgd, addr);
+		pud_k = pud_offset(pgd_k, addr);
+		if (!pud_present(*pud_k))
+			goto no_context;
+
+		pmd = pmd_offset(pud, addr);
+		pmd_k = pmd_offset(pud_k, addr);
+		if (!pmd_present(*pmd_k))
+			goto no_context;
+
+		if (!pmd_present(*pmd))
+			set_pmd(pmd, *pmd_k);
+		else
+			BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+
+		/*
+		 * Since the vmalloc area is global, we don't
+		 * need to copy individual PTE's, it is enough to
+		 * copy the pgd pointer into the pte page of the
+		 * root task. If that is there, we'll find our pte if
+		 * it exists.
+		 */
+
+		/* Make sure the actual PTE exists as well to
+		 * catch kernel vmalloc-area accesses to non-mapped
+		 * addres. If we don't do this, this will just
+		 * silently loop forever.
+		 */
+
+		pte_k = pte_offset_kernel(pmd_k, addr);
+		if (!pte_present(*pte_k))
+			goto no_context;
+
+		return;
+	}
+}
diff --git a/arch/nds32/mm/highmem.c b/arch/nds32/mm/highmem.c
new file mode 100644
index 0000000..e17cb8a
--- /dev/null
+++ b/arch/nds32/mm/highmem.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/bootmem.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+
+void *kmap(struct page *page)
+{
+	unsigned long vaddr;
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	vaddr = (unsigned long)kmap_high(page);
+	return (void *)vaddr;
+}
+
+EXPORT_SYMBOL(kmap);
+
+void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+
+EXPORT_SYMBOL(kunmap);
+
+void *kmap_atomic(struct page *page)
+{
+	unsigned int idx;
+	unsigned long vaddr, pte;
+	int type;
+	pte_t *ptep;
+
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
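+	/*
+	 * Each CPU owns a block of fixmap slots.  Install the PTE and
+	 * pre-load it into the TLB (tlbop_rwr) so the mapping is usable
+	 * immediately.
+	 */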
+	type = kmap_atomic_idx_push();
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	pte = (page_to_pfn(page) << PAGE_SHIFT) | (PAGE_KERNEL);
+	ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	set_pte(ptep, pte);
+
+	__nds32__tlbop_inv(vaddr);
+	__nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN);
+	__nds32__tlbop_rwr(pte);
+	__nds32__isb();
+	return (void *)vaddr;
+}
+
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	if (kvaddr >= (void *)FIXADDR_START) {
+		unsigned long vaddr = (unsigned long)kvaddr;
+		pte_t *ptep;
+		kmap_atomic_idx_pop();
+		__nds32__tlbop_inv(vaddr);
+		__nds32__isb();
+		ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+		set_pte(ptep, 0);
+	}
+	pagefault_enable();
+	preempt_enable();
+}
+
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c
new file mode 100644
index 0000000..c713d2a
--- /dev/null
+++ b/arch/nds32/mm/init.c
@@ -0,0 +1,278 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 1995-2005 Russell King
+// Copyright (C) 2012 ARM Ltd.
+// Copyright (C) 2013-2017 Andes Technology Corporation
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/swap.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mman.h>
+#include <linux/nodemask.h>
+#include <linux/initrd.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/tlb.h>
+#include <asm/page.h>
+
+DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+DEFINE_SPINLOCK(anon_alias_lock);
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
+extern unsigned long phys_initrd_start;
+extern unsigned long phys_initrd_size;
+
+/*
+ * empty_zero_page is a special page that is used for
+ * zero-initialized data and COW.
+ */
+struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
+
+static void __init zone_sizes_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+
+	/* Clear the zone sizes */
+	memset(zones_size, 0, sizeof(zones_size));
+
+	zones_size[ZONE_NORMAL] = max_low_pfn;
+#ifdef CONFIG_HIGHMEM
+	zones_size[ZONE_HIGHMEM] = max_pfn;
+#endif
+	free_area_init(zones_size);
+
+}
+
+/*
+ * Map all physical memory under high_memory into kernel's address space.
+ *
+ * This is explicitly coded for two-level page tables, so if you need
+ * something else then this needs to change.
+ */
+static void __init map_ram(void)
+{
+	unsigned long v, p, e;
+	pgd_t *pge;
+	pud_t *pue;
+	pmd_t *pme;
+	pte_t *pte;
+	/* These mark extents of read-only kernel pages...
+	 * ...from vmlinux.lds.S
+	 */
+
+	p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
+	e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));
+
+	v = (u32) __va(p);
+	pge = pgd_offset_k(v);
+
+	while (p < e) {
+		int j;
+		pue = pud_offset(pge, v);
+		pme = pmd_offset(pue, v);
+
+		if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
+			panic("%s: Kernel hardcoded for "
+			      "two-level page tables", __func__);
+		}
+
+		/* Alloc one page for holding PTE's... */
+		pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+		memset(pte, 0, PAGE_SIZE);
+		set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
+
+		/* Fill the newly allocated page with PTE'S */
+		for (j = 0; p < e && j < PTRS_PER_PTE;
+		     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
+			/* Create mapping between p and v. */
+			/* TODO: finer-grained page access permissions */
+			set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
+		}
+
+		pge++;
+	}
+}
+static pmd_t *fixmap_pmd_p;
+static void __init fixedrange_init(void)
+{
+	unsigned long vaddr;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+#ifdef CONFIG_HIGHMEM
+	pte_t *pte;
+#endif /* CONFIG_HIGHMEM */
+
+	/*
+	 * Fixed mappings:
+	 */
+	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+	pgd = swapper_pg_dir + pgd_index(vaddr);
+	pud = pud_offset(pgd, vaddr);
+	pmd = pmd_offset(pud, vaddr);
+	fixmap_pmd_p = (pmd_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+	memset(fixmap_pmd_p, 0, PAGE_SIZE);
+	set_pmd(pmd, __pmd(__pa(fixmap_pmd_p) + _PAGE_KERNEL_TABLE));
+
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Permanent kmaps:
+	 */
+	vaddr = PKMAP_BASE;
+
+	pgd = swapper_pg_dir + pgd_index(vaddr);
+	pud = pud_offset(pgd, vaddr);
+	pmd = pmd_offset(pud, vaddr);
+	pte = (pte_t *) __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+	memset(pte, 0, PAGE_SIZE);
+	set_pmd(pmd, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
+	pkmap_page_table = pte;
+#endif /* CONFIG_HIGHMEM */
+}
+
+/*
+ * paging_init() sets up the page tables, initialises the zone memory
+ * maps, and sets up the zero page, bad page and bad page tables.
+ */
+void __init paging_init(void)
+{
+	int i;
+	void *zero_page;
+
+	pr_info("Setting up paging and PTEs.\n");
+	/* clear out the init_mm.pgd that will contain the kernel's mappings */
+	for (i = 0; i < PTRS_PER_PGD; i++)
+		swapper_pg_dir[i] = __pgd(1);
+
+	map_ram();
+
+	fixedrange_init();
+
+	/* allocate space for empty_zero_page */
+	zero_page = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
+	memset(zero_page, 0, PAGE_SIZE);
+	zone_sizes_init();
+
+	empty_zero_page = virt_to_page(zero_page);
+	flush_dcache_page(empty_zero_page);
+}
+
+static inline void __init free_highmem(void)
+{
+#ifdef CONFIG_HIGHMEM
+	unsigned long pfn;
+	for (pfn = PFN_UP(__pa(high_memory)); pfn < max_pfn; pfn++) {
+		phys_addr_t paddr = (phys_addr_t) pfn << PAGE_SHIFT;
+		if (!memblock_is_reserved(paddr))
+			free_highmem_page(pfn_to_page(pfn));
+	}
+#endif
+}
+
+static void __init set_max_mapnr_init(void)
+{
+	max_mapnr = max_pfn;
+}
+
+/*
+ * mem_init() marks the free areas in the mem_map and tells us how much
+ * memory is free.  This is done after various parts of the system have
+ * claimed their memory after the kernel image.
+ */
+void __init mem_init(void)
+{
+	phys_addr_t memory_start = memblock_start_of_DRAM();
+	BUG_ON(!mem_map);
+	set_max_mapnr_init();
+
+	free_highmem();
+
+	/* this will put all low memory onto the freelists */
+	free_all_bootmem();
+	mem_init_print_info(NULL);
+
+	pr_info("virtual kernel memory layout:\n"
+		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#ifdef CONFIG_HIGHMEM
+		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+#endif
+		"    consist : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
+		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
+		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
+		FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10,
+#ifdef CONFIG_HIGHMEM
+		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+		(LAST_PKMAP * PAGE_SIZE) >> 10,
+#endif
+		CONSISTENT_BASE, CONSISTENT_END,
+		((CONSISTENT_END) - (CONSISTENT_BASE)) >> 20, VMALLOC_START,
+		(unsigned long)VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20,
+		(unsigned long)__va(memory_start), (unsigned long)high_memory,
+		((unsigned long)high_memory -
+		 (unsigned long)__va(memory_start)) >> 20,
+		(unsigned long)&__init_begin, (unsigned long)&__init_end,
+		((unsigned long)&__init_end -
+		 (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext,
+		(unsigned long)&_edata,
+		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
+		(unsigned long)&_text, (unsigned long)&_etext,
+		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
+
+	/*
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
+	 */
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
+	BUILD_BUG_ON((CONSISTENT_END) > PKMAP_BASE);
+#endif
+	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
+
+#ifdef CONFIG_HIGHMEM
+	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > FIXADDR_START);
+	BUG_ON(CONSISTENT_END > PKMAP_BASE);
+#endif
+	BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+	BUG_ON(VMALLOC_START >= VMALLOC_END);
+	BUG_ON((unsigned long)high_memory > VMALLOC_START);
+
+	return;
+}
+
+void free_initmem(void)
+{
+	free_initmem_default(-1);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
+void __set_fixmap(enum fixed_addresses idx,
+			       phys_addr_t phys, pgprot_t flags)
+{
+	unsigned long addr = __fix_to_virt(idx);
+	pte_t *pte;
+
+	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+	pte = (pte_t *)&fixmap_pmd_p[pte_index(addr)];
+
+	if (pgprot_val(flags)) {
+		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+	} else {
+		pte_clear(&init_mm, addr, pte);
+		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	}
+}
diff --git a/arch/nds32/mm/ioremap.c b/arch/nds32/mm/ioremap.c
new file mode 100644
index 0000000..690140b
--- /dev/null
+++ b/arch/nds32/mm/ioremap.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
+
+static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
+				      void *caller)
+{
+	struct vm_struct *area;
+	unsigned long addr, offset, last_addr;
+	pgprot_t prot;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
+	if (!area)
+		return NULL;
+
+	area->phys_addr = phys_addr;
+	addr = (unsigned long)area->addr;
+	prot = __pgprot(_PAGE_V | _PAGE_M_KRW | _PAGE_D |
+			_PAGE_G | _PAGE_C_DEV);
+	if (ioremap_page_range(addr, addr + size, phys_addr, prot)) {
+		vunmap((void *)addr);
+		return NULL;
+	}
+	return (__force void __iomem *)(offset + (char *)addr);
+
+}
+
+void __iomem *ioremap(phys_addr_t phys_addr, size_t size)
+{
+	return __ioremap_caller(phys_addr, size,
+				__builtin_return_address(0));
+}
+
+EXPORT_SYMBOL(ioremap);
+
+void iounmap(volatile void __iomem * addr)
+{
+	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/nds32/mm/mm-nds32.c b/arch/nds32/mm/mm-nds32.c
new file mode 100644
index 0000000..3b43798
--- /dev/null
+++ b/arch/nds32/mm/mm-nds32.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/init_task.h>
+#include <asm/pgalloc.h>
+
+#define FIRST_KERNEL_PGD_NR	(USER_PTRS_PER_PGD)
+
+/*
+ * need to get a page for level 1
+ */
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *new_pgd, *init_pgd;
+	int i;
+
+	new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0);
+	if (!new_pgd)
+		return NULL;
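+	/*
+	 * Fill every slot with 1, the same initial value paging_init()
+	 * writes into swapper_pg_dir, before copying the kernel entries.
+	 */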
+	for (i = 0; i < PTRS_PER_PGD; i++) {
+		(*new_pgd) = 1;
+		new_pgd++;
+	}
+	new_pgd -= PTRS_PER_PGD;
+
+	init_pgd = pgd_offset_k(0);
+
+	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
+	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
+
+	cpu_dcache_wb_range((unsigned long)new_pgd,
+			    (unsigned long)new_pgd +
+			    PTRS_PER_PGD * sizeof(pgd_t));
+	inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),
+			    NR_PAGETABLE);
+
+	return new_pgd;
+}
+
+void pgd_free(struct mm_struct *mm, pgd_t * pgd)
+{
+	pmd_t *pmd;
+	struct page *pte;
+
+	if (!pgd)
+		return;
+
+	pmd = (pmd_t *) pgd;
+	if (pmd_none(*pmd))
+		goto free;
+	if (pmd_bad(*pmd)) {
+		pmd_ERROR(*pmd);
+		pmd_clear(pmd);
+		goto free;
+	}
+
+	pte = pmd_page(*pmd);
+	pmd_clear(pmd);
+	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
+	pte_free(mm, pte);
+	mm_dec_nr_ptes(mm);
+	pmd_free(mm, pmd);
+free:
+	free_pages((unsigned long)pgd, 0);
+}
+
+/*
+ * In order to soft-boot, we need to insert a 1:1 mapping in place of
+ * the user-mode pages.  This will then ensure that we have predictable
+ * results when turning the mmu off
+ */
+void setup_mm_for_reboot(char mode)
+{
+	unsigned long pmdval;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	int i;
+
+	if (current->mm && current->mm->pgd)
+		pgd = current->mm->pgd;
+	else
+		pgd = init_mm.pgd;
+
+	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
+		pmdval = (i << PGDIR_SHIFT);
+		pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT);
+		set_pmd(pmd, __pmd(pmdval));
+	}
+}
diff --git a/arch/nds32/mm/mmap.c b/arch/nds32/mm/mmap.c
new file mode 100644
index 0000000..c206b31
--- /dev/null
+++ b/arch/nds32/mm/mmap.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/sched.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+
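+/*
+ * COLOUR_ALIGN() rounds 'addr' up to an SHMLBA boundary and adds the cache
+ * colour of 'pgoff' (its byte offset modulo SHMLBA), so the mapping and the
+ * file offset share the same D-cache colour.
+ */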
+#define COLOUR_ALIGN(addr,pgoff)		\
+	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
+	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
+
+/*
+ * We need to ensure that shared mappings are correctly aligned to
+ * avoid aliasing issues with VIPT caches.  We need to ensure that
+ * a specific page of an object is always mapped at a multiple of
+ * SHMLBA bytes.
+ *
+ * We unconditionally provide this function for all cases, however
+ * in the VIVT case, we optimise out the alignment rules.
+ */
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+		       unsigned long len, unsigned long pgoff,
+		       unsigned long flags)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	int do_align = 0;
+	struct vm_unmapped_area_info info;
+	int aliasing = 0;
+	if (IS_ENABLED(CONFIG_CPU_CACHE_ALIASING))
+		aliasing = 1;
+
+	/*
+	 * We only need to do colour alignment if either the I or D
+	 * caches alias.
+	 */
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);
+
+	/*
+	 * We enforce the MAP_FIXED case.
+	 */
+	if (flags & MAP_FIXED) {
+		if (aliasing && flags & MAP_SHARED &&
+		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (len > TASK_SIZE)
+		return -ENOMEM;
+
+	if (addr) {
+		if (do_align)
+			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		vma = find_vma(mm, addr);
+		if (TASK_SIZE - len >= addr &&
+		    (!vma || addr + len <= vma->vm_start))
+			return addr;
+	}
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = mm->mmap_base;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+	info.align_offset = pgoff << PAGE_SHIFT;
+	return vm_unmapped_area(&info);
+}
diff --git a/arch/nds32/mm/proc.c b/arch/nds32/mm/proc.c
new file mode 100644
index 0000000..ba80992
--- /dev/null
+++ b/arch/nds32/mm/proc.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <asm/nds32.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/l2_cache.h>
+#include <nds32_intrinsic.h>
+
+#include <asm/cache_info.h>
+extern struct cache_info L1_cache_info[2];
+
+int va_kernel_present(unsigned long addr)
+{
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+
+	pmd = pmd_offset(pgd_offset_k(addr), addr);
+	if (!pmd_none(*pmd)) {
+		ptep = pte_offset_map(pmd, addr);
+		pte = *ptep;
+		if (pte_present(pte))
+			return pte;
+	}
+	return 0;
+}
+
+pte_t va_present(struct mm_struct * mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+
+	pgd = pgd_offset(mm, addr);
+	if (!pgd_none(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (!pud_none(*pud)) {
+			pmd = pmd_offset(pud, addr);
+			if (!pmd_none(*pmd)) {
+				ptep = pte_offset_map(pmd, addr);
+				pte = *ptep;
+				if (pte_present(pte))
+					return pte;
+			}
+		}
+	}
+	return 0;
+
+}
+
+int va_readable(struct pt_regs *regs, unsigned long addr)
+{
+	struct mm_struct *mm = current->mm;
+	pte_t pte;
+	int ret = 0;
+
+	if (user_mode(regs)) {
+		/* user mode */
+		pte = va_present(mm, addr);
+		if (pte && pte_read(pte))
+			ret = 1;
+	} else {
+		/*
+		 * Kernel mode is always readable, so we only need to
+		 * check whether the address is present.
+		 */
+		return !!va_kernel_present(addr);
+	}
+	return ret;
+}
+
+int va_writable(struct pt_regs *regs, unsigned long addr)
+{
+	struct mm_struct *mm = current->mm;
+	pte_t pte;
+	int ret = 0;
+
+	if (user_mode(regs)) {
+		/* user mode */
+		pte = va_present(mm, addr);
+		if (pte && pte_write(pte))
+			ret = 1;
+	} else {
+		/* superuser mode */
+		pte = va_kernel_present(addr);
+		if (pte && pte_kernel_write(pte))
+			ret = 1;
+	}
+	return ret;
+}
+
+/*
+ * All
+ */
+void cpu_icache_inval_all(void)
+{
+	unsigned long end, line_size;
+
+	line_size = L1_cache_info[ICACHE].line_size;
+	end = line_size * L1_cache_info[ICACHE].ways *
+	      L1_cache_info[ICACHE].sets;
+
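+	/*
+	 * 'end' starts at ways * sets * line_size, i.e. the whole cache;
+	 * walk it down by index, invalidating four lines per iteration.
+	 */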
+	do {
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_IX_INVAL"::"r" (end));
+	} while (end > 0);
+	__nds32__isb();
+}
+
+void cpu_dcache_inval_all(void)
+{
+	__nds32__cctl_l1d_invalall();
+}
+
+#ifdef CONFIG_CACHE_L2
+void dcache_wb_all_level(void)
+{
+	unsigned long flags, cmd;
+	local_irq_save(flags);
+	__nds32__cctl_l1d_wball_alvl();
+	/* Section 1: Ensure sections 2 & 3 execute after the writeback above */
+	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD, 0);
+
+	/* Section 2: Confirm the all-level writeback completed in CPU and L2C */
+	cmd = CCTL_CMD_L2_SYNC;
+	L2_CMD_RDY();
+	L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+	L2_CMD_RDY();
+
+	/* Section 3: Writeback whole L2 cache */
+	cmd = CCTL_ALL_CMD | CCTL_CMD_L2_IX_WB;
+	L2_CMD_RDY();
+	L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+	L2_CMD_RDY();
+	__nds32__msync_all();
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(dcache_wb_all_level);
+#endif
+
+void cpu_dcache_wb_all(void)
+{
+	__nds32__cctl_l1d_wball_one_lvl();
+	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+}
+
+void cpu_dcache_wbinval_all(void)
+{
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	unsigned long flags;
+	local_irq_save(flags);
+#endif
+	cpu_dcache_wb_all();
+	cpu_dcache_inval_all();
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	local_irq_restore(flags);
+#endif
+}
+
+/*
+ * Page
+ */
+void cpu_icache_inval_page(unsigned long start)
+{
+	unsigned long line_size, end;
+
+	line_size = L1_cache_info[ICACHE].line_size;
+	end = start + PAGE_SIZE;
+
+	do {
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (end));
+	} while (end != start);
+	__nds32__isb();
+}
+
+void cpu_dcache_inval_page(unsigned long start)
+{
+	unsigned long line_size, end;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+	end = start + PAGE_SIZE;
+
+	do {
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+	} while (end != start);
+}
+
+void cpu_dcache_wb_page(unsigned long start)
+{
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	unsigned long line_size, end;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+	end = start + PAGE_SIZE;
+
+	do {
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+		end -= line_size;
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+	} while (end != start);
+	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+#endif
+}
+
+void cpu_dcache_wbinval_page(unsigned long start)
+{
+	unsigned long line_size, end;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+	end = start + PAGE_SIZE;
+
+	do {
+		end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+		end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+		end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+		end -= line_size;
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (end));
+#endif
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (end));
+	} while (end != start);
+	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+}
+
+void cpu_cache_wbinval_page(unsigned long page, int flushi)
+{
+	cpu_dcache_wbinval_page(page);
+	if (flushi)
+		cpu_icache_inval_page(page);
+}
+
+/*
+ * Range
+ */
+void cpu_icache_inval_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size;
+
+	line_size = L1_cache_info[ICACHE].line_size;
+
+	while (end > start) {
+		__asm__ volatile ("\n\tcctl %0, L1I_VA_INVAL"::"r" (start));
+		start += line_size;
+	}
+	__nds32__isb();
+}
+
+void cpu_dcache_inval_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+
+	while (end > start) {
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
+		start += line_size;
+	}
+}
+
+void cpu_dcache_wb_range(unsigned long start, unsigned long end)
+{
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+	unsigned long line_size;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+
+	while (end > start) {
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
+		start += line_size;
+	}
+	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+#endif
+}
+
+void cpu_dcache_wbinval_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+
+	while (end > start) {
+#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_WB"::"r" (start));
+#endif
+		__asm__ volatile ("\n\tcctl %0, L1D_VA_INVAL"::"r" (start));
+		start += line_size;
+	}
+	__nds32__cctlidx_read(NDS32_CCTL_L1D_IX_RWD,0);
+}
+
+void cpu_cache_wbinval_range(unsigned long start, unsigned long end, int flushi)
+{
+	unsigned long line_size, align_start, align_end;
+
+	line_size = L1_cache_info[DCACHE].line_size;
+	align_start = start & ~(line_size - 1);
+	align_end = (end + line_size - 1) & ~(line_size - 1);
+	cpu_dcache_wbinval_range(align_start, align_end);
+
+	if (flushi) {
+		line_size = L1_cache_info[ICACHE].line_size;
+		align_start = start & ~(line_size - 1);
+		align_end = (end + line_size - 1) & ~(line_size - 1);
+		cpu_icache_inval_range(align_start, align_end);
+	}
+}
+
+void cpu_cache_wbinval_range_check(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end,
+				   bool flushi, bool wbd)
+{
+	unsigned long line_size, t_start, t_end;
+
+	if (!flushi && !wbd)
+		return;
+	line_size = L1_cache_info[DCACHE].line_size;
+	start = start & ~(line_size - 1);
+	end = (end + line_size - 1) & ~(line_size - 1);
+
+	if ((end - start) > (8 * PAGE_SIZE)) {
+		if (wbd)
+			cpu_dcache_wbinval_all();
+		if (flushi)
+			cpu_icache_inval_all();
+		return;
+	}
+
+	t_start = (start + PAGE_SIZE) & PAGE_MASK;
+	t_end = ((end - 1) & PAGE_MASK);
+
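+	/*
+	 * t_start is the first page boundary above 'start' and t_end the page
+	 * containing 'end - 1': the partial head and tail are handled with
+	 * range operations, the whole pages in between page by page.
+	 */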
+	if ((start & PAGE_MASK) == t_end) {
+		if (va_present(vma->vm_mm, start)) {
+			if (wbd)
+				cpu_dcache_wbinval_range(start, end);
+			if (flushi)
+				cpu_icache_inval_range(start, end);
+		}
+		return;
+	}
+
+	if (va_present(vma->vm_mm, start)) {
+		if (wbd)
+			cpu_dcache_wbinval_range(start, t_start);
+		if (flushi)
+			cpu_icache_inval_range(start, t_start);
+	}
+
+	if (va_present(vma->vm_mm, end - 1)) {
+		if (wbd)
+			cpu_dcache_wbinval_range(t_end, end);
+		if (flushi)
+			cpu_icache_inval_range(t_end, end);
+	}
+
+	while (t_start < t_end) {
+		if (va_present(vma->vm_mm, t_start)) {
+			if (wbd)
+				cpu_dcache_wbinval_page(t_start);
+			if (flushi)
+				cpu_icache_inval_page(t_start);
+		}
+		t_start += PAGE_SIZE;
+	}
+}
+
+#ifdef CONFIG_CACHE_L2
+static inline void cpu_l2cache_op(unsigned long start, unsigned long end, unsigned long op)
+{
+	if (atl2c_base) {
+		unsigned long p_start = __pa(start);
+		unsigned long p_end = __pa(end);
+		unsigned long cmd;
+		unsigned long line_size;
+		/* TODO: use PAGE mode to optimize when the range exceeds PAGE_SIZE */
+		line_size = L2_CACHE_LINE_SIZE();
+		p_start = p_start & (~(line_size - 1));
+		p_end = (p_end + line_size - 1) & (~(line_size - 1));
+		cmd = (p_start & ~(line_size - 1)) | op | CCTL_SINGLE_CMD;
+		do {
+			L2_CMD_RDY();
+			L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+			cmd += line_size;
+			p_start += line_size;
+		} while (p_end > p_start);
+		cmd = CCTL_CMD_L2_SYNC;
+		L2_CMD_RDY();
+		L2C_W_REG(L2_CCTL_CMD_OFF, cmd);
+		L2_CMD_RDY();
+	}
+}
+#else
+#define cpu_l2cache_op(start, end, op) do { } while (0)
+#endif
+/*
+ * DMA
+ */
+void cpu_dma_wb_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size;
+	unsigned long flags;
+	line_size = L1_cache_info[DCACHE].line_size;
+	start = start & (~(line_size - 1));
+	end = (end + line_size - 1) & (~(line_size - 1));
+	if (unlikely(start == end))
+		return;
+
+	local_irq_save(flags);
+	cpu_dcache_wb_range(start, end);
+	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WB);
+	__nds32__msync_all();
+	local_irq_restore(flags);
+}
+
+void cpu_dma_inval_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size;
+	unsigned long old_start = start;
+	unsigned long old_end = end;
+	unsigned long flags;
+	line_size = L1_cache_info[DCACHE].line_size;
+	start = start & (~(line_size - 1));
+	end = (end + line_size - 1) & (~(line_size - 1));
+	if (unlikely(start == end))
+		return;
+	local_irq_save(flags);
+	if (start != old_start) {
+		cpu_dcache_wbinval_range(start, start + line_size);
+		cpu_l2cache_op(start, start + line_size, CCTL_CMD_L2_PA_WBINVAL);
+	}
+	if (end != old_end) {
+		cpu_dcache_wbinval_range(end - line_size, end);
+		cpu_l2cache_op(end - line_size, end, CCTL_CMD_L2_PA_WBINVAL);
+	}
+	cpu_dcache_inval_range(start, end);
+	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_INVAL);
+	__nds32__msync_all();
+	local_irq_restore(flags);
+
+}
+
+void cpu_dma_wbinval_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size;
+	unsigned long flags;
+	line_size = L1_cache_info[DCACHE].line_size;
+	start = start & (~(line_size - 1));
+	end = (end + line_size - 1) & (~(line_size - 1));
+	if (unlikely(start == end))
+		return;
+
+	local_irq_save(flags);
+	cpu_dcache_wbinval_range(start, end);
+	cpu_l2cache_op(start, end, CCTL_CMD_L2_PA_WBINVAL);
+	__nds32__msync_all();
+	local_irq_restore(flags);
+}
+
+void cpu_proc_init(void)
+{
+}
+
+void cpu_proc_fin(void)
+{
+}
+
+void cpu_do_idle(void)
+{
+	__nds32__standby_no_wake_grant();
+}
+
+void cpu_reset(unsigned long reset)
+{
+	u32 tmp;
+	GIE_DISABLE();
+	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
+	tmp &= ~(CACHE_CTL_mskIC_EN | CACHE_CTL_mskDC_EN);
+	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);
+	cpu_dcache_wbinval_all();
+	cpu_icache_inval_all();
+
+	__asm__ __volatile__("jr.toff %0\n\t"::"r"(reset));
+}
+
+void cpu_switch_mm(struct mm_struct *mm)
+{
+	unsigned long cid;
+	cid = __nds32__mfsr(NDS32_SR_TLB_MISC);
+	cid = (cid & ~TLB_MISC_mskCID) | mm->context.id;
+	__nds32__mtsr_dsb(cid, NDS32_SR_TLB_MISC);
+	__nds32__mtsr_isb(__pa(mm->pgd), NDS32_SR_L1_PPTB);
+}
diff --git a/arch/nds32/mm/tlb.c b/arch/nds32/mm/tlb.c
new file mode 100644
index 0000000..dd41f5e
--- /dev/null
+++ b/arch/nds32/mm/tlb.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2005-2017 Andes Technology Corporation
+
+#include <linux/spinlock_types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/nds32.h>
+#include <nds32_intrinsic.h>
+
+unsigned int cpu_last_cid = { TLB_MISC_mskCID + (2 << TLB_MISC_offCID) };
+
+DEFINE_SPINLOCK(cid_lock);
+
+void local_flush_tlb_range(struct vm_area_struct *vma,
+			   unsigned long start, unsigned long end)
+{
+	unsigned long flags, ocid, ncid;
+
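+	/* Ranges larger than 4MB (0x400000) just flush the whole TLB. */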
+	if ((end - start) > 0x400000) {
+		__nds32__tlbop_flua();
+		__nds32__isb();
+		return;
+	}
+
+	spin_lock_irqsave(&cid_lock, flags);
+	ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
+	ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
+	__nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
+	while (start < end) {
+		__nds32__tlbop_inv(start);
+		__nds32__isb();
+		start += PAGE_SIZE;
+	}
+	__nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
+	spin_unlock_irqrestore(&cid_lock, flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	unsigned long flags, ocid, ncid;
+
+	spin_lock_irqsave(&cid_lock, flags);
+	ocid = __nds32__mfsr(NDS32_SR_TLB_MISC);
+	ncid = (ocid & ~TLB_MISC_mskCID) | vma->vm_mm->context.id;
+	__nds32__mtsr_dsb(ncid, NDS32_SR_TLB_MISC);
+	__nds32__tlbop_inv(addr);
+	__nds32__isb();
+	__nds32__mtsr_dsb(ocid, NDS32_SR_TLB_MISC);
+	spin_unlock_irqrestore(&cid_lock, flags);
+}