arch/nios2/mm from the Linux v4.19.13 snapshot.
diff --git a/arch/nios2/mm/Makefile b/arch/nios2/mm/Makefile
new file mode 100644
index 0000000..9d37faf
--- /dev/null
+++ b/arch/nios2/mm/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Nios2-specific parts of the memory manager.
+#
+
+obj-y	+= cacheflush.o
+obj-y	+= dma-mapping.o
+obj-y	+= extable.o
+obj-y	+= fault.o
+obj-y	+= init.o
+obj-y	+= ioremap.o
+obj-y	+= mmu_context.o
+obj-y	+= pgtable.o
+obj-y	+= tlb.o
+obj-y	+= uaccess.o
diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c
new file mode 100644
index 0000000..506f6e1
--- /dev/null
+++ b/arch/nios2/mm/cacheflush.c
@@ -0,0 +1,269 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
+
+static void __flush_dcache(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(cpuinfo.dcache_line_size - 1);
+	end += (cpuinfo.dcache_line_size - 1);
+	end &= ~(cpuinfo.dcache_line_size - 1);
+
+	if (end > start + cpuinfo.dcache_size)
+		end = start + cpuinfo.dcache_size;
+
+	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
+		__asm__ __volatile__ ("   flushd 0(%0)\n"
+					: /* Outputs */
+					: /* Inputs  */ "r"(addr)
+					/* : No clobber */);
+	}
+}
+
+static void __invalidate_dcache(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(cpuinfo.dcache_line_size - 1);
+	end += (cpuinfo.dcache_line_size - 1);
+	end &= ~(cpuinfo.dcache_line_size - 1);
+
+	for (addr = start; addr < end; addr += cpuinfo.dcache_line_size) {
+		__asm__ __volatile__ ("   initda 0(%0)\n"
+					: /* Outputs */
+					: /* Inputs  */ "r"(addr)
+					/* : No clobber */);
+	}
+}
+
+static void __flush_icache(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+
+	start &= ~(cpuinfo.icache_line_size - 1);
+	end += (cpuinfo.icache_line_size - 1);
+	end &= ~(cpuinfo.icache_line_size - 1);
+
+	if (end > start + cpuinfo.icache_size)
+		end = start + cpuinfo.icache_size;
+
+	for (addr = start; addr < end; addr += cpuinfo.icache_line_size) {
+		__asm__ __volatile__ ("   flushi %0\n"
+					: /* Outputs */
+					: /* Inputs  */ "r"(addr)
+					/* : No clobber */);
+	}
+	__asm__ __volatile(" flushp\n");
+}
+
+static void flush_aliases(struct address_space *mapping, struct page *page)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *mpnt;
+	pgoff_t pgoff;
+
+	pgoff = page->index;
+
+	flush_dcache_mmap_lock(mapping);
+	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
+		unsigned long offset;
+
+		if (mpnt->vm_mm != mm)
+			continue;
+		if (!(mpnt->vm_flags & VM_MAYSHARE))
+			continue;
+
+		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
+		flush_cache_page(mpnt, mpnt->vm_start + offset,
+			page_to_pfn(page));
+	}
+	flush_dcache_mmap_unlock(mapping);
+}
+
+void flush_cache_all(void)
+{
+	__flush_dcache(0, cpuinfo.dcache_size);
+	__flush_icache(0, cpuinfo.icache_size);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+	flush_cache_all();
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+	flush_cache_all();
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	__flush_dcache(start, end);
+	__flush_icache(start, end);
+}
+
+void flush_dcache_range(unsigned long start, unsigned long end)
+{
+	__flush_dcache(start, end);
+	__flush_icache(start, end);
+}
+EXPORT_SYMBOL(flush_dcache_range);
+
+void invalidate_dcache_range(unsigned long start, unsigned long end)
+{
+	__invalidate_dcache(start, end);
+}
+EXPORT_SYMBOL(invalidate_dcache_range);
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end)
+{
+	__flush_dcache(start, end);
+	if (vma == NULL || (vma->vm_flags & VM_EXEC))
+		__flush_icache(start, end);
+}
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	unsigned long start = (unsigned long) page_address(page);
+	unsigned long end = start + PAGE_SIZE;
+
+	__flush_dcache(start, end);
+	__flush_icache(start, end);
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
+			unsigned long pfn)
+{
+	unsigned long start = vmaddr;
+	unsigned long end = start + PAGE_SIZE;
+
+	__flush_dcache(start, end);
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache(start, end);
+}
+
+void __flush_dcache_page(struct address_space *mapping, struct page *page)
+{
+	/*
+	 * Writeback any data associated with the kernel mapping of this
+	 * page.  This ensures that data in the physical page is mutually
+	 * coherent with the kernel's mapping.
+	 */
+	unsigned long start = (unsigned long)page_address(page);
+
+	__flush_dcache(start, start + PAGE_SIZE);
+}
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping_file(page);
+
+	/* Flush this page if there are aliases. */
+	if (mapping && !mapping_mapped(mapping)) {
+		clear_bit(PG_dcache_clean, &page->flags);
+	} else {
+		__flush_dcache_page(mapping, page);
+		if (mapping) {
+			unsigned long start = (unsigned long)page_address(page);
+
+			flush_aliases(mapping, page);
+			flush_icache_range(start, start + PAGE_SIZE);
+		}
+		set_bit(PG_dcache_clean, &page->flags);
+	}
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void update_mmu_cache(struct vm_area_struct *vma,
+		      unsigned long address, pte_t *pte)
+{
+	unsigned long pfn = pte_pfn(*pte);
+	struct page *page;
+	struct address_space *mapping;
+
+	if (!pfn_valid(pfn))
+		return;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	page = pfn_to_page(pfn);
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping_file(page);
+	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+		__flush_dcache_page(mapping, page);
+
+	if (mapping) {
+		flush_aliases(mapping, page);
+		if (vma->vm_flags & VM_EXEC)
+			flush_icache_page(vma, page);
+	}
+}
+
+void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
+		    struct page *to)
+{
+	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
+	__flush_icache(vaddr, vaddr + PAGE_SIZE);
+	copy_page(vto, vfrom);
+	__flush_dcache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
+	__flush_icache((unsigned long)vto, (unsigned long)vto + PAGE_SIZE);
+}
+
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page)
+{
+	__flush_dcache(vaddr, vaddr + PAGE_SIZE);
+	__flush_icache(vaddr, vaddr + PAGE_SIZE);
+	clear_page(addr);
+	__flush_dcache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
+	__flush_icache((unsigned long)addr, (unsigned long)addr + PAGE_SIZE);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+			unsigned long user_vaddr,
+			void *dst, void *src, int len)
+{
+	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+	__flush_dcache((unsigned long)src, (unsigned long)src + len);
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache((unsigned long)src, (unsigned long)src + len);
+}
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+			unsigned long user_vaddr,
+			void *dst, void *src, int len)
+{
+	flush_cache_page(vma, user_vaddr, page_to_pfn(page));
+	memcpy(dst, src, len);
+	__flush_dcache((unsigned long)dst, (unsigned long)dst + len);
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache((unsigned long)dst, (unsigned long)dst + len);
+}
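
The flush primitives in cacheflush.c round the [start, end) range out to whole
cache lines and clamp the walk to one cache's worth of lines before issuing
flushd/flushi per line. A minimal user-space sketch of that arithmetic,
assuming a 32-byte line and a 32 KiB data cache (the real values come from
cpuinfo at runtime):

#include <stdio.h>

/* Assumed geometry; on hardware these come from cpuinfo. */
#define DCACHE_LINE_SIZE	32UL
#define DCACHE_SIZE		(32UL * 1024)

int main(void)
{
	unsigned long start = 0x1005, end = 0x10f3;

	/* Same rounding as __flush_dcache(): align start down, end up. */
	start &= ~(DCACHE_LINE_SIZE - 1);
	end += DCACHE_LINE_SIZE - 1;
	end &= ~(DCACHE_LINE_SIZE - 1);

	/* Never walk more than one cache's worth of lines. */
	if (end > start + DCACHE_SIZE)
		end = start + DCACHE_SIZE;

	printf("flush %#lx..%#lx (%lu lines)\n", start, end,
	       (end - start) / DCACHE_LINE_SIZE);
	return 0;
}
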
diff --git a/arch/nios2/mm/dma-mapping.c b/arch/nios2/mm/dma-mapping.c
new file mode 100644
index 0000000..4af9e5b
--- /dev/null
+++ b/arch/nios2/mm/dma-mapping.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *  Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * Based on DMA code from MIPS.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/cache.h>
+#include <asm/cacheflush.h>
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	void *vaddr = phys_to_virt(paddr);
+
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		invalidate_dcache_range((unsigned long)vaddr,
+			(unsigned long)(vaddr + size));
+		break;
+	case DMA_TO_DEVICE:
+		/*
+		 * We only need to write back the caches here, but the Nios2
+		 * flush instruction does both writeback and invalidate.
+		 */
+	case DMA_BIDIRECTIONAL: /* flush and invalidate */
+		flush_dcache_range((unsigned long)vaddr,
+			(unsigned long)(vaddr + size));
+		break;
+	default:
+		BUG();
+	}
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	void *vaddr = phys_to_virt(paddr);
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_FROM_DEVICE:
+		invalidate_dcache_range((unsigned long)vaddr,
+			(unsigned long)(vaddr + size));
+		break;
+	case DMA_TO_DEVICE:
+		break;
+	default:
+		BUG();
+	}
+}
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
+{
+	void *ret;
+
+	/* optimized page clearing */
+	gfp |= __GFP_ZERO;
+
+	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
+		gfp |= GFP_DMA;
+
+	ret = (void *) __get_free_pages(gfp, get_order(size));
+	if (ret != NULL) {
+		*dma_handle = virt_to_phys(ret);
+		flush_dcache_range((unsigned long) ret,
+			(unsigned long) ret + size);
+		ret = UNCAC_ADDR(ret);
+	}
+
+	return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
+
+	free_pages(addr, get_order(size));
+}
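
arch_sync_dma_for_device() and arch_sync_dma_for_cpu() are the hooks the
generic noncoherent-DMA code calls around streaming mappings: DMA_TO_DEVICE
writes the buffer back (the Nios2 flush also invalidates), DMA_FROM_DEVICE
invalidates so the CPU rereads what the device wrote. A hedged driver-side
sketch (hypothetical device and buffer names) of the streaming API that ends
up in these hooks:

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

/* Hypothetical helper: hand "buf" to a device that reads it (DMA_TO_DEVICE).
 * dma_map_single() reaches arch_sync_dma_for_device() above, which flushes
 * the data cache over the buffer before the device sees it. */
static int send_buffer(struct device *dev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* ... start the transfer and wait for completion here ... */

	dma_unmap_single(dev, *handle, len, DMA_TO_DEVICE);
	return 0;
}
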
diff --git a/arch/nios2/mm/extable.c b/arch/nios2/mm/extable.c
new file mode 100644
index 0000000..2574dba
--- /dev/null
+++ b/arch/nios2/mm/extable.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2010, Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009, Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+
+	fixup = search_exception_tables(regs->ea);
+	if (fixup) {
+		regs->ea = fixup->fixup;
+		return 1;
+	}
+
+	return 0;
+}
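
fixup_exception() only rewrites regs->ea to the fixup address recorded for the
faulting instruction; the (insn, fixup) word pairs themselves are emitted into
__ex_table by inline assembly, as uaccess.c further down in this patch does. A
hypothetical sketch of the same idiom for a single user byte load (not part of
the patch, shown only to illustrate the pairing):

#include <linux/types.h>

/* If the ldbu at 1: faults, do_page_fault() -> fixup_exception() resumes
 * execution at 2:, where the error is flagged instead of oopsing. */
static inline long read_user_byte(const unsigned char __user *uaddr,
				  unsigned char *val)
{
	long err = 0;
	unsigned long tmp = 0;

	asm volatile(
		"1:  ldbu  %1, 0(%2)\n"
		"    br    3f\n"
		"2:  movi  %0, -14\n"		/* -EFAULT */
		"3:\n"
		".section __ex_table, \"a\"\n"
		".word 1b, 2b\n"
		".previous\n"
		: "+r" (err), "+r" (tmp)
		: "r" (uaddr));

	*val = tmp;
	return err;
}
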
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
new file mode 100644
index 0000000..24fd84c
--- /dev/null
+++ b/arch/nios2/mm/fault.c
@@ -0,0 +1,277 @@
+/*
+ * Copyright (C) 2009 Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * based on arch/mips/mm/fault.c which is:
+ *
+ * Copyright (C) 1995-2000 Ralf Baechle
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+
+#include <asm/mmu_context.h>
+#include <asm/traps.h>
+
+#define EXC_SUPERV_INSN_ACCESS	9  /* Supervisor only instruction address */
+#define EXC_SUPERV_DATA_ACCESS	11 /* Supervisor only data address */
+#define EXC_X_PROTECTION_FAULT	13 /* TLB permission violation (x) */
+#define EXC_R_PROTECTION_FAULT	14 /* TLB permission violation (r) */
+#define EXC_W_PROTECTION_FAULT	15 /* TLB permission violation (w) */
+
+/*
+ * This routine handles page faults.  It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
+				unsigned long address)
+{
+	struct vm_area_struct *vma = NULL;
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	int code = SEGV_MAPERR;
+	vm_fault_t fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	cause >>= 2;
+
+	/* Restart the instruction */
+	regs->ea -= 4;
+
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 */
+	if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END)) {
+		if (user_mode(regs))
+			goto bad_area_nosemaphore;
+		else
+			goto vmalloc_fault;
+	}
+
+	if (unlikely(address >= TASK_SIZE))
+		goto bad_area_nosemaphore;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (faulthandler_disabled() || !mm)
+		goto bad_area_nosemaphore;
+
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		if (!user_mode(regs) && !search_exception_tables(regs->ea))
+			goto bad_area_nosemaphore;
+retry:
+		down_read(&mm->mmap_sem);
+	}
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, address))
+		goto bad_area;
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+	code = SEGV_ACCERR;
+
+	switch (cause) {
+	case EXC_SUPERV_INSN_ACCESS:
+		goto bad_area;
+	case EXC_SUPERV_DATA_ACCESS:
+		goto bad_area;
+	case EXC_X_PROTECTION_FAULT:
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+		break;
+	case EXC_R_PROTECTION_FAULT:
+		if (!(vma->vm_flags & VM_READ))
+			goto bad_area;
+		break;
+	case EXC_W_PROTECTION_FAULT:
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
+		break;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+	fault = handle_mm_fault(vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+
+	/*
+	 * Major/minor page fault accounting is only done on the
+	 * initial attempt. If we go through a retry, it is extremely
+	 * likely that the page will be found in page cache at that point.
+	 */
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR)
+			current->maj_flt++;
+		else
+			current->min_flt++;
+		if (fault & VM_FAULT_RETRY) {
+			/* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
+			 * of starvation. */
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/*
+			 * No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
+	return;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+	/* User mode accesses just cause a SIGSEGV */
+	if (user_mode(regs)) {
+		if (unhandled_signal(current, SIGSEGV) && printk_ratelimit()) {
+			pr_info("%s: unhandled page fault (%d) at 0x%08lx, "
+				"cause %ld\n", current->comm, SIGSEGV, address, cause);
+			show_regs(regs);
+		}
+		_exception(SIGSEGV, regs, code, address);
+		return;
+	}
+
+no_context:
+	/* Are we prepared to handle this kernel fault? */
+	if (fixup_exception(regs))
+		return;
+
+	/*
+	 * Oops. The kernel tried to access some bad page. We'll have to
+	 * terminate things with extreme prejudice.
+	 */
+	bust_spinlocks(1);
+
+	pr_alert("Unable to handle kernel %s at virtual address %08lx",
+		address < PAGE_SIZE ? "NULL pointer dereference" :
+		"paging request", address);
+	pr_alert("ea = %08lx, ra = %08lx, cause = %ld\n", regs->ea, regs->ra,
+		cause);
+	panic("Oops");
+	return;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (!user_mode(regs))
+		goto no_context;
+	pagefault_out_of_memory();
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (!user_mode(regs))
+		goto no_context;
+
+	_exception(SIGBUS, regs, BUS_ADRERR, address);
+	return;
+
+vmalloc_fault:
+	{
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 *
+		 * Do _not_ use "tsk" here. We might be inside
+		 * an interrupt in the middle of a task switch..
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pud_t *pud, *pud_k;
+		pmd_t *pmd, *pmd_k;
+		pte_t *pte_k;
+
+		pgd = pgd_current + offset;
+		pgd_k = init_mm.pgd + offset;
+
+		if (!pgd_present(*pgd_k))
+			goto no_context;
+		set_pgd(pgd, *pgd_k);
+
+		pud = pud_offset(pgd, address);
+		pud_k = pud_offset(pgd_k, address);
+		if (!pud_present(*pud_k))
+			goto no_context;
+		pmd = pmd_offset(pud, address);
+		pmd_k = pmd_offset(pud_k, address);
+		if (!pmd_present(*pmd_k))
+			goto no_context;
+		set_pmd(pmd, *pmd_k);
+
+		pte_k = pte_offset_kernel(pmd_k, address);
+		if (!pte_present(*pte_k))
+			goto no_context;
+
+		flush_tlb_one(address);
+		return;
+	}
+}
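
do_page_fault() is handed the raw exception value and does cause >>= 2 because
the Nios II exception register keeps its CAUSE code in bits 6..2; the shifted
value is then matched against the EXC_* codes defined at the top of the file.
A tiny worked example of that decode (bit positions per the Nios II reference,
stated here as an assumption rather than taken from the patch):

#include <stdio.h>

/* Cause codes as defined in fault.c above. */
#define EXC_R_PROTECTION_FAULT	14
#define EXC_W_PROTECTION_FAULT	15

int main(void)
{
	/* A raw exception value with CAUSE in bits 6..2, which is what
	 * the cause >>= 2 in do_page_fault() undoes. */
	unsigned long raw = EXC_W_PROTECTION_FAULT << 2;
	unsigned long cause = raw >> 2;

	printf("cause=%lu -> %s\n", cause,
	       cause == EXC_W_PROTECTION_FAULT ? "write protection fault" :
	       cause == EXC_R_PROTECTION_FAULT ? "read protection fault" :
	       "other");
	return 0;
}
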
diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c
new file mode 100644
index 0000000..c92fe42
--- /dev/null
+++ b/arch/nios2/mm/init.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * based on arch/m68k/mm/init.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pagemap.h>
+#include <linux/bootmem.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/cpuinfo.h>
+#include <asm/processor.h>
+
+pgd_t *pgd_current;
+
+/*
+ * paging_init() continues the virtual memory environment setup which
+ * was begun by the code in arch/head.S.
+ * The parameters are pointers to where to stick the starting and ending
+ * addresses of available kernel virtual memory.
+ */
+void __init paging_init(void)
+{
+	unsigned long zones_size[MAX_NR_ZONES];
+
+	memset(zones_size, 0, sizeof(zones_size));
+
+	pagetable_init();
+	pgd_current = swapper_pg_dir;
+
+	zones_size[ZONE_NORMAL] = max_mapnr;
+
+	/* pass the memory from the bootmem allocator to the main allocator */
+	free_area_init(zones_size);
+
+	flush_dcache_range((unsigned long)empty_zero_page,
+			(unsigned long)empty_zero_page + PAGE_SIZE);
+}
+
+void __init mem_init(void)
+{
+	unsigned long end_mem   = memory_end; /* this must not include
+						kernel stack at top */
+
+	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);
+
+	end_mem &= PAGE_MASK;
+	high_memory = __va(end_mem);
+
+	/* this will put all memory onto the freelists */
+	free_all_bootmem();
+	mem_init_print_info(NULL);
+}
+
+void __init mmu_init(void)
+{
+	flush_tlb_all();
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_reserved_area((void *)start, (void *)end, -1, "initrd");
+}
+#endif
+
+void __ref free_initmem(void)
+{
+	free_initmem_default(-1);
+}
+
+#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
+static struct page *kuser_page[1];
+
+static int alloc_kuser_page(void)
+{
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+	unsigned long vpage;
+
+	vpage = get_zeroed_page(GFP_ATOMIC);
+	if (!vpage)
+		return -ENOMEM;
+
+	/* Copy kuser helpers */
+	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);
+
+	flush_icache_range(vpage, vpage + KUSER_SIZE);
+	kuser_page[0] = virt_to_page(vpage);
+
+	return 0;
+}
+arch_initcall(alloc_kuser_page);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	int ret;
+
+	down_write(&mm->mmap_sem);
+
+	/* Map kuser helpers to user space address */
+	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
+				      VM_READ | VM_EXEC | VM_MAYREAD |
+				      VM_MAYEXEC, kuser_page);
+
+	up_write(&mm->mmap_sem);
+
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
+}
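
alloc_kuser_page() copies the kuser helpers into a fresh page and
arch_setup_additional_pages() maps that page read/exec at KUSER_BASE in every
process; arch_vma_name() labels the mapping "[kuser]". A small user-space
check, runnable on any Linux target, that looks for that label:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[kuser]"))
			fputs(line, stdout);	/* present only on nios2 */
	fclose(f);
	return 0;
}
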
diff --git a/arch/nios2/mm/ioremap.c b/arch/nios2/mm/ioremap.c
new file mode 100644
index 0000000..3a28177
--- /dev/null
+++ b/arch/nios2/mm/ioremap.c
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/io.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+static inline void remap_area_pte(pte_t *pte, unsigned long address,
+				unsigned long size, unsigned long phys_addr,
+				unsigned long flags)
+{
+	unsigned long end;
+	unsigned long pfn;
+	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
+				| _PAGE_WRITE | flags);
+
+	address &= ~PMD_MASK;
+	end = address + size;
+	if (end > PMD_SIZE)
+		end = PMD_SIZE;
+	if (address >= end)
+		BUG();
+	pfn = PFN_DOWN(phys_addr);
+	do {
+		if (!pte_none(*pte)) {
+			pr_err("remap_area_pte: page already exists\n");
+			BUG();
+		}
+		set_pte(pte, pfn_pte(pfn, pgprot));
+		address += PAGE_SIZE;
+		pfn++;
+		pte++;
+	} while (address && (address < end));
+}
+
+static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
+				unsigned long size, unsigned long phys_addr,
+				unsigned long flags)
+{
+	unsigned long end;
+
+	address &= ~PGDIR_MASK;
+	end = address + size;
+	if (end > PGDIR_SIZE)
+		end = PGDIR_SIZE;
+	phys_addr -= address;
+	if (address >= end)
+		BUG();
+	do {
+		pte_t *pte = pte_alloc_kernel(pmd, address);
+
+		if (!pte)
+			return -ENOMEM;
+		remap_area_pte(pte, address, end - address, address + phys_addr,
+			flags);
+		address = (address + PMD_SIZE) & PMD_MASK;
+		pmd++;
+	} while (address && (address < end));
+	return 0;
+}
+
+static int remap_area_pages(unsigned long address, unsigned long phys_addr,
+				unsigned long size, unsigned long flags)
+{
+	int error;
+	pgd_t *dir;
+	unsigned long end = address + size;
+
+	phys_addr -= address;
+	dir = pgd_offset(&init_mm, address);
+	flush_cache_all();
+	if (address >= end)
+		BUG();
+	do {
+		pud_t *pud;
+		pmd_t *pmd;
+
+		error = -ENOMEM;
+		pud = pud_alloc(&init_mm, dir, address);
+		if (!pud)
+			break;
+		pmd = pmd_alloc(&init_mm, pud, address);
+		if (!pmd)
+			break;
+		if (remap_area_pmd(pmd, address, end - address,
+			phys_addr + address, flags))
+			break;
+		error = 0;
+		address = (address + PGDIR_SIZE) & PGDIR_MASK;
+		dir++;
+	} while (address && (address < end));
+	flush_tlb_all();
+	return error;
+}
+
+#define IS_MAPPABLE_UNCACHEABLE(addr) ((addr) < 0x20000000UL)
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long cacheflag)
+{
+	struct vm_struct *area;
+	unsigned long offset;
+	unsigned long last_addr;
+	void *addr;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/* Don't allow anybody to remap normal RAM that we're using */
+	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
+		char *t_addr, *t_end;
+		struct page *page;
+
+		t_addr = __va(phys_addr);
+		t_end = t_addr + (size - 1);
+		for (page = virt_to_page(t_addr);
+			page <= virt_to_page(t_end); page++)
+			if (!PageReserved(page))
+				return NULL;
+	}
+
+	/*
+	 * Map uncached objects in the low part of address space to
+	 * CONFIG_NIOS2_IO_REGION_BASE
+	 */
+	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
+	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
+	    !(cacheflag & _PAGE_CACHED))
+		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);
+
+	/* Mappings have to be page-aligned */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+	/* Ok, go for it */
+	area = get_vm_area(size, VM_IOREMAP);
+	if (!area)
+		return NULL;
+	addr = area->addr;
+	if (remap_area_pages((unsigned long) addr, phys_addr, size,
+		cacheflag)) {
+		vunmap(addr);
+		return NULL;
+	}
+	return (void __iomem *) (offset + (char *)addr);
+}
+EXPORT_SYMBOL(__ioremap);
+
+/*
+ * __iounmap unmaps nearly everything, so be careful.
+ * It no longer frees the corresponding page tables; that was unused
+ * anyway and might be added back later.
+ */
+void __iounmap(void __iomem *addr)
+{
+	struct vm_struct *p;
+
+	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
+		return;
+
+	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
+	if (!p)
+		pr_err("iounmap: bad address %p\n", addr);
+	kfree(p);
+}
+EXPORT_SYMBOL(__iounmap);
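
__ioremap() either hands back an address in the uncached direct-mapped window
(CONFIG_NIOS2_IO_REGION_BASE + phys_addr) for low, uncached requests, or
builds page tables inside a VM_IOREMAP area. Drivers normally reach it through
the generic ioremap()/iounmap() wrappers. A hedged sketch with made-up
register base and offsets; a base below 0x20000000 would take the
direct-mapped path above:

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/errno.h>

#define EXAMPLE_REG_BASE	0x10002000	/* hypothetical CSR base */
#define EXAMPLE_REG_SPAN	0x20
#define EXAMPLE_ID_OFFSET	0x0		/* hypothetical ID register */

static int probe_example_block(void)
{
	void __iomem *regs = ioremap(EXAMPLE_REG_BASE, EXAMPLE_REG_SPAN);
	u32 id;

	if (!regs)
		return -ENOMEM;

	id = readl(regs + EXAMPLE_ID_OFFSET);	/* uncached MMIO read */
	pr_info("example block id: %#x\n", id);

	iounmap(regs);
	return 0;
}
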
diff --git a/arch/nios2/mm/mmu_context.c b/arch/nios2/mm/mmu_context.c
new file mode 100644
index 0000000..45d6b9c
--- /dev/null
+++ b/arch/nios2/mm/mmu_context.c
@@ -0,0 +1,116 @@
+/*
+ * MMU context handling.
+ *
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mm.h>
+
+#include <asm/cpuinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* The pid's position and mask in the context */
+#define PID_SHIFT	0
+#define PID_BITS	(cpuinfo.tlb_pid_num_bits)
+#define PID_MASK	((1UL << PID_BITS) - 1)
+
+/* The version's position and mask in the context */
+#define VERSION_BITS	(32 - PID_BITS)
+#define VERSION_SHIFT	(PID_SHIFT + PID_BITS)
+#define VERSION_MASK	((1UL << VERSION_BITS) - 1)
+
+/* Return the version part of a context */
+#define CTX_VERSION(c)	(((c) >> VERSION_SHIFT) & VERSION_MASK)
+
+/* Return the pid part of a context */
+#define CTX_PID(c)	(((c) >> PID_SHIFT) & PID_MASK)
+
+/* Value of the first context (version 1, pid 0) */
+#define FIRST_CTX	((1UL << VERSION_SHIFT) | (0 << PID_SHIFT))
+
+static mm_context_t next_mmu_context;
+
+/*
+ * Initialize MMU context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+	/* We need to set this here because the value depends on runtime data
+	 * from cpuinfo */
+	next_mmu_context = FIRST_CTX;
+}
+
+/*
+ * Set new context (pid), keep way
+ */
+static void set_context(mm_context_t context)
+{
+	set_mmu_pid(CTX_PID(context));
+}
+
+static mm_context_t get_new_context(void)
+{
+	/* Return the next pid */
+	next_mmu_context += (1UL << PID_SHIFT);
+
+	/* If the pid field wraps around we increase the version and
+	 * flush the tlb */
+	if (unlikely(CTX_PID(next_mmu_context) == 0)) {
+		/* Version is incremented since the pid increment above
+		 * overflows into the version field */
+		flush_cache_all();
+		flush_tlb_all();
+	}
+
+	/* If the version wraps, we start over with the first generation; no
+	 * TLB flush is needed here since it is always done above */
+	if (unlikely(CTX_VERSION(next_mmu_context) == 0))
+		next_mmu_context = FIRST_CTX;
+
+	return next_mmu_context;
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	       struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	/* If the process context we are swapping in has a different
+	 * generation than the current one, it gets a new generation/pid */
+	if (unlikely(CTX_VERSION(next->context) !=
+		CTX_VERSION(next_mmu_context)))
+		next->context = get_new_context();
+
+	/* Save the current pgd so the fast tlb handler can find it */
+	pgd_current = next->pgd;
+
+	/* Set the current context */
+	set_context(next->context);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * After we have set current->mm to a new value, this activates
+ * the context for the new mm so we see the new mappings.
+ */
+void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	next->context = get_new_context();
+	set_context(next->context);
+	pgd_current = next->pgd;
+}
+
+unsigned long get_pid_from_context(mm_context_t *context)
+{
+	return CTX_PID((*context));
+}
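
The context word packs the hardware PID in its low bits and a software
"version" in the rest: when the PID space wraps, the version is bumped and the
TLB flushed, and a mismatched version on an mm forces get_new_context() in
switch_mm(). A standalone worked example of the packing, assuming 8 PID bits
(the real width, cpuinfo.tlb_pid_num_bits, is only known at runtime):

#include <stdio.h>

#define PID_BITS	8	/* assumption for illustration only */
#define PID_SHIFT	0
#define PID_MASK	((1UL << PID_BITS) - 1)
#define VERSION_BITS	(32 - PID_BITS)
#define VERSION_SHIFT	(PID_SHIFT + PID_BITS)
#define VERSION_MASK	((1UL << VERSION_BITS) - 1)
#define CTX_VERSION(c)	(((c) >> VERSION_SHIFT) & VERSION_MASK)
#define CTX_PID(c)	(((c) >> PID_SHIFT) & PID_MASK)
#define FIRST_CTX	((1UL << VERSION_SHIFT) | (0 << PID_SHIFT))

int main(void)
{
	unsigned long ctx = FIRST_CTX;
	int i;

	/* Hand out a few contexts the way get_new_context() does. */
	for (i = 0; i < 3; i++) {
		ctx += 1UL << PID_SHIFT;
		printf("context %#010lx -> version %lu pid %lu\n",
		       ctx, CTX_VERSION(ctx), CTX_PID(ctx));
	}
	return 0;
}
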
diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c
new file mode 100644
index 0000000..61e24a2
--- /dev/null
+++ b/arch/nios2/mm/pgtable.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2009 Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/cpuinfo.h>
+
+/* pteaddr:
+ *   ptbase | vpn* | zero
+ *   31-22  | 21-2 | 1-0
+ *
+ *   *vpn is preserved on double fault
+ *
+ * tlbacc:
+ *   IG   |*flags| pfn
+ *   31-25|24-20 | 19-0
+ *
+ *   *crwxg
+ *
+ * tlbmisc:
+ *   resv  |way   |rd | we|pid |dbl|bad|perm|d
+ *   31-24 |23-20 |19 | 18|17-4|3  |2  |1   |0
+ *
+ */
+
+/*
+ * Initialize a new pgd / pmd table with invalid pointers.
+ */
+static void pgd_init(pgd_t *pgd)
+{
+	unsigned long *p = (unsigned long *) pgd;
+	int i;
+
+	for (i = 0; i < USER_PTRS_PER_PGD; i += 8) {
+		p[i + 0] = (unsigned long) invalid_pte_table;
+		p[i + 1] = (unsigned long) invalid_pte_table;
+		p[i + 2] = (unsigned long) invalid_pte_table;
+		p[i + 3] = (unsigned long) invalid_pte_table;
+		p[i + 4] = (unsigned long) invalid_pte_table;
+		p[i + 5] = (unsigned long) invalid_pte_table;
+		p[i + 6] = (unsigned long) invalid_pte_table;
+		p[i + 7] = (unsigned long) invalid_pte_table;
+	}
+}
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret, *init;
+
+	ret = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ORDER);
+	if (ret) {
+		init = pgd_offset(&init_mm, 0UL);
+		pgd_init(ret);
+		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return ret;
+}
+
+void __init pagetable_init(void)
+{
+	/* Initialize the entire pgd.  */
+	pgd_init(swapper_pg_dir);
+	pgd_init(swapper_pg_dir + USER_PTRS_PER_PGD);
+}
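
The header comment documents the pteaddr layout: the page-table base in bits
31..22 and the VPN in bits 21..2, which is why tlb.c below writes CTL_PTEADDR
as (addr >> PAGE_SHIFT) << 2. A tiny worked example of that encoding, assuming
the usual 4 KiB pages (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long vaddr = 0x12345678;
	unsigned long vpn = vaddr >> PAGE_SHIFT;	/* 0x12345 */
	unsigned long pteaddr = vpn << 2;		/* VPN in bits 21..2 */

	printf("vaddr %#lx -> vpn %#lx -> pteaddr field %#lx\n",
	       vaddr, vpn, pteaddr);
	return 0;
}
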
diff --git a/arch/nios2/mm/tlb.c b/arch/nios2/mm/tlb.c
new file mode 100644
index 0000000..cf10326
--- /dev/null
+++ b/arch/nios2/mm/tlb.c
@@ -0,0 +1,275 @@
+/*
+ * Nios2 TLB handling
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cpuinfo.h>
+
+#define TLB_INDEX_MASK		\
+	((((1UL << (cpuinfo.tlb_ptr_sz - cpuinfo.tlb_num_ways_log2))) - 1) \
+		<< PAGE_SHIFT)
+
+/* Used as illegal PHYS_ADDR for TLB mappings */
+#define MAX_PHYS_ADDR 0
+
+static void get_misc_and_pid(unsigned long *misc, unsigned long *pid)
+{
+	*misc  = RDCTL(CTL_TLBMISC);
+	*misc &= (TLBMISC_PID | TLBMISC_WAY);
+	*pid  = *misc & TLBMISC_PID;
+}
+
+/*
+ * All entries common to a mm share an asid.  To effectively flush these
+ * entries, we just bump the asid.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+	if (current->mm == mm)
+		flush_tlb_all();
+	else
+		memset(&mm->context, 0, sizeof(mm_context_t));
+}
+
+/*
+ * Flush any TLB entry that maps the given address and belongs to the given
+ * pid (ASID); entries for other pids are left untouched.
+ */
+void flush_tlb_one_pid(unsigned long addr, unsigned long mmu_pid)
+{
+	unsigned int way;
+	unsigned long org_misc, pid_misc;
+
+	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+	/* remember pid/way until we return. */
+	get_misc_and_pid(&org_misc, &pid_misc);
+
+	WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2);
+
+	for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+		unsigned long pteaddr;
+		unsigned long tlbmisc;
+		unsigned long pid;
+
+		tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
+		WRCTL(CTL_TLBMISC, tlbmisc);
+		pteaddr = RDCTL(CTL_PTEADDR);
+		tlbmisc = RDCTL(CTL_TLBMISC);
+		pid = (tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK;
+		if (((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) &&
+				pid == mmu_pid) {
+			unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE +
+				((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) +
+				(addr & TLB_INDEX_MASK);
+			pr_debug("Flush entry by writing %#lx way=%d pid=%ld\n",
+				vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT));
+
+			WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2);
+			tlbmisc = pid_misc | TLBMISC_WE |
+				(way << TLBMISC_WAY_SHIFT);
+			WRCTL(CTL_TLBMISC, tlbmisc);
+			WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
+		}
+	}
+
+	WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			unsigned long end)
+{
+	unsigned long mmu_pid = get_pid_from_context(&vma->vm_mm->context);
+
+	while (start < end) {
+		flush_tlb_one_pid(start, mmu_pid);
+		start += PAGE_SIZE;
+	}
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	while (start < end) {
+		flush_tlb_one(start);
+		start += PAGE_SIZE;
+	}
+}
+
+/*
+ * This one is only used for pages with the global bit set so we don't care
+ * much about the ASID.
+ */
+void flush_tlb_one(unsigned long addr)
+{
+	unsigned int way;
+	unsigned long org_misc, pid_misc;
+
+	pr_debug("Flush tlb-entry for vaddr=%#lx\n", addr);
+
+	/* remember pid/way until we return. */
+	get_misc_and_pid(&org_misc, &pid_misc);
+
+	WRCTL(CTL_PTEADDR, (addr >> PAGE_SHIFT) << 2);
+
+	for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+		unsigned long pteaddr;
+		unsigned long tlbmisc;
+
+		tlbmisc = pid_misc | TLBMISC_RD | (way << TLBMISC_WAY_SHIFT);
+		WRCTL(CTL_TLBMISC, tlbmisc);
+		pteaddr = RDCTL(CTL_PTEADDR);
+		tlbmisc = RDCTL(CTL_TLBMISC);
+
+		if ((((pteaddr >> 2) & 0xfffff)) == (addr >> PAGE_SHIFT)) {
+			unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE +
+				((PAGE_SIZE * cpuinfo.tlb_num_lines) * way) +
+				(addr & TLB_INDEX_MASK);
+
+			pr_debug("Flush entry by writing %#lx way=%d pid=%ld\n",
+				vaddr, way, (pid_misc >> TLBMISC_PID_SHIFT));
+
+			tlbmisc = pid_misc | TLBMISC_WE |
+				(way << TLBMISC_WAY_SHIFT);
+			WRCTL(CTL_PTEADDR, (vaddr >> 12) << 2);
+			WRCTL(CTL_TLBMISC, tlbmisc);
+			WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
+		}
+	}
+
+	WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void dump_tlb_line(unsigned long line)
+{
+	unsigned int way;
+	unsigned long org_misc;
+
+	pr_debug("dump tlb-entries for line=%#lx (addr %08lx)\n", line,
+		line << (PAGE_SHIFT + cpuinfo.tlb_num_ways_log2));
+
+	/* remember pid/way until we return */
+	org_misc = (RDCTL(CTL_TLBMISC) & (TLBMISC_PID | TLBMISC_WAY));
+
+	WRCTL(CTL_PTEADDR, line << 2);
+
+	for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+		unsigned long pteaddr;
+		unsigned long tlbmisc;
+		unsigned long tlbacc;
+
+		WRCTL(CTL_TLBMISC, TLBMISC_RD | (way << TLBMISC_WAY_SHIFT));
+		pteaddr = RDCTL(CTL_PTEADDR);
+		tlbmisc = RDCTL(CTL_TLBMISC);
+		tlbacc = RDCTL(CTL_TLBACC);
+
+		if ((tlbacc << PAGE_SHIFT) != (MAX_PHYS_ADDR & PAGE_MASK)) {
+			pr_debug("-- way:%02x vpn:0x%08lx phys:0x%08lx pid:0x%02lx flags:%c%c%c%c%c\n",
+				way,
+				(pteaddr << (PAGE_SHIFT-2)),
+				(tlbacc << PAGE_SHIFT),
+				((tlbmisc >> TLBMISC_PID_SHIFT) &
+				TLBMISC_PID_MASK),
+				(tlbacc & _PAGE_READ ? 'r' : '-'),
+				(tlbacc & _PAGE_WRITE ? 'w' : '-'),
+				(tlbacc & _PAGE_EXEC ? 'x' : '-'),
+				(tlbacc & _PAGE_GLOBAL ? 'g' : '-'),
+				(tlbacc & _PAGE_CACHED ? 'c' : '-'));
+		}
+	}
+
+	WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void dump_tlb(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < cpuinfo.tlb_num_lines; i++)
+		dump_tlb_line(i);
+}
+
+void flush_tlb_pid(unsigned long pid)
+{
+	unsigned int line;
+	unsigned int way;
+	unsigned long org_misc, pid_misc;
+
+	/* remember pid/way until we return */
+	get_misc_and_pid(&org_misc, &pid_misc);
+
+	for (line = 0; line < cpuinfo.tlb_num_lines; line++) {
+		WRCTL(CTL_PTEADDR, line << 2);
+
+		for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+			unsigned long pteaddr;
+			unsigned long tlbmisc;
+			unsigned long tlbacc;
+
+			tlbmisc = pid_misc | TLBMISC_RD |
+				(way << TLBMISC_WAY_SHIFT);
+			WRCTL(CTL_TLBMISC, tlbmisc);
+			pteaddr = RDCTL(CTL_PTEADDR);
+			tlbmisc = RDCTL(CTL_TLBMISC);
+			tlbacc = RDCTL(CTL_TLBACC);
+
+			if (((tlbmisc >> TLBMISC_PID_SHIFT) & TLBMISC_PID_MASK)
+				== pid) {
+				tlbmisc = pid_misc | TLBMISC_WE |
+					(way << TLBMISC_WAY_SHIFT);
+				WRCTL(CTL_TLBMISC, tlbmisc);
+				WRCTL(CTL_TLBACC,
+					(MAX_PHYS_ADDR >> PAGE_SHIFT));
+			}
+		}
+
+		WRCTL(CTL_TLBMISC, org_misc);
+	}
+}
+
+void flush_tlb_all(void)
+{
+	int i;
+	unsigned long vaddr = CONFIG_NIOS2_IO_REGION_BASE;
+	unsigned int way;
+	unsigned long org_misc, pid_misc, tlbmisc;
+
+	/* remember pid/way until we return */
+	get_misc_and_pid(&org_misc, &pid_misc);
+	pid_misc |= TLBMISC_WE;
+
+	/* Map each TLB entry to physical address 0 with no-access and a
+	 * bad ptbase */
+	for (way = 0; way < cpuinfo.tlb_num_ways; way++) {
+		tlbmisc = pid_misc | (way << TLBMISC_WAY_SHIFT);
+		for (i = 0; i < cpuinfo.tlb_num_lines; i++) {
+			WRCTL(CTL_PTEADDR, ((vaddr) >> PAGE_SHIFT) << 2);
+			WRCTL(CTL_TLBMISC, tlbmisc);
+			WRCTL(CTL_TLBACC, (MAX_PHYS_ADDR >> PAGE_SHIFT));
+			vaddr += 1UL << 12;
+		}
+	}
+
+	/* restore pid/way */
+	WRCTL(CTL_TLBMISC, org_misc);
+}
+
+void set_mmu_pid(unsigned long pid)
+{
+	WRCTL(CTL_TLBMISC, (RDCTL(CTL_TLBMISC) & TLBMISC_WAY) |
+		((pid & TLBMISC_PID_MASK) << TLBMISC_PID_SHIFT));
+}
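
TLB_INDEX_MASK picks out the virtual-address bits that index a TLB line, and
the flush routines turn a (line, way) pair back into a probe address inside
CONFIG_NIOS2_IO_REGION_BASE. A standalone sketch of that arithmetic under
assumed cpuinfo values (tlb_ptr_sz = 12, tlb_num_ways_log2 = 4, i.e. 16 ways
of 256 lines) and an assumed I/O region base:

#include <stdio.h>

/* Assumed values; the real ones come from cpuinfo and Kconfig. */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define TLB_PTR_SZ		12
#define TLB_NUM_WAYS_LOG2	4
#define TLB_NUM_LINES		(1UL << (TLB_PTR_SZ - TLB_NUM_WAYS_LOG2))
#define IO_REGION_BASE		0xe0000000UL

#define TLB_INDEX_MASK \
	(((1UL << (TLB_PTR_SZ - TLB_NUM_WAYS_LOG2)) - 1) << PAGE_SHIFT)

int main(void)
{
	unsigned long addr = 0x12345678;
	unsigned int way = 3;
	/* Same formula as flush_tlb_one(): the probe address that selects
	 * this address's TLB line within the chosen way. */
	unsigned long probe = IO_REGION_BASE +
		(PAGE_SIZE * TLB_NUM_LINES) * way + (addr & TLB_INDEX_MASK);

	printf("index mask %#lx, line %lu, way %u -> probe vaddr %#lx\n",
	       TLB_INDEX_MASK, (addr & TLB_INDEX_MASK) >> PAGE_SHIFT,
	       way, probe);
	return 0;
}
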
diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c
new file mode 100644
index 0000000..34f10af
--- /dev/null
+++ b/arch/nios2/mm/uaccess.c
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2009, Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+asm(".global	raw_copy_from_user\n"
+	"   .type raw_copy_from_user, @function\n"
+	"raw_copy_from_user:\n"
+	"   movi  r2,7\n"
+	"   mov   r3,r4\n"
+	"   bge   r2,r6,1f\n"
+	"   xor   r2,r4,r5\n"
+	"   andi  r2,r2,3\n"
+	"   movi  r7,3\n"
+	"   beq   r2,zero,4f\n"
+	/* Bail if we try to copy zero bytes */
+	"1: addi  r6,r6,-1\n"
+	"   movi  r2,-1\n"
+	"   beq   r6,r2,3f\n"
+	/* Copy byte by byte for small copies and if src^dst != 0 */
+	"   mov   r7,r2\n"
+	"2: ldbu  r2,0(r5)\n"
+	"   addi  r6,r6,-1\n"
+	"   addi  r5,r5,1\n"
+	"   stb   r2,0(r3)\n"
+	"   addi  r3,r3,1\n"
+	"   bne   r6,r7,2b\n"
+	"3:\n"
+	"   addi  r2,r6,1\n"
+	"   ret\n"
+	"13:mov   r2,r6\n"
+	"   ret\n"
+	/* If 'to' is an odd address, copy one byte first */
+	"4: andi  r2,r4,1\n"
+	"   cmpeq r2,r2,zero\n"
+	"   beq   r2,zero,7f\n"
+	/* If 'to' is not word aligned, copy a halfword */
+	"5: andi  r2,r3,2\n"
+	"   beq   r2,zero,6f\n"
+	"9: ldhu  r2,0(r5)\n"
+	"   addi  r6,r6,-2\n"
+	"   addi  r5,r5,2\n"
+	"   sth   r2,0(r3)\n"
+	"   addi  r3,r3,2\n"
+	/* Copy words */
+	"6: bge   r7,r6,1b\n"
+	"10:ldw   r2,0(r5)\n"
+	"   addi  r6,r6,-4\n"
+	"   addi  r5,r5,4\n"
+	"   stw   r2,0(r3)\n"
+	"   addi  r3,r3,4\n"
+	"   br    6b\n"
+	/* Copy a single byte to make 'to' halfword aligned */
+	"7: ldbu  r2,0(r5)\n"
+	"   addi  r6,r6,-1\n"
+	"   addi  r5,r5,1\n"
+	"   addi  r3,r4,1\n"
+	"   stb   r2,0(r4)\n"
+	"   br    5b\n"
+	".section __ex_table,\"a\"\n"
+	".word 2b,3b\n"
+	".word 9b,13b\n"
+	".word 10b,13b\n"
+	".word 7b,13b\n"
+	".previous\n"
+	);
+EXPORT_SYMBOL(raw_copy_from_user);
+
+asm(
+	"   .global raw_copy_to_user\n"
+	"   .type raw_copy_to_user, @function\n"
+	"raw_copy_to_user:\n"
+	"   movi  r2,7\n"
+	"   mov   r3,r4\n"
+	"   bge   r2,r6,1f\n"
+	"   xor   r2,r4,r5\n"
+	"   andi  r2,r2,3\n"
+	"   movi  r7,3\n"
+	"   beq   r2,zero,4f\n"
+	/* Bail if we try to copy zero bytes  */
+	"1: addi  r6,r6,-1\n"
+	"   movi  r2,-1\n"
+	"   beq   r6,r2,3f\n"
+	/* Copy byte by byte for small copies and if src^dst != 0 */
+	"   mov   r7,r2\n"
+	"2: ldbu  r2,0(r5)\n"
+	"   addi  r5,r5,1\n"
+	"9: stb   r2,0(r3)\n"
+	"   addi  r6,r6,-1\n"
+	"   addi  r3,r3,1\n"
+	"   bne   r6,r7,2b\n"
+	"3: addi  r2,r6,1\n"
+	"   ret\n"
+	"13:mov   r2,r6\n"
+	"   ret\n"
+	/* If 'to' is an odd address, copy one byte first */
+	"4: andi  r2,r4,1\n"
+	"   cmpeq r2,r2,zero\n"
+	"   beq   r2,zero,7f\n"
+	/* If 'to' is not word aligned, copy a halfword */
+	"5: andi  r2,r3,2\n"
+	"   beq   r2,zero,6f\n"
+	"   ldhu  r2,0(r5)\n"
+	"   addi  r5,r5,2\n"
+	"10:sth   r2,0(r3)\n"
+	"   addi  r6,r6,-2\n"
+	"   addi  r3,r3,2\n"
+	/* Copy words */
+	"6: bge   r7,r6,1b\n"
+	"   ldw   r2,0(r5)\n"
+	"   addi  r5,r5,4\n"
+	"11:stw   r2,0(r3)\n"
+	"   addi  r6,r6,-4\n"
+	"   addi  r3,r3,4\n"
+	"   br    6b\n"
+	/* Copy remaining bytes */
+	"7: ldbu  r2,0(r5)\n"
+	"   addi  r5,r5,1\n"
+	"   addi  r3,r4,1\n"
+	"12: stb  r2,0(r4)\n"
+	"   addi  r6,r6,-1\n"
+	"   br    5b\n"
+	".section __ex_table,\"a\"\n"
+	".word 9b,3b\n"
+	".word 10b,13b\n"
+	".word 11b,13b\n"
+	".word 12b,13b\n"
+	".previous\n");
+EXPORT_SYMBOL(raw_copy_to_user);
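
Both copy routines return, through the 3:/13: exits, the number of bytes that
could not be copied (0 on success), which is the raw_copy_*_user contract the
generic copy_from_user()/copy_to_user() wrappers build on. A hedged kernel-side
sketch of a caller, with a hypothetical helper name:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* A non-zero residue from copy_from_user() -- ultimately
 * raw_copy_from_user() above -- means part of the user buffer faulted,
 * so the caller reports -EFAULT. */
static int fetch_request(void *dst, const void __user *src, size_t len)
{
	if (copy_from_user(dst, src, len))
		return -EFAULT;
	return 0;
}
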