Linux v4.19.13 snapshot of arch/sparc/mm, presented as a diff adding each file.
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
new file mode 100644
index 0000000..d39075b
--- /dev/null
+++ b/arch/sparc/mm/Makefile
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for the linux Sparc-specific parts of the memory manager.
+#
+
+asflags-y := -ansi
+ccflags-y := -Werror
+
+obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o gup.o
+obj-y                   += fault_$(BITS).o
+obj-y                   += init_$(BITS).o
+obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu_access.o
+obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
+obj-$(CONFIG_SPARC32)   += leon_mm.o
+
+# Only used by sparc64
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+# Only used by sparc32
+obj-$(CONFIG_HIGHMEM)   += highmem.o
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
new file mode 100644
index 0000000..241b406
--- /dev/null
+++ b/arch/sparc/mm/extable.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/sparc/mm/extable.c
+ */
+
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+
+void sort_extable(struct exception_table_entry *start,
+		  struct exception_table_entry *finish)
+{
+}
+
+/* Caller knows they are in a range if ret->fixup == 0 */
+const struct exception_table_entry *
+search_extable(const struct exception_table_entry *base,
+	       const size_t num,
+	       unsigned long value)
+{
+	int i;
+
+	/* Single insn entries are encoded as:
+	 *	word 1:	insn address
+	 *	word 2:	fixup code address
+	 *
+	 * Range entries are encoded as:
+	 *	word 1: first insn address
+	 *	word 2: 0
+	 *	word 3: last insn address + 4 bytes
+	 *	word 4: fixup code address
+	 *
+	 * Deleted entries are encoded as:
+	 *	word 1: unused
+	 *	word 2: -1
+	 *
+	 * See asm/uaccess.h for more details.
+	 */
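+	/* Worked example (addresses made up): a range covering the three
+	 * insns at 0x1000, 0x1004 and 0x1008, with fixup code at 0x2000,
+	 * is stored as two consecutive entries:
+	 *	{ .insn = 0x1000, .fixup = 0      }
+	 *	{ .insn = 0x100c, .fixup = 0x2000 }
+	 * so the range test below is insn <= addr < next entry's insn.
+	 */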
+
+	/* 1. Try to find an exact match. */
+	for (i = 0; i < num; i++) {
+		if (base[i].fixup == 0) {
+			/* A range entry, skip both parts. */
+			i++;
+			continue;
+		}
+
+		/* A deleted entry; see trim_init_extable */
+		if (base[i].fixup == -1)
+			continue;
+
+		if (base[i].insn == value)
+			return &base[i];
+	}
+
+	/* 2. Try to find a range match. */
+	for (i = 0; i < (num - 1); i++) {
+		if (base[i].fixup)
+			continue;
+
+		if (base[i].insn <= value && base[i + 1].insn > value)
+			return &base[i];
+
+		i++;
+	}
+
+	return NULL;
+}
+
+#ifdef CONFIG_MODULES
+/* We could memmove them around; easier to mark the trimmed ones. */
+void trim_init_extable(struct module *m)
+{
+	unsigned int i;
+	bool range;
+
+	for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
+		range = m->extable[i].fixup == 0;
+
+		if (within_module_init(m->extable[i].insn, m)) {
+			m->extable[i].fixup = -1;
+			if (range)
+				m->extable[i+1].fixup = -1;
+		}
+		if (range)
+			i++;
+	}
+}
+#endif /* CONFIG_MODULES */
+
+/* Special extable search, which handles ranges.  Returns fixup */
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
+{
+	const struct exception_table_entry *entry;
+
+	entry = search_exception_tables(addr);
+	if (!entry)
+		return 0;
+
+	/* Inside range?  Fix g2 and return correct fixup */
+	if (!entry->fixup) {
+		*g2 = (addr - entry->insn) / 4;
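+		/* i.e. g2 becomes the zero-based index of the faulting
+		 * insn within the range (SPARC insns are 4 bytes). */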
+		return (entry + 1)->fixup;
+	}
+
+	return entry->fixup;
+}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
new file mode 100644
index 0000000..b0440b0
--- /dev/null
+++ b/arch/sparc/mm/fault_32.c
@@ -0,0 +1,462 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * fault.c:  Page fault handlers for the Sparc.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
+ * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+
+#include <asm/head.h>
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/perf_event.h>
+#include <linux/interrupt.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/setup.h>
+#include <asm/smp.h>
+#include <asm/traps.h>
+
+#include "mm_32.h"
+
+int show_unhandled_signals = 1;
+
+static void __noreturn unhandled_fault(unsigned long address,
+				       struct task_struct *tsk,
+				       struct pt_regs *regs)
+{
+	if ((unsigned long) address < PAGE_SIZE) {
+		printk(KERN_ALERT
+		    "Unable to handle kernel NULL pointer dereference\n");
+	} else {
+		printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
+		       address);
+	}
+	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
+		(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
+	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
+		(tsk->mm ? (unsigned long) tsk->mm->pgd :
+			(unsigned long) tsk->active_mm->pgd));
+	die_if_kernel("Oops", regs);
+}
+
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
+			    unsigned long address)
+{
+	struct pt_regs regs;
+	unsigned long g2;
+	unsigned int insn;
+	int i;
+
+	i = search_extables_range(ret_pc, &g2);
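+	/* The small values handled below are special fixup codes (compare
+	 * the "Values below 10 are reserved" check in do_sparc_fault); the
+	 * insn tests pick loads apart from stores -- bit 21 is the store
+	 * bit in the SPARC format-3 encoding.
+	 */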
+	switch (i) {
+	case 3:
+		/* load & store will be handled by fixup */
+		return 3;
+
+	case 1:
+		/* store will be handled by fixup, load will bump out */
+		/* for _to_ macros */
+		insn = *((unsigned int *) pc);
+		if ((insn >> 21) & 1)
+			return 1;
+		break;
+
+	case 2:
+		/* load will be handled by fixup, store will bump out */
+		/* for _from_ macros */
+		insn = *((unsigned int *) pc);
+		if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
+			return 2;
+		break;
+
+	default:
+		break;
+	}
+
+	memset(&regs, 0, sizeof(regs));
+	regs.pc = pc;
+	regs.npc = pc + 4;
+	__asm__ __volatile__(
+		"rd %%psr, %0\n\t"
+		"nop\n\t"
+		"nop\n\t"
+		"nop\n" : "=r" (regs.psr));
+	unhandled_fault(address, current, &regs);
+
+	/* Not reached */
+	return 0;
+}
+
+static inline void
+show_signal_msg(struct pt_regs *regs, int sig, int code,
+		unsigned long address, struct task_struct *tsk)
+{
+	if (!unhandled_signal(tsk, sig))
+		return;
+
+	if (!printk_ratelimit())
+		return;
+
+	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
+	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+	       tsk->comm, task_pid_nr(tsk), address,
+	       (void *)regs->pc, (void *)regs->u_regs[UREG_I7],
+	       (void *)regs->u_regs[UREG_FP], code);
+
+	print_vma_addr(KERN_CONT " in ", regs->pc);
+
+	printk(KERN_CONT "\n");
+}
+
+static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+			       unsigned long addr)
+{
+	if (unlikely(show_unhandled_signals))
+		show_signal_msg(regs, sig, code,
+				addr, current);
+
+	force_sig_fault(sig, code, (void __user *) addr, 0, current);
+}
+
+static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
+{
+	unsigned int insn;
+
+	if (text_fault)
+		return regs->pc;
+
+	if (regs->psr & PSR_PS)
+		insn = *(unsigned int *) regs->pc;
+	else
+		__get_user(insn, (unsigned int *) regs->pc);
+
+	return safe_compute_effective_address(regs, insn);
+}
+
+static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+				      int text_fault)
+{
+	unsigned long addr = compute_si_addr(regs, text_fault);
+
+	__do_fault_siginfo(code, sig, regs, addr);
+}
+
+asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+			       unsigned long address)
+{
+	struct vm_area_struct *vma;
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	unsigned int fixup;
+	unsigned long g2;
+	int from_user = !(regs->psr & PSR_PS);
+	int code;
+	vm_fault_t fault;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	if (text_fault)
+		address = regs->pc;
+
+	/*
+	 * We fault-in kernel-space virtual memory on-demand. The
+	 * 'reference' page table is init_mm.pgd.
+	 *
+	 * NOTE! We MUST NOT take any locks for this case. We may
+	 * be in an interrupt or a critical region, and should
+	 * only copy the information from the master page table,
+	 * nothing more.
+	 */
+	code = SEGV_MAPERR;
+	if (address >= TASK_SIZE)
+		goto vmalloc_fault;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (pagefault_disabled() || !mm)
+		goto no_context;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+retry:
+	down_read(&mm->mmap_sem);
+
+	if (!from_user && address >= PAGE_OFFSET)
+		goto bad_area;
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, address))
+		goto bad_area;
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	code = SEGV_ACCERR;
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+	} else {
+		/* Allow reads even for write-only mappings */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+
+	if (from_user)
+		flags |= FAULT_FLAG_USER;
+	if (write)
+		flags |= FAULT_FLAG_WRITE;
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+	fault = handle_mm_fault(vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
+	up_read(&mm->mmap_sem);
+	return;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	up_read(&mm->mmap_sem);
+
+bad_area_nosemaphore:
+	/* User mode accesses just cause a SIGSEGV */
+	if (from_user) {
+		do_fault_siginfo(code, SIGSEGV, regs, text_fault);
+		return;
+	}
+
+	/* Is this in ex_table? */
+no_context:
+	g2 = regs->u_regs[UREG_G2];
+	if (!from_user) {
+		fixup = search_extables_range(regs->pc, &g2);
+		/* Values below 10 are reserved for other things */
+		if (fixup > 10) {
+			extern const unsigned int __memset_start[];
+			extern const unsigned int __memset_end[];
+			extern const unsigned int __csum_partial_copy_start[];
+			extern const unsigned int __csum_partial_copy_end[];
+
+#ifdef DEBUG_EXCEPTIONS
+			printk("Exception: PC<%08lx> faddr<%08lx>\n",
+			       regs->pc, address);
+			printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
+				regs->pc, fixup, g2);
+#endif
+			if ((regs->pc >= (unsigned long)__memset_start &&
+			     regs->pc < (unsigned long)__memset_end) ||
+			    (regs->pc >= (unsigned long)__csum_partial_copy_start &&
+			     regs->pc < (unsigned long)__csum_partial_copy_end)) {
+				regs->u_regs[UREG_I4] = address;
+				regs->u_regs[UREG_I5] = regs->pc;
+			}
+			regs->u_regs[UREG_G2] = g2;
+			regs->pc = fixup;
+			regs->npc = regs->pc + 4;
+			return;
+		}
+	}
+
+	unhandled_fault(address, tsk, regs);
+	do_exit(SIGKILL);
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	up_read(&mm->mmap_sem);
+	if (from_user) {
+		pagefault_out_of_memory();
+		return;
+	}
+	goto no_context;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, text_fault);
+	if (!from_user)
+		goto no_context;
+
+vmalloc_fault:
+	{
+		/*
+		 * Synchronize this task's top level page-table
+		 * with the 'reference' page table.
+		 */
+		int offset = pgd_index(address);
+		pgd_t *pgd, *pgd_k;
+		pmd_t *pmd, *pmd_k;
+
+		pgd = tsk->active_mm->pgd + offset;
+		pgd_k = init_mm.pgd + offset;
+
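+		/* Only the top-level entry is copied; the page tables
+		 * below it stay shared with init_mm. */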
+		if (!pgd_present(*pgd)) {
+			if (!pgd_present(*pgd_k))
+				goto bad_area_nosemaphore;
+			pgd_val(*pgd) = pgd_val(*pgd_k);
+			return;
+		}
+
+		pmd = pmd_offset(pgd, address);
+		pmd_k = pmd_offset(pgd_k, address);
+
+		if (pmd_present(*pmd) || !pmd_present(*pmd_k))
+			goto bad_area_nosemaphore;
+
+		*pmd = *pmd_k;
+		return;
+	}
+}
+
+/* This always deals with user addresses. */
+static void force_user_fault(unsigned long address, int write)
+{
+	struct vm_area_struct *vma;
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+	unsigned int flags = FAULT_FLAG_USER;
+	int code;
+
+	code = SEGV_MAPERR;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (expand_stack(vma, address))
+		goto bad_area;
+good_area:
+	code = SEGV_ACCERR;
+	if (write) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
+	} else {
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+	switch (handle_mm_fault(vma, address, flags)) {
+	case VM_FAULT_SIGBUS:
+	case VM_FAULT_OOM:
+		goto do_sigbus;
+	}
+	up_read(&mm->mmap_sem);
+	return;
+bad_area:
+	up_read(&mm->mmap_sem);
+	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
+	return;
+
+do_sigbus:
+	up_read(&mm->mmap_sem);
+	__do_fault_siginfo(BUS_ADRERR, SIGBUS, tsk->thread.kregs, address);
+}
+
+static void check_stack_aligned(unsigned long sp)
+{
+	if (sp & 0x7UL)
+		force_sig(SIGILL, current);
+}
+
+void window_overflow_fault(void)
+{
+	unsigned long sp;
+
+	sp = current_thread_info()->rwbuf_stkptrs[0];
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+		force_user_fault(sp + 0x38, 1);
+	force_user_fault(sp, 1);
+
+	check_stack_aligned(sp);
+}
+
+void window_underflow_fault(unsigned long sp)
+{
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+		force_user_fault(sp + 0x38, 0);
+	force_user_fault(sp, 0);
+
+	check_stack_aligned(sp);
+}
+
+void window_ret_fault(struct pt_regs *regs)
+{
+	unsigned long sp;
+
+	sp = regs->u_regs[UREG_FP];
+	if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
+		force_user_fault(sp + 0x38, 0);
+	force_user_fault(sp, 0);
+
+	check_stack_aligned(sp);
+}
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
new file mode 100644
index 0000000..8f8a604
--- /dev/null
+++ b/arch/sparc/mm/fault_64.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * arch/sparc64/mm/fault.c: Page fault handlers for the 64-bit Sparc.
+ *
+ * Copyright (C) 1996, 2008 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
+ */
+
+#include <asm/head.h>
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/signal.h>
+#include <linux/mm.h>
+#include <linux/extable.h>
+#include <linux/init.h>
+#include <linux/perf_event.h>
+#include <linux/interrupt.h>
+#include <linux/kprobes.h>
+#include <linux/kdebug.h>
+#include <linux/percpu.h>
+#include <linux/context_tracking.h>
+#include <linux/uaccess.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/openprom.h>
+#include <asm/oplib.h>
+#include <asm/asi.h>
+#include <asm/lsu.h>
+#include <asm/sections.h>
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+
+int show_unhandled_signals = 1;
+
+static inline __kprobes int notify_page_fault(struct pt_regs *regs)
+{
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (kprobes_built_in() && !user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 0))
+			ret = 1;
+		preempt_enable();
+	}
+	return ret;
+}
+
+static void __kprobes unhandled_fault(unsigned long address,
+				      struct task_struct *tsk,
+				      struct pt_regs *regs)
+{
+	if ((unsigned long) address < PAGE_SIZE) {
+		printk(KERN_ALERT "Unable to handle kernel NULL "
+		       "pointer dereference\n");
+	} else {
+		printk(KERN_ALERT "Unable to handle kernel paging request "
+		       "at virtual address %016lx\n", (unsigned long)address);
+	}
+	printk(KERN_ALERT "tsk->{mm,active_mm}->context = %016lx\n",
+	       (tsk->mm ?
+		CTX_HWBITS(tsk->mm->context) :
+		CTX_HWBITS(tsk->active_mm->context)));
+	printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %016lx\n",
+	       (tsk->mm ? (unsigned long) tsk->mm->pgd :
+		          (unsigned long) tsk->active_mm->pgd));
+	die_if_kernel("Oops", regs);
+}
+
+static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
+{
+	printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
+	       regs->tpc);
+	printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
+	printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
+	printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
+	dump_stack();
+	unhandled_fault(regs->tpc, current, regs);
+}
+
+/*
+ * We now make sure that mmap_sem is held in all paths that call
+ * this.  Additionally, to prevent kswapd from ripping ptes out from
+ * under us, we disable interrupts around the time that we look at the
+ * pte; kswapd will then have to wait for its SMP IPI response from
+ * us, and vmtruncate likewise.  This saves us having to take the pte lock.
+ */
+static unsigned int get_user_insn(unsigned long tpc)
+{
+	pgd_t *pgdp = pgd_offset(current->mm, tpc);
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep, pte;
+	unsigned long pa;
+	u32 insn = 0;
+
+	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
+		goto out;
+	pudp = pud_offset(pgdp, tpc);
+	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
+		goto out;
+
+	/* This disables preemption for us as well. */
+	local_irq_disable();
+
+	pmdp = pmd_offset(pudp, tpc);
+	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
+		goto out_irq_enable;
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (is_hugetlb_pmd(*pmdp)) {
+		pa  = pmd_pfn(*pmdp) << PAGE_SHIFT;
+		pa += tpc & ~HPAGE_MASK;
+
+		/* Use phys bypass so we don't pollute dtlb/dcache. */
+		__asm__ __volatile__("lduwa [%1] %2, %0"
+				     : "=r" (insn)
+				     : "r" (pa), "i" (ASI_PHYS_USE_EC));
+	} else
+#endif
+	{
+		ptep = pte_offset_map(pmdp, tpc);
+		pte = *ptep;
+		if (pte_present(pte)) {
+			pa  = (pte_pfn(pte) << PAGE_SHIFT);
+			pa += (tpc & ~PAGE_MASK);
+
+			/* Use phys bypass so we don't pollute dtlb/dcache. */
+			__asm__ __volatile__("lduwa [%1] %2, %0"
+					     : "=r" (insn)
+					     : "r" (pa), "i" (ASI_PHYS_USE_EC));
+		}
+		pte_unmap(ptep);
+	}
+out_irq_enable:
+	local_irq_enable();
+out:
+	return insn;
+}
+
+static inline void
+show_signal_msg(struct pt_regs *regs, int sig, int code,
+		unsigned long address, struct task_struct *tsk)
+{
+	if (!unhandled_signal(tsk, sig))
+		return;
+
+	if (!printk_ratelimit())
+		return;
+
+	printk("%s%s[%d]: segfault at %lx ip %px (rpc %px) sp %px error %x",
+	       task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+	       tsk->comm, task_pid_nr(tsk), address,
+	       (void *)regs->tpc, (void *)regs->u_regs[UREG_I7],
+	       (void *)regs->u_regs[UREG_FP], code);
+
+	print_vma_addr(KERN_CONT " in ", regs->tpc);
+
+	printk(KERN_CONT "\n");
+}
+
+static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
+			     unsigned long fault_addr, unsigned int insn,
+			     int fault_code)
+{
+	unsigned long addr;
+
+	if (fault_code & FAULT_CODE_ITLB) {
+		addr = regs->tpc;
+	} else {
+		/* If we were able to probe the faulting instruction, use it
+		 * to compute a precise fault address.  Otherwise use the fault
+		 * time provided address which may only have page granularity.
+		 */
+		if (insn)
+			addr = compute_effective_address(regs, insn, 0);
+		else
+			addr = fault_addr;
+	}
+
+	if (unlikely(show_unhandled_signals))
+		show_signal_msg(regs, sig, code, addr, current);
+
+	force_sig_fault(sig, code, (void __user *) addr, 0, current);
+}
+
+static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
+{
+	if (!insn) {
+		if (!regs->tpc || (regs->tpc & 0x3))
+			return 0;
+		if (regs->tstate & TSTATE_PRIV) {
+			insn = *(unsigned int *) regs->tpc;
+		} else {
+			insn = get_user_insn(regs->tpc);
+		}
+	}
+	return insn;
+}
+
+static void __kprobes do_kernel_fault(struct pt_regs *regs, int si_code,
+				      int fault_code, unsigned int insn,
+				      unsigned long address)
+{
+	unsigned char asi = ASI_P;
+
+	if ((!insn) && (regs->tstate & TSTATE_PRIV))
+		goto cannot_handle;
+
+	/* If the user insn could not be read (thus insn is zero), that
+	 * is fine.  We will just gun down the process with a signal
+	 * in that case.
+	 */
+
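+	/* (insn & 0xc0800000) == 0xc0800000 matches the alternate-space
+	 * load/store forms; the ASI comes from %tstate when the immediate
+	 * bit (0x2000) is set, otherwise from the insn itself, and
+	 * (asi & 0xf2) == 0x82 covers the non-faulting ASI variants.
+	 */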
+	if (!(fault_code & (FAULT_CODE_WRITE|FAULT_CODE_ITLB)) &&
+	    (insn & 0xc0800000) == 0xc0800000) {
+		if (insn & 0x2000)
+			asi = (regs->tstate >> 24);
+		else
+			asi = (insn >> 5);
+		if ((asi & 0xf2) == 0x82) {
+			if (insn & 0x1000000) {
+				handle_ldf_stq(insn, regs);
+			} else {
+				/* This was a non-faulting load. Just clear the
+				 * destination register(s) and continue with the next
+				 * instruction. -jj
+				 */
+				handle_ld_nf(insn, regs);
+			}
+			return;
+		}
+	}
+
+	/* Is this in ex_table? */
+	if (regs->tstate & TSTATE_PRIV) {
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+	} else {
+		/* The si_code was set to make clear whether
+		 * this was a SEGV_MAPERR or SEGV_ACCERR fault.
+		 */
+		do_fault_siginfo(si_code, SIGSEGV, regs, address, insn, fault_code);
+		return;
+	}
+
+cannot_handle:
+	unhandled_fault (address, current, regs);
+}
+
+static void noinline __kprobes bogus_32bit_fault_tpc(struct pt_regs *regs)
+{
+	static int times;
+
+	if (times++ < 10)
+		printk(KERN_ERR "FAULT[%s:%d]: 32-bit process reports "
+		       "64-bit TPC [%lx]\n",
+		       current->comm, current->pid,
+		       regs->tpc);
+	show_regs(regs);
+}
+
+asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
+{
+	enum ctx_state prev_state = exception_enter();
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned int insn = 0;
+	int si_code, fault_code;
+	vm_fault_t fault;
+	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+	fault_code = get_thread_fault_code();
+
+	if (notify_page_fault(regs))
+		goto exit_exception;
+
+	si_code = SEGV_MAPERR;
+	address = current_thread_info()->fault_address;
+
+	if ((fault_code & FAULT_CODE_ITLB) &&
+	    (fault_code & FAULT_CODE_DTLB))
+		BUG();
+
+	if (test_thread_flag(TIF_32BIT)) {
+		if (!(regs->tstate & TSTATE_PRIV)) {
+			if (unlikely((regs->tpc >> 32) != 0)) {
+				bogus_32bit_fault_tpc(regs);
+				goto intr_or_no_mm;
+			}
+		}
+		if (unlikely((address >> 32) != 0))
+			goto intr_or_no_mm;
+	}
+
+	if (regs->tstate & TSTATE_PRIV) {
+		unsigned long tpc = regs->tpc;
+
+		/* Sanity check the PC. */
+		if ((tpc >= KERNBASE && tpc < (unsigned long) __init_end) ||
+		    (tpc >= MODULES_VADDR && tpc < MODULES_END)) {
+			/* Valid, no problems... */
+		} else {
+			bad_kernel_pc(regs, address);
+			goto exit_exception;
+		}
+	} else
+		flags |= FAULT_FLAG_USER;
+
+	/*
+	 * If we're in an interrupt or have no user
+	 * context, we must not take the fault..
+	 */
+	if (faulthandler_disabled() || !mm)
+		goto intr_or_no_mm;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+	if (!down_read_trylock(&mm->mmap_sem)) {
+		if ((regs->tstate & TSTATE_PRIV) &&
+		    !search_exception_tables(regs->tpc)) {
+			insn = get_fault_insn(regs, insn);
+			goto handle_kernel_fault;
+		}
+
+retry:
+		down_read(&mm->mmap_sem);
+	}
+
+	if (fault_code & FAULT_CODE_BAD_RA)
+		goto do_sigbus;
+
+	vma = find_vma(mm, address);
+	if (!vma)
+		goto bad_area;
+
+	/* Pure DTLB misses do not tell us whether the fault causing
+	 * load/store/atomic was a write or not, it only says that there
+	 * was no match.  So in such a case we (carefully) read the
+	 * instruction to try and figure this out.  It's an optimization
+	 * so it's ok if we can't do this.
+	 *
+	 * Special hack, window spill/fill knows the exact fault type.
+	 */
+	if (((fault_code &
+	      (FAULT_CODE_DTLB | FAULT_CODE_WRITE | FAULT_CODE_WINFIXUP)) == FAULT_CODE_DTLB) &&
+	    (vma->vm_flags & VM_WRITE) != 0) {
+		insn = get_fault_insn(regs, 0);
+		if (!insn)
+			goto continue_fault;
+		/* All loads, stores and atomics have bits 30 and 31 both set
+		 * in the instruction.  Bit 21 is set in all stores, but we
+		 * have to avoid prefetches which also have bit 21 set.
+		 */
+		if ((insn & 0xc0200000) == 0xc0200000 &&
+		    (insn & 0x01780000) != 0x01680000) {
+			/* Don't bother updating thread struct value,
+			 * because update_mmu_cache only cares which tlb
+			 * the access came from.
+			 */
+			fault_code |= FAULT_CODE_WRITE;
+		}
+	}
+continue_fault:
+
+	if (vma->vm_start <= address)
+		goto good_area;
+	if (!(vma->vm_flags & VM_GROWSDOWN))
+		goto bad_area;
+	if (!(fault_code & FAULT_CODE_WRITE)) {
+		/* Non-faulting loads shouldn't expand stack. */
+		insn = get_fault_insn(regs, insn);
+		if ((insn & 0xc0800000) == 0xc0800000) {
+			unsigned char asi;
+
+			if (insn & 0x2000)
+				asi = (regs->tstate >> 24);
+			else
+				asi = (insn >> 5);
+			if ((asi & 0xf2) == 0x82)
+				goto bad_area;
+		}
+	}
+	if (expand_stack(vma, address))
+		goto bad_area;
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it..
+	 */
+good_area:
+	si_code = SEGV_ACCERR;
+
+	/* If we took a ITLB miss on a non-executable page, catch
+	 * that here.
+	 */
+	if ((fault_code & FAULT_CODE_ITLB) && !(vma->vm_flags & VM_EXEC)) {
+		WARN(address != regs->tpc,
+		     "address (%lx) != regs->tpc (%lx)\n", address, regs->tpc);
+		WARN_ON(regs->tstate & TSTATE_PRIV);
+		goto bad_area;
+	}
+
+	if (fault_code & FAULT_CODE_WRITE) {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+
+		/* Spitfire has an icache which does not snoop
+		 * processor stores.  Later processors do...
+		 */
+		if (tlb_type == spitfire &&
+		    (vma->vm_flags & VM_EXEC) != 0 &&
+		    vma->vm_file != NULL)
+			set_thread_fault_code(fault_code |
+					      FAULT_CODE_BLKCOMMIT);
+
+		flags |= FAULT_FLAG_WRITE;
+	} else {
+		/* Allow reads even for write-only mappings */
+		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+			goto bad_area;
+	}
+
+	fault = handle_mm_fault(vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		goto exit_exception;
+
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			goto out_of_memory;
+		else if (fault & VM_FAULT_SIGSEGV)
+			goto bad_area;
+		else if (fault & VM_FAULT_SIGBUS)
+			goto do_sigbus;
+		BUG();
+	}
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			flags |= FAULT_FLAG_TRIED;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+	up_read(&mm->mmap_sem);
+
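+	/* After a successful fault, see whether either TSB must grow:
+	 * base-page rss (minus pages mapped by THP, which are accounted
+	 * to the huge TSB) against the base TSB limit, and the hugetlb +
+	 * THP counts against the huge TSB limit.
+	 */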
+	mm_rss = get_mm_rss(mm);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
+#endif
+	if (unlikely(mm_rss >
+		     mm->context.tsb_block[MM_TSB_BASE].tsb_rss_limit))
+		tsb_grow(mm, MM_TSB_BASE, mm_rss);
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count;
+	mm_rss *= REAL_HPAGE_PER_HPAGE;
+	if (unlikely(mm_rss >
+		     mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) {
+		if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+			tsb_grow(mm, MM_TSB_HUGE, mm_rss);
+		else
+			hugetlb_setup(regs);
+
+	}
+#endif
+exit_exception:
+	exception_exit(prev_state);
+	return;
+
+	/*
+	 * Something tried to access memory that isn't in our memory map..
+	 * Fix it, but check if it's kernel or user first..
+	 */
+bad_area:
+	insn = get_fault_insn(regs, insn);
+	up_read(&mm->mmap_sem);
+
+handle_kernel_fault:
+	do_kernel_fault(regs, si_code, fault_code, insn, address);
+	goto exit_exception;
+
+/*
+ * We ran out of memory, or some other thing happened to us that made
+ * us unable to handle the page fault gracefully.
+ */
+out_of_memory:
+	insn = get_fault_insn(regs, insn);
+	up_read(&mm->mmap_sem);
+	if (!(regs->tstate & TSTATE_PRIV)) {
+		pagefault_out_of_memory();
+		goto exit_exception;
+	}
+	goto handle_kernel_fault;
+
+intr_or_no_mm:
+	insn = get_fault_insn(regs, 0);
+	goto handle_kernel_fault;
+
+do_sigbus:
+	insn = get_fault_insn(regs, insn);
+	up_read(&mm->mmap_sem);
+
+	/*
+	 * Send a sigbus, regardless of whether we were in kernel
+	 * or user mode.
+	 */
+	do_fault_siginfo(BUS_ADRERR, SIGBUS, regs, address, insn, fault_code);
+
+	/* Kernel mode? Handle exceptions or die */
+	if (regs->tstate & TSTATE_PRIV)
+		goto handle_kernel_fault;
+}
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
new file mode 100644
index 0000000..aee6dba
--- /dev/null
+++ b/arch/sparc/mm/gup.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Lockless get_user_pages_fast for sparc, cribbed from powerpc
+ *
+ * Copyright (C) 2008 Nick Piggin
+ * Copyright (C) 2008 Novell Inc.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmstat.h>
+#include <linux/pagemap.h>
+#include <linux/rwsem.h>
+#include <asm/pgtable.h>
+#include <asm/adi.h>
+
+/*
+ * The performance critical leaf functions are made noinline otherwise gcc
+ * inlines everything into a single function which results in too much
+ * register pressure.
+ */
+static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
+		unsigned long end, int write, struct page **pages, int *nr)
+{
+	unsigned long mask, result;
+	pte_t *ptep;
+
+	if (tlb_type == hypervisor) {
+		result = _PAGE_PRESENT_4V|_PAGE_P_4V;
+		if (write)
+			result |= _PAGE_WRITE_4V;
+	} else {
+		result = _PAGE_PRESENT_4U|_PAGE_P_4U;
+		if (write)
+			result |= _PAGE_WRITE_4U;
+	}
+	mask = result | _PAGE_SPECIAL;
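+	/* A pte is only accepted below if its PRESENT and P bits are set
+	 * (plus WRITE when a write was requested) and _PAGE_SPECIAL is
+	 * clear -- mask includes the special bit but result does not.
+	 */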
+
+	ptep = pte_offset_kernel(&pmd, addr);
+	do {
+		struct page *page, *head;
+		pte_t pte = *ptep;
+
+		if ((pte_val(pte) & mask) != result)
+			return 0;
+		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+
+		/* The hugepage case is simplified on sparc64 because
+		 * we encode the sub-page pfn offsets into the
+		 * hugepage PTEs.  We could optimize this in the future
+		 * to use page_cache_add_speculative() for the hugepage case.
+		 */
+		page = pte_page(pte);
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
+			return 0;
+		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
+			put_page(head);
+			return 0;
+		}
+
+		pages[*nr] = page;
+		(*nr)++;
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
+
+	return 1;
+}
+
+static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
+			unsigned long end, int write, struct page **pages,
+			int *nr)
+{
+	struct page *head, *page;
+	int refs;
+
+	if (!(pmd_val(pmd) & _PAGE_VALID))
+		return 0;
+
+	if (write && !pmd_write(pmd))
+		return 0;
+
+	refs = 0;
+	page = pmd_page(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+	head = compound_head(page);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
+			unsigned long end, int write, struct page **pages,
+			int *nr)
+{
+	struct page *head, *page;
+	int refs;
+
+	if (!(pud_val(pud) & _PAGE_VALID))
+		return 0;
+
+	if (write && !pud_write(pud))
+		return 0;
+
+	refs = 0;
+	page = pud_page(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+	head = compound_head(page);
+	do {
+		VM_BUG_ON(compound_head(page) != head);
+		pages[*nr] = page;
+		(*nr)++;
+		page++;
+		refs++;
+	} while (addr += PAGE_SIZE, addr != end);
+
+	if (!page_cache_add_speculative(head, refs)) {
+		*nr -= refs;
+		return 0;
+	}
+
+	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
+		*nr -= refs;
+		while (refs--)
+			put_page(head);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pmd_t *pmdp;
+
+	pmdp = pmd_offset(&pud, addr);
+	do {
+		pmd_t pmd = *pmdp;
+
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(pmd))
+			return 0;
+		if (unlikely(pmd_large(pmd))) {
+			if (!gup_huge_pmd(pmdp, pmd, addr, next,
+					  write, pages, nr))
+				return 0;
+		} else if (!gup_pte_range(pmd, addr, next, write,
+					  pages, nr))
+			return 0;
+	} while (pmdp++, addr = next, addr != end);
+
+	return 1;
+}
+
+static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
+		int write, struct page **pages, int *nr)
+{
+	unsigned long next;
+	pud_t *pudp;
+
+	pudp = pud_offset(&pgd, addr);
+	do {
+		pud_t pud = *pudp;
+
+		next = pud_addr_end(addr, end);
+		if (pud_none(pud))
+			return 0;
+		if (unlikely(pud_large(pud))) {
+			if (!gup_huge_pud(pudp, pud, addr, next,
+					  write, pages, nr))
+				return 0;
+		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
+			return 0;
+	} while (pudp++, addr = next, addr != end);
+
+	return 1;
+}
+
+/*
+ * Note a difference with get_user_pages_fast: this always returns the
+ * number of pages pinned, 0 if no pages were pinned.
+ */
+int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			  struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next, flags;
+	pgd_t *pgdp;
+	int nr = 0;
+
+#ifdef CONFIG_SPARC64
+	if (adi_capable()) {
+		long addr = start;
+
+		/* If userspace has passed a versioned address, kernel
+		 * will not find it in the VMAs since it does not store
+		 * the version tags in the list of VMAs. Storing version
+		 * tags in list of VMAs is impractical since they can be
+		 * changed any time from userspace without dropping into
+		 * kernel. Any address search in VMAs will be done with
+		 * non-versioned addresses. Ensure the ADI version bits
+		 * are dropped here by sign extending the last bit before
+		 * ADI bits. IOMMU does not implement version tags.
+		 */
+		addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
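+		/* e.g. with 4 ADI tag bits this shifts the tag out the
+		 * top and arithmetic-shifts back, sign-extending from the
+		 * bit just below the tag so the VA is untagged again. */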
+		start = addr;
+	}
+#endif
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+
+	local_irq_save(flags);
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			break;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			break;
+	} while (pgdp++, addr = next, addr != end);
+	local_irq_restore(flags);
+
+	return nr;
+}
+
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+			struct page **pages)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr, len, end;
+	unsigned long next;
+	pgd_t *pgdp;
+	int nr = 0;
+
+#ifdef CONFIG_SPARC64
+	if (adi_capable()) {
+		long addr = start;
+
+		/* If userspace has passed a versioned address, kernel
+		 * will not find it in the VMAs since it does not store
+		 * the version tags in the list of VMAs. Storing version
+		 * tags in list of VMAs is impractical since they can be
+		 * changed any time from userspace without dropping into
+		 * kernel. Any address search in VMAs will be done with
+		 * non-versioned addresses. Ensure the ADI version bits
+		 * are dropped here by sign extending the last bit before
+		 * ADI bits. IOMMU does not implement version tags.
+		 */
+		addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
+		start = addr;
+	}
+#endif
+	start &= PAGE_MASK;
+	addr = start;
+	len = (unsigned long) nr_pages << PAGE_SHIFT;
+	end = start + len;
+
+	/*
+	 * XXX: batch / limit 'nr', to avoid large irq off latency
+	 * needs some instrumenting to determine the common sizes used by
+	 * important workloads (eg. DB2), and whether limiting the batch size
+	 * will decrease performance.
+	 *
+	 * It seems like we're in the clear for the moment. Direct-IO is
+	 * the main guy that batches up lots of get_user_pages, and even
+	 * they are limited to 64-at-a-time which is not so many.
+	 */
+	/*
+	 * This doesn't prevent pagetable teardown, but does prevent
+	 * the pagetables from being freed on sparc.
+	 *
+	 * So long as we atomically load page table pointers versus teardown,
+	 * we can follow the address down to the page and take a ref on it.
+	 */
+	local_irq_disable();
+
+	pgdp = pgd_offset(mm, addr);
+	do {
+		pgd_t pgd = *pgdp;
+
+		next = pgd_addr_end(addr, end);
+		if (pgd_none(pgd))
+			goto slow;
+		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+			goto slow;
+	} while (pgdp++, addr = next, addr != end);
+
+	local_irq_enable();
+
+	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
+	return nr;
+
+	{
+		int ret;
+
+slow:
+		local_irq_enable();
+
+		/* Try to get the remaining pages with get_user_pages */
+		start += nr << PAGE_SHIFT;
+		pages += nr;
+
+		ret = get_user_pages_unlocked(start,
+			(end - start) >> PAGE_SHIFT, pages,
+			write ? FOLL_WRITE : 0);
+
+		/* Have to be a bit careful with return values */
+		if (nr > 0) {
+			if (ret < 0)
+				ret = nr;
+			else
+				ret += nr;
+		}
+
+		return ret;
+	}
+}
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
new file mode 100644
index 0000000..86bc2a5
--- /dev/null
+++ b/arch/sparc/mm/highmem.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  highmem.c: virtual kernel memory mappings for high memory
+ *
+ *  Provides kernel-static versions of atomic kmap functions originally
+ *  found as inlines in include/asm-sparc/highmem.h.  These became
+ *  needed as kmap_atomic() and kunmap_atomic() started getting
+ *  called from within modules.
+ *  -- Tomas Szepe <szepe@pinerecords.com>, September 2002
+ *
+ *  But kmap_atomic() and kunmap_atomic() cannot be inlined in
+ *  modules because they are loaded with btfixup-ped functions.
+ */
+
+/*
+ * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+ * gives a more generic (and caching) interface. But kmap_atomic can
+ * be used in IRQ contexts, so in some (very limited) cases we need it.
+ *
+ * XXX This is an old text. Actually, it's good to use atomic kmaps,
+ * provided you remember that they are atomic and not try to sleep
+ * with a kmap taken, much like a spinlock. Non-atomic kmaps are
+ * shared by CPUs, and so precious, and establishing them requires IPI.
+ * Atomic kmaps are lightweight and we may have NCPUS more of them.
+ */
+#include <linux/highmem.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm/vaddrs.h>
+
+pgprot_t kmap_prot;
+
+static pte_t *kmap_pte;
+
+void __init kmap_init(void)
+{
+	unsigned long address;
+	pmd_t *dir;
+
+	address = __fix_to_virt(FIX_KMAP_BEGIN);
+	dir = pmd_offset(pgd_offset_k(address), address);
+
+	/* cache the first kmap pte */
+	kmap_pte = pte_offset_kernel(dir, address);
+	kmap_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE);
+}
+
+void *kmap_atomic(struct page *page)
+{
+	unsigned long vaddr;
+	long idx, type;
+
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	type = kmap_atomic_idx_push();
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
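+	/* Each CPU owns a window of KM_TYPE_NR fixmap slots; idx selects
+	 * this CPU's slot for the kmap type just pushed. */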
+
+/* XXX Fix - Anton */
+#if 0
+	__flush_cache_one(vaddr);
+#else
+	flush_cache_all();
+#endif
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(kmap_pte-idx)));
+#endif
+	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
+/* XXX Fix - Anton */
+#if 0
+	__flush_tlb_one(vaddr);
+#else
+	flush_tlb_all();
+#endif
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	int type;
+
+	if (vaddr < FIXADDR_START) { // FIXME
+		pagefault_enable();
+		preempt_enable();
+		return;
+	}
+
+	type = kmap_atomic_idx();
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	{
+		unsigned long idx;
+
+		idx = type + KM_TYPE_NR * smp_processor_id();
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+
+		/* XXX Fix - Anton */
+#if 0
+		__flush_cache_one(vaddr);
+#else
+		flush_cache_all();
+#endif
+
+		/*
+		 * force other mappings to Oops if they try to access
+		 * this pte without first remapping it
+		 */
+		pte_clear(&init_mm, vaddr, kmap_pte-idx);
+		/* XXX Fix - Anton */
+#if 0
+		__flush_tlb_one(vaddr);
+#else
+		flush_tlb_all();
+#endif
+	}
+#endif
+
+	kmap_atomic_idx_pop();
+	pagefault_enable();
+	preempt_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
new file mode 100644
index 0000000..f78793a
--- /dev/null
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -0,0 +1,520 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPARC64 Huge TLB page support.
+ *
+ * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+/* Slightly simplified from the non-hugepage variant because by
+ * definition we don't have to worry about any page coloring stuff
+ */
+
+static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
+							unsigned long addr,
+							unsigned long len,
+							unsigned long pgoff,
+							unsigned long flags)
+{
+	struct hstate *h = hstate_file(filp);
+	unsigned long task_size = TASK_SIZE;
+	struct vm_unmapped_area_info info;
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = STACK_TOP32;
+
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = min(task_size, VA_EXCLUDE_START);
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
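+	/* The first attempt stays below the sparc64 VA hole; if it fails
+	 * for a 64-bit task, retry in the region above the hole. */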
+	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.low_limit = VA_EXCLUDE_END;
+		info.high_limit = task_size;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+static unsigned long
+hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+				  const unsigned long len,
+				  const unsigned long pgoff,
+				  const unsigned long flags)
+{
+	struct hstate *h = hstate_file(filp);
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = addr0;
+	struct vm_unmapped_area_info info;
+
+	/* This should only ever run for 32-bit processes.  */
+	BUG_ON(!test_thread_flag(TIF_32BIT));
+
+	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+	info.length = len;
+	info.low_limit = PAGE_SIZE;
+	info.high_limit = mm->mmap_base;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	addr = vm_unmapped_area(&info);
+
+	/*
+	 * A failed mmap() very likely causes application failure,
+	 * so fall back to the bottom-up function here. This scenario
+	 * can happen with large stack limits and large mmap()
+	 * allocations.
+	 */
+	if (addr & ~PAGE_MASK) {
+		VM_BUG_ON(addr != -ENOMEM);
+		info.flags = 0;
+		info.low_limit = TASK_UNMAPPED_BASE;
+		info.high_limit = STACK_TOP32;
+		addr = vm_unmapped_area(&info);
+	}
+
+	return addr;
+}
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+		unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	struct hstate *h = hstate_file(file);
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long task_size = TASK_SIZE;
+
+	if (test_thread_flag(TIF_32BIT))
+		task_size = STACK_TOP32;
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (len > task_size)
+		return -ENOMEM;
+
+	if (flags & MAP_FIXED) {
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+		return addr;
+	}
+
+	if (addr) {
+		addr = ALIGN(addr, huge_page_size(h));
+		vma = find_vma(mm, addr);
+		if (task_size - len >= addr &&
+		    (!vma || addr + len <= vm_start_gap(vma)))
+			return addr;
+	}
+	if (mm->get_unmapped_area == arch_get_unmapped_area)
+		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
+				pgoff, flags);
+	else
+		return hugetlb_get_unmapped_area_topdown(file, addr, len,
+				pgoff, flags);
+}
+
+static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	return entry;
+}
+
+static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	unsigned long hugepage_size = _PAGE_SZ4MB_4V;
+
+	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;
+
+	switch (shift) {
+	case HPAGE_16GB_SHIFT:
+		hugepage_size = _PAGE_SZ16GB_4V;
+		pte_val(entry) |= _PAGE_PUD_HUGE;
+		break;
+	case HPAGE_2GB_SHIFT:
+		hugepage_size = _PAGE_SZ2GB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_256MB_SHIFT:
+		hugepage_size = _PAGE_SZ256MB_4V;
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_SHIFT:
+		pte_val(entry) |= _PAGE_PMD_HUGE;
+		break;
+	case HPAGE_64K_SHIFT:
+		hugepage_size = _PAGE_SZ64K_4V;
+		break;
+	default:
+		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
+	}
+
+	pte_val(entry) = pte_val(entry) | hugepage_size;
+	return entry;
+}
+
+static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
+{
+	if (tlb_type == hypervisor)
+		return sun4v_hugepage_shift_to_tte(entry, shift);
+	else
+		return sun4u_hugepage_shift_to_tte(entry, shift);
+}
+
+pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
+			 struct page *page, int writeable)
+{
+	unsigned int shift = huge_page_shift(hstate_vma(vma));
+	pte_t pte;
+
+	pte = hugepage_shift_to_tte(entry, shift);
+
+#ifdef CONFIG_SPARC64
+	/* If this vma has ADI enabled on it, turn on TTE.mcd
+	 */
+	if (vma->vm_flags & VM_SPARC_ADI)
+		return pte_mkmcd(pte);
+	else
+		return pte_mknotmcd(pte);
+#else
+	return pte;
+#endif
+}
+
+static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
+{
+	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
+	unsigned int shift;
+
+	switch (tte_szbits) {
+	case _PAGE_SZ16GB_4V:
+		shift = HPAGE_16GB_SHIFT;
+		break;
+	case _PAGE_SZ2GB_4V:
+		shift = HPAGE_2GB_SHIFT;
+		break;
+	case _PAGE_SZ256MB_4V:
+		shift = HPAGE_256MB_SHIFT;
+		break;
+	case _PAGE_SZ4MB_4V:
+		shift = REAL_HPAGE_SHIFT;
+		break;
+	case _PAGE_SZ64K_4V:
+		shift = HPAGE_64K_SHIFT;
+		break;
+	default:
+		shift = PAGE_SHIFT;
+		break;
+	}
+	return shift;
+}
+
+static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
+{
+	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
+	unsigned int shift;
+
+	switch (tte_szbits) {
+	case _PAGE_SZ256MB_4U:
+		shift = HPAGE_256MB_SHIFT;
+		break;
+	case _PAGE_SZ4MB_4U:
+		shift = REAL_HPAGE_SHIFT;
+		break;
+	case _PAGE_SZ64K_4U:
+		shift = HPAGE_64K_SHIFT;
+		break;
+	default:
+		shift = PAGE_SHIFT;
+		break;
+	}
+	return shift;
+}
+
+static unsigned int huge_tte_to_shift(pte_t entry)
+{
+	unsigned long shift;
+
+	if (tlb_type == hypervisor)
+		shift = sun4v_huge_tte_to_shift(entry);
+	else
+		shift = sun4u_huge_tte_to_shift(entry);
+
+	if (shift == PAGE_SHIFT)
+		WARN_ONCE(1, "huge_tte_to_shift: invalid hugepage tte=0x%lx\n",
+			  pte_val(entry));
+
+	return shift;
+}
+
+static unsigned long huge_tte_to_size(pte_t pte)
+{
+	unsigned long size = 1UL << huge_tte_to_shift(pte);
+
+	if (size == REAL_HPAGE_SIZE)
+		size = HPAGE_SIZE;
+	return size;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+			unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		return NULL;
+	if (sz >= PUD_SIZE)
+		return (pte_t *)pud;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		return NULL;
+	if (sz >= PMD_SIZE)
+		return (pte_t *)pmd;
+	return pte_alloc_map(mm, pmd, addr);
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm,
+		       unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd))
+		return NULL;
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		return NULL;
+	if (is_hugetlb_pud(*pud))
+		return (pte_t *)pud;
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		return NULL;
+	if (is_hugetlb_pmd(*pmd))
+		return (pte_t *)pmd;
+	return pte_offset_map(pmd, addr);
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
+{
+	unsigned int nptes, orig_shift, shift;
+	unsigned long i, size;
+	pte_t orig;
+
+	size = huge_tte_to_size(entry);
+
+	shift = PAGE_SHIFT;
+	if (size >= PUD_SIZE)
+		shift = PUD_SHIFT;
+	else if (size >= PMD_SIZE)
+		shift = PMD_SHIFT;
+	else
+		shift = PAGE_SHIFT;
+
+	nptes = size >> shift;
+
+	if (!pte_present(*ptep) && pte_present(entry))
+		mm->context.hugetlb_pte_count += nptes;
+
+	addr &= ~(size - 1);
+	orig = *ptep;
+	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);
+
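+	/* Replicate the TTE into every underlying slot the huge mapping
+	 * spans, stepping the physical address by one slot (i << shift)
+	 * per entry. */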
+	for (i = 0; i < nptes; i++)
+		ptep[i] = __pte(pte_val(entry) + (i << shift));
+
+	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
+	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+	if (size == HPAGE_SIZE)
+		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
+				    orig_shift);
+}
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
+{
+	unsigned int i, nptes, orig_shift, shift;
+	unsigned long size;
+	pte_t entry;
+
+	entry = *ptep;
+	size = huge_tte_to_size(entry);
+
+	shift = PAGE_SHIFT;
+	if (size >= PUD_SIZE)
+		shift = PUD_SHIFT;
+	else if (size >= PMD_SIZE)
+		shift = PMD_SHIFT;
+	else
+		shift = PAGE_SHIFT;
+
+	nptes = size >> shift;
+	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);
+
+	if (pte_present(entry))
+		mm->context.hugetlb_pte_count -= nptes;
+
+	addr &= ~(size - 1);
+	for (i = 0; i < nptes; i++)
+		ptep[i] = __pte(0UL);
+
+	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
+	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
+	if (size == HPAGE_SIZE)
+		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
+				    orig_shift);
+
+	return entry;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+	return !pmd_none(pmd) &&
+		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
+}
+
+int pud_huge(pud_t pud)
+{
+	return !pud_none(pud) &&
+		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
+}
+
+static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+			   unsigned long addr)
+{
+	pgtable_t token = pmd_pgtable(*pmd);
+
+	pmd_clear(pmd);
+	pte_free_tlb(tlb, token, addr);
+	mm_dec_nr_ptes(tlb->mm);
+}
+
+static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
+				   unsigned long addr, unsigned long end,
+				   unsigned long floor, unsigned long ceiling)
+{
+	pmd_t *pmd;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none(*pmd))
+			continue;
+		if (is_hugetlb_pmd(*pmd))
+			pmd_clear(pmd);
+		else
+			hugetlb_free_pte_range(tlb, pmd, addr);
+	} while (pmd++, addr = next, addr != end);
+
+	start &= PUD_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= PUD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pmd = pmd_offset(pud, start);
+	pud_clear(pud);
+	pmd_free_tlb(tlb, pmd, start);
+	mm_dec_nr_pmds(tlb->mm);
+}
+
+static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
+				   unsigned long addr, unsigned long end,
+				   unsigned long floor, unsigned long ceiling)
+{
+	pud_t *pud;
+	unsigned long next;
+	unsigned long start;
+
+	start = addr;
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		if (is_hugetlb_pud(*pud))
+			pud_clear(pud);
+		else
+			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
+					       ceiling);
+	} while (pud++, addr = next, addr != end);
+
+	start &= PGDIR_MASK;
+	if (start < floor)
+		return;
+	if (ceiling) {
+		ceiling &= PGDIR_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		return;
+
+	pud = pud_offset(pgd, start);
+	pgd_clear(pgd);
+	pud_free_tlb(tlb, pud, start);
+	mm_dec_nr_puds(tlb->mm);
+}
+
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+			    unsigned long addr, unsigned long end,
+			    unsigned long floor, unsigned long ceiling)
+{
+	pgd_t *pgd;
+	unsigned long next;
+
+	addr &= PMD_MASK;
+	if (addr < floor) {
+		addr += PMD_SIZE;
+		if (!addr)
+			return;
+	}
+	if (ceiling) {
+		ceiling &= PMD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		end -= PMD_SIZE;
+	if (addr > end - 1)
+		return;
+
+	pgd = pgd_offset(tlb->mm, addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
+	} while (pgd++, addr = next, addr != end);
+}
diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S
new file mode 100644
index 0000000..66885a8
--- /dev/null
+++ b/arch/sparc/mm/hypersparc.S
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * hypersparc.S: High speed Hypersparc mmu/cache operations.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asm-offsets.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <linux/init.h>
+
+	.text
+	.align	4
+
+	.globl	hypersparc_flush_cache_all, hypersparc_flush_cache_mm
+	.globl	hypersparc_flush_cache_range, hypersparc_flush_cache_page
+	.globl	hypersparc_flush_page_to_ram
+	.globl	hypersparc_flush_page_for_dma, hypersparc_flush_sig_insns
+	.globl	hypersparc_flush_tlb_all, hypersparc_flush_tlb_mm
+	.globl	hypersparc_flush_tlb_range, hypersparc_flush_tlb_page
+
+hypersparc_flush_cache_all:
+	WINDOW_FLUSH(%g4, %g5)
+	sethi	%hi(vac_cache_size), %g4
+	ld	[%g4 + %lo(vac_cache_size)], %g5
+	sethi	%hi(vac_line_size), %g1
+	ld	[%g1 + %lo(vac_line_size)], %g2
+1:	
+	subcc	%g5, %g2, %g5			! hyper_flush_unconditional_combined
+	bne	1b
+	 sta	%g0, [%g5] ASI_M_FLUSH_CTX
+	retl
+	 sta	%g0, [%g0] ASI_M_FLUSH_IWHOLE	! hyper_flush_whole_icache
+
+	/* We expand the window flush to get maximum performance. */
+hypersparc_flush_cache_mm:
+#ifndef CONFIG_SMP
+	ld	[%o0 + AOFF_mm_context], %g1
+	cmp	%g1, -1
+	be	hypersparc_flush_cache_mm_out
+#endif
+	WINDOW_FLUSH(%g4, %g5)
+
+	sethi	%hi(vac_line_size), %g1
+	ld	[%g1 + %lo(vac_line_size)], %o1
+	sethi	%hi(vac_cache_size), %g2
+	ld	[%g2 + %lo(vac_cache_size)], %o0
+	add	%o1, %o1, %g1
+	add	%o1, %g1, %g2
+	add	%o1, %g2, %g3
+	add	%o1, %g3, %g4
+	add	%o1, %g4, %g5
+	add	%o1, %g5, %o4
+	add	%o1, %o4, %o5
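+	/* %o1 and %g1-%g5/%o4/%o5 now hold 1..8 cache-line strides, so the
+	 * loop below flushes eight lines per iteration. */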
+
+	/* BLAMMO! */
+1:
+	subcc	%o0, %o5, %o0				! hyper_flush_cache_user
+	sta	%g0, [%o0 + %g0] ASI_M_FLUSH_USER
+	sta	%g0, [%o0 + %o1] ASI_M_FLUSH_USER
+	sta	%g0, [%o0 + %g1] ASI_M_FLUSH_USER
+	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_USER
+	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_USER
+	sta	%g0, [%o0 + %g4] ASI_M_FLUSH_USER
+	sta	%g0, [%o0 + %g5] ASI_M_FLUSH_USER
+	bne	1b
+	 sta	%g0, [%o0 + %o4] ASI_M_FLUSH_USER
+hypersparc_flush_cache_mm_out:
+	retl
+	 nop
+
+	/* The things we do for performance... */
+hypersparc_flush_cache_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+#ifndef CONFIG_SMP
+	ld	[%o0 + AOFF_mm_context], %g1
+	cmp	%g1, -1
+	be	hypersparc_flush_cache_range_out
+#endif
+	WINDOW_FLUSH(%g4, %g5)
+
+	sethi	%hi(vac_line_size), %g1
+	ld	[%g1 + %lo(vac_line_size)], %o4
+	sethi	%hi(vac_cache_size), %g2
+	ld	[%g2 + %lo(vac_cache_size)], %o3
+
+	/* Here comes the fun part... */
+	add	%o2, (PAGE_SIZE - 1), %o2
+	andn	%o1, (PAGE_SIZE - 1), %o1
+	add	%o4, %o4, %o5
+	andn	%o2, (PAGE_SIZE - 1), %o2
+	add	%o4, %o5, %g1
+	sub	%o2, %o1, %g4
+	add	%o4, %g1, %g2
+	sll	%o3, 2, %g5
+	add	%o4, %g2, %g3
+	cmp	%g4, %g5
+	add	%o4, %g3, %g4
+	blu	0f
+	 add	%o4, %g4, %g5
+	add	%o4, %g5, %g7
+
+	/* Flush the entire user space; believe it or not this is quicker
+	 * than page-at-a-time flushing for ranges > (cache_size << 2).
+	 */
+1:
+	subcc	%o3, %g7, %o3
+	sta	%g0, [%o3 + %g0] ASI_M_FLUSH_USER
+	sta	%g0, [%o3 + %o4] ASI_M_FLUSH_USER
+	sta	%g0, [%o3 + %o5] ASI_M_FLUSH_USER
+	sta	%g0, [%o3 + %g1] ASI_M_FLUSH_USER
+	sta	%g0, [%o3 + %g2] ASI_M_FLUSH_USER
+	sta	%g0, [%o3 + %g3] ASI_M_FLUSH_USER
+	sta	%g0, [%o3 + %g4] ASI_M_FLUSH_USER
+	bne	1b
+	 sta	%g0, [%o3 + %g5] ASI_M_FLUSH_USER
+	retl
+	 nop
+
+	/* Below our threshold, flush one page at a time. */
+0:
+	ld	[%o0 + AOFF_mm_context], %o0
+	mov	SRMMU_CTX_REG, %g7
+	lda	[%g7] ASI_M_MMUREGS, %o3
+	sta	%o0, [%g7] ASI_M_MMUREGS
+	add	%o2, -PAGE_SIZE, %o0
+1:
+	or	%o0, 0x400, %g7
+	lda	[%g7] ASI_M_FLUSH_PROBE, %g7
+	orcc	%g7, 0, %g0
+	be,a	3f
+	 mov	%o0, %o2
+	add	%o4, %g5, %g7
+2:
+	sub	%o2, %g7, %o2
+	sta	%g0, [%o2 + %g0] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o2 + %o4] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o2 + %o5] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o2 + %g1] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o2 + %g2] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o2 + %g3] ASI_M_FLUSH_PAGE
+	andcc	%o2, 0xffc, %g0
+	sta	%g0, [%o2 + %g4] ASI_M_FLUSH_PAGE
+	bne	2b
+	 sta	%g0, [%o2 + %g5] ASI_M_FLUSH_PAGE
+3:
+	cmp	%o2, %o1
+	bne	1b
+	 add	%o2, -PAGE_SIZE, %o0
+	mov	SRMMU_FAULT_STATUS, %g5
+	lda	[%g5] ASI_M_MMUREGS, %g0
+	mov	SRMMU_CTX_REG, %g7
+	sta	%o3, [%g7] ASI_M_MMUREGS
+hypersparc_flush_cache_range_out:
+	retl
+	 nop
+
+	/* HyperSparc requires a valid mapping where we are about to flush
+	 * in order to check for a physical tag match during the flush.
+	 */
+	/* Verified, my ass... */
+hypersparc_flush_cache_page:
+	ld	[%o0 + VMA_VM_MM], %o0
+	ld	[%o0 + AOFF_mm_context], %g2
+#ifndef CONFIG_SMP
+	cmp	%g2, -1
+	be	hypersparc_flush_cache_page_out
+#endif
+	WINDOW_FLUSH(%g4, %g5)
+
+	sethi	%hi(vac_line_size), %g1
+	ld	[%g1 + %lo(vac_line_size)], %o4
+	mov	SRMMU_CTX_REG, %o3
+	andn	%o1, (PAGE_SIZE - 1), %o1
+	lda	[%o3] ASI_M_MMUREGS, %o2
+	sta	%g2, [%o3] ASI_M_MMUREGS
+	or	%o1, 0x400, %o5
+	lda	[%o5] ASI_M_FLUSH_PROBE, %g1
+	orcc	%g0, %g1, %g0
+	be	2f
+	 add	%o4, %o4, %o5
+	sub	%o1, -PAGE_SIZE, %o1
+	add	%o4, %o5, %g1
+	add	%o4, %g1, %g2
+	add	%o4, %g2, %g3
+	add	%o4, %g3, %g4
+	add	%o4, %g4, %g5
+	add	%o4, %g5, %g7
+
+	/* BLAMMO! */
+1:
+	sub	%o1, %g7, %o1
+	sta	%g0, [%o1 + %g0] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g1] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g2] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+	andcc	%o1, 0xffc, %g0
+	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+	bne	1b
+	 sta	%g0, [%o1 + %g5] ASI_M_FLUSH_PAGE
+2:
+	mov	SRMMU_FAULT_STATUS, %g7
+	mov	SRMMU_CTX_REG, %g4
+	lda	[%g7] ASI_M_MMUREGS, %g0
+	sta	%o2, [%g4] ASI_M_MMUREGS
+hypersparc_flush_cache_page_out:
+	retl
+	 nop
+
+hypersparc_flush_sig_insns:
+	flush	%o1
+	retl
+	 flush	%o1 + 4
+
+	/* HyperSparc is copy-back. */
+hypersparc_flush_page_to_ram:
+	sethi	%hi(vac_line_size), %g1
+	ld	[%g1 + %lo(vac_line_size)], %o4
+	andn	%o0, (PAGE_SIZE - 1), %o0
+	add	%o4, %o4, %o5
+	or	%o0, 0x400, %g7
+	lda	[%g7] ASI_M_FLUSH_PROBE, %g5
+	add	%o4, %o5, %g1
+	orcc	%g5, 0, %g0
+	be	2f
+	 add	%o4, %g1, %g2
+	add	%o4, %g2, %g3
+	sub	%o0, -PAGE_SIZE, %o0
+	add	%o4, %g3, %g4
+	add	%o4, %g4, %g5
+	add	%o4, %g5, %g7
+
+	/* BLAMMO! */
+1:
+	sub	%o0, %g7, %o0
+	sta	%g0, [%o0 + %g0] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o0 + %o5] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o0 + %g1] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_PAGE
+	andcc	%o0, 0xffc, %g0
+	sta	%g0, [%o0 + %g4] ASI_M_FLUSH_PAGE
+	bne	1b
+	 sta	%g0, [%o0 + %g5] ASI_M_FLUSH_PAGE
+2:
+	mov	SRMMU_FAULT_STATUS, %g1
+	retl
+	 lda	[%g1] ASI_M_MMUREGS, %g0
+
+	/* HyperSparc is IO cache coherent. */
+hypersparc_flush_page_for_dma:
+	retl
+	 nop
+
+	/* It was noted that at boot time a TLB flush all in a delay slot
+	 * can deliver an illegal instruction to the processor if the timing
+	 * is just right...
+	 */
+hypersparc_flush_tlb_all:
+	mov	0x400, %g1
+	sta	%g0, [%g1] ASI_M_FLUSH_PROBE
+	retl
+	 nop
+
+hypersparc_flush_tlb_mm:
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o1
+	lda	[%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+	cmp	%o1, -1
+	be	hypersparc_flush_tlb_mm_out
+#endif
+	 mov	0x300, %g2
+	sta	%o1, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
+hypersparc_flush_tlb_mm_out:
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+
+hypersparc_flush_tlb_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o3
+	lda	[%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+	cmp	%o3, -1
+	be	hypersparc_flush_tlb_range_out
+#endif
+	 sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	and	%o1, %o4, %o1
+	add	%o1, 0x200, %o1
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+1:
+	sub	%o1, %o4, %o1
+	cmp	%o1, %o2
+	blu,a	1b
+	 sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+hypersparc_flush_tlb_range_out:
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+
+hypersparc_flush_tlb_page:
+	ld	[%o0 + VMA_VM_MM], %o0
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o3
+	andn	%o1, (PAGE_SIZE - 1), %o1
+#ifndef CONFIG_SMP
+	cmp	%o3, -1
+	be	hypersparc_flush_tlb_page_out
+#endif
+	 lda	[%g1] ASI_M_MMUREGS, %g5
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+hypersparc_flush_tlb_page_out:
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+
+	__INIT
+	
+	/* High speed page clear/copy. */
+hypersparc_bzero_1page:
+/* NOTE: This routine has to be shorter than 40 insns --jj */
+	clr	%g1
+	mov	32, %g2
+	mov	64, %g3
+	mov	96, %g4
+	mov	128, %g5
+	mov	160, %g7
+	mov	192, %o2
+	mov	224, %o3
+	mov	16, %o1
+1:
+	stda	%g0, [%o0 + %g0] ASI_M_BFILL
+	stda	%g0, [%o0 + %g2] ASI_M_BFILL
+	stda	%g0, [%o0 + %g3] ASI_M_BFILL
+	stda	%g0, [%o0 + %g4] ASI_M_BFILL
+	stda	%g0, [%o0 + %g5] ASI_M_BFILL
+	stda	%g0, [%o0 + %g7] ASI_M_BFILL
+	stda	%g0, [%o0 + %o2] ASI_M_BFILL
+	stda	%g0, [%o0 + %o3] ASI_M_BFILL
+	subcc	%o1, 1, %o1
+	bne	1b
+	 add	%o0, 256, %o0
+
+	retl
+	 nop
+
+hypersparc_copy_1page:
+/* NOTE: This routine has to be shorter than 70 insns --jj */
+	sub	%o1, %o0, %o2		! difference
+	mov	16, %g1
+1:
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	add	%o0, 32, %o0
+	sta	%o0, [%o0 + %o2] ASI_M_BCOPY
+	subcc	%g1, 1, %g1
+	bne	1b
+	 add	%o0, 32, %o0
+
+	retl
+	 nop
+
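+	/* Descriptive note (not part of the original source): the loops below
+	 * copy the hypersparc_bzero_1page body over the generic bzero_1page
+	 * and the hypersparc_copy_1page body over __copy_1page, then flush
+	 * the whole I-cache so instruction fetch sees the patched routines.
+	 */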
+	.globl	hypersparc_setup_blockops
+hypersparc_setup_blockops:
+	sethi	%hi(bzero_1page), %o0
+	or	%o0, %lo(bzero_1page), %o0
+	sethi	%hi(hypersparc_bzero_1page), %o1
+	or	%o1, %lo(hypersparc_bzero_1page), %o1
+	sethi	%hi(hypersparc_copy_1page), %o2
+	or	%o2, %lo(hypersparc_copy_1page), %o2
+	ld	[%o1], %o4
+1:
+	add	%o1, 4, %o1
+	st	%o4, [%o0]
+	add	%o0, 4, %o0
+	cmp	%o1, %o2
+	bne	1b
+	 ld	[%o1], %o4
+	sethi	%hi(__copy_1page), %o0
+	or	%o0, %lo(__copy_1page), %o0
+	sethi	%hi(hypersparc_setup_blockops), %o2
+	or	%o2, %lo(hypersparc_setup_blockops), %o2
+	ld	[%o1], %o4
+1:
+	add	%o1, 4, %o1
+	st	%o4, [%o0]
+	add	%o0, 4, %o0
+	cmp	%o1, %o2
+	bne	1b
+	 ld	[%o1], %o4
+	sta	%g0, [%g0] ASI_M_FLUSH_IWHOLE
+	retl
+	 nop
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
new file mode 100644
index 0000000..92634d4
--- /dev/null
+++ b/arch/sparc/mm/init_32.c
@@ -0,0 +1,318 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/sparc/mm/init.c
+ *
+ *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1995 Eddie C. Dost (ecd@skynet.be)
+ *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ *  Copyright (C) 2000 Anton Blanchard (anton@samba.org)
+ */
+
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/initrd.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/pagemap.h>
+#include <linux/poison.h>
+#include <linux/gfp.h>
+
+#include <asm/sections.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/vaddrs.h>
+#include <asm/pgalloc.h>	/* bug in asm-generic/tlb.h: check_pgt_cache */
+#include <asm/setup.h>
+#include <asm/tlb.h>
+#include <asm/prom.h>
+#include <asm/leon.h>
+
+#include "mm_32.h"
+
+unsigned long *sparc_valid_addr_bitmap;
+EXPORT_SYMBOL(sparc_valid_addr_bitmap);
+
+unsigned long phys_base;
+EXPORT_SYMBOL(phys_base);
+
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
+
+/* Initial ramdisk setup */
+extern unsigned int sparc_ramdisk_image;
+extern unsigned int sparc_ramdisk_size;
+
+unsigned long highstart_pfn, highend_pfn;
+
+unsigned long last_valid_pfn;
+
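+/* Count the page frames that lie above max_low_pfn, i.e. pages reachable
+ * only through HIGHMEM.  (Descriptive comment, not part of the original
+ * source.)
+ */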
+unsigned long calc_highpages(void)
+{
+	int i;
+	int nr = 0;
+
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+
+		if (end_pfn <= max_low_pfn)
+			continue;
+
+		if (start_pfn < max_low_pfn)
+			start_pfn = max_low_pfn;
+
+		nr += end_pfn - start_pfn;
+	}
+
+	return nr;
+}
+
+static unsigned long calc_max_low_pfn(void)
+{
+	int i;
+	unsigned long tmp = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
+	unsigned long curr_pfn, last_pfn;
+
+	last_pfn = (sp_banks[0].base_addr + sp_banks[0].num_bytes) >> PAGE_SHIFT;
+	for (i = 1; sp_banks[i].num_bytes != 0; i++) {
+		curr_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+
+		if (curr_pfn >= tmp) {
+			if (last_pfn < tmp)
+				tmp = last_pfn;
+			break;
+		}
+
+		last_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+	}
+
+	return tmp;
+}
+
+static void __init find_ramdisk(unsigned long end_of_phys_memory)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	unsigned long size;
+
+	/* Now check the initial ramdisk, so that it does not extend past
+	 * the end of memory.
+	 */
+	if (sparc_ramdisk_image) {
+		if (sparc_ramdisk_image >= (unsigned long)&_end - 2 * PAGE_SIZE)
+			sparc_ramdisk_image -= KERNBASE;
+		initrd_start = sparc_ramdisk_image + phys_base;
+		initrd_end = initrd_start + sparc_ramdisk_size;
+		if (initrd_end > end_of_phys_memory) {
+			printk(KERN_CRIT "initrd extends beyond end of memory "
+			       "(0x%016lx > 0x%016lx)\ndisabling initrd\n",
+			       initrd_end, end_of_phys_memory);
+			initrd_start = 0;
+		} else {
+			/* Reserve the initrd image area. */
+			size = initrd_end - initrd_start;
+			memblock_reserve(initrd_start, size);
+
+			initrd_start = (initrd_start - phys_base) + PAGE_OFFSET;
+			initrd_end = (initrd_end - phys_base) + PAGE_OFFSET;
+		}
+	}
+#endif
+}
+
+unsigned long __init bootmem_init(unsigned long *pages_avail)
+{
+	unsigned long start_pfn, bytes_avail, size;
+	unsigned long end_of_phys_memory = 0;
+	unsigned long high_pages = 0;
+	int i;
+
+	memblock_set_bottom_up(true);
+	memblock_allow_resize();
+
+	bytes_avail = 0UL;
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		end_of_phys_memory = sp_banks[i].base_addr +
+			sp_banks[i].num_bytes;
+		bytes_avail += sp_banks[i].num_bytes;
+		if (cmdline_memory_size) {
+			if (bytes_avail > cmdline_memory_size) {
+				unsigned long slack = bytes_avail - cmdline_memory_size;
+
+				bytes_avail -= slack;
+				end_of_phys_memory -= slack;
+
+				sp_banks[i].num_bytes -= slack;
+				if (sp_banks[i].num_bytes == 0) {
+					sp_banks[i].base_addr = 0xdeadbeef;
+				} else {
+					memblock_add(sp_banks[i].base_addr,
+						     sp_banks[i].num_bytes);
+					sp_banks[i+1].num_bytes = 0;
+					sp_banks[i+1].base_addr = 0xdeadbeef;
+				}
+				break;
+			}
+		}
+		memblock_add(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+	}
+
+	/* Start with page aligned address of last symbol in kernel
+	 * image.
+	 */
+	start_pfn  = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));
+
+	/* Now shift down to get the real physical page frame number. */
+	start_pfn >>= PAGE_SHIFT;
+
+	max_pfn = end_of_phys_memory >> PAGE_SHIFT;
+
+	max_low_pfn = max_pfn;
+	highstart_pfn = highend_pfn = max_pfn;
+
+	if (max_low_pfn > pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT)) {
+		highstart_pfn = pfn_base + (SRMMU_MAXMEM >> PAGE_SHIFT);
+		max_low_pfn = calc_max_low_pfn();
+		high_pages = calc_highpages();
+		printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
+		    high_pages >> (20 - PAGE_SHIFT));
+	}
+
+	find_ramdisk(end_of_phys_memory);
+
+	/* Reserve the kernel text/data/bss. */
+	size = (start_pfn << PAGE_SHIFT) - phys_base;
+	memblock_reserve(phys_base, size);
+
+	size = memblock_phys_mem_size() - memblock_reserved_size();
+	*pages_avail = (size >> PAGE_SHIFT) - high_pages;
+
+	return max_pfn;
+}
+
+/*
+ * paging_init() sets up the page tables: We call the MMU specific
+ * init routine based upon the Sun model type on the Sparc.
+ *
+ */
+void __init paging_init(void)
+{
+	srmmu_paging_init();
+	prom_build_devicetree();
+	of_fill_in_cpu_data();
+	device_scan();
+}
+
+static void __init taint_real_pages(void)
+{
+	int i;
+
+	for (i = 0; sp_banks[i].num_bytes; i++) {
+		unsigned long start, end;
+
+		start = sp_banks[i].base_addr;
+		end = start + sp_banks[i].num_bytes;
+
+		while (start < end) {
+			set_bit(start >> 20, sparc_valid_addr_bitmap);
+			start += PAGE_SIZE;
+		}
+	}
+}
+
+static void map_high_region(unsigned long start_pfn, unsigned long end_pfn)
+{
+	unsigned long tmp;
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+	printk("mapping high region %08lx - %08lx\n", start_pfn, end_pfn);
+#endif
+
+	for (tmp = start_pfn; tmp < end_pfn; tmp++)
+		free_highmem_page(pfn_to_page(tmp));
+}
+
+void __init mem_init(void)
+{
+	int i;
+
+	if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
+		prom_printf("BUG: fixmap and pkmap areas overlap\n");
+		prom_printf("pkbase: 0x%lx pkend: 0x%lx fixstart 0x%lx\n",
+		       PKMAP_BASE,
+		       (unsigned long)PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+		       FIXADDR_START);
+		prom_printf("Please mail sparclinux@vger.kernel.org.\n");
+		prom_halt();
+	}
+
+
+	/* Saves us work later. */
+	memset((void *)empty_zero_page, 0, PAGE_SIZE);
+
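+	/* sparc_valid_addr_bitmap holds one bit per megabyte of physical
+	 * address space, packed 32 bits per word: shift by (20 - PAGE_SHIFT)
+	 * to go from pfn to MB, then by 5 more to get the word count.
+	 * (Descriptive comment, not part of the original source.)
+	 */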
+	i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
+	i += 1;
+	sparc_valid_addr_bitmap = (unsigned long *)
+		__alloc_bootmem(i << 2, SMP_CACHE_BYTES, 0UL);
+
+	if (sparc_valid_addr_bitmap == NULL) {
+		prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
+		prom_halt();
+	}
+	memset(sparc_valid_addr_bitmap, 0, i << 2);
+
+	taint_real_pages();
+
+	max_mapnr = last_valid_pfn - pfn_base;
+	high_memory = __va(max_low_pfn << PAGE_SHIFT);
+	free_all_bootmem();
+
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;
+		unsigned long end_pfn = (sp_banks[i].base_addr + sp_banks[i].num_bytes) >> PAGE_SHIFT;
+
+		if (end_pfn <= highstart_pfn)
+			continue;
+
+		if (start_pfn < highstart_pfn)
+			start_pfn = highstart_pfn;
+
+		map_high_region(start_pfn, end_pfn);
+	}
+
+	mem_init_print_info(NULL);
+}
+
+void free_initmem (void)
+{
+	free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+			   "initrd");
+}
+#endif
+
+void sparc_flush_page_to_ram(struct page *page)
+{
+	unsigned long vaddr = (unsigned long)page_address(page);
+
+	if (vaddr)
+		__flush_page_to_ram(vaddr);
+}
+EXPORT_SYMBOL(sparc_flush_page_to_ram);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
new file mode 100644
index 0000000..39822f6
--- /dev/null
+++ b/arch/sparc/mm/init_64.c
@@ -0,0 +1,3232 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  arch/sparc64/mm/init.c
+ *
+ *  Copyright (C) 1996-1999 David S. Miller (davem@caip.rutgers.edu)
+ *  Copyright (C) 1997-1999 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ */
+ 
+#include <linux/extable.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/initrd.h>
+#include <linux/swap.h>
+#include <linux/pagemap.h>
+#include <linux/poison.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <linux/kprobes.h>
+#include <linux/cache.h>
+#include <linux/sort.h>
+#include <linux/ioport.h>
+#include <linux/percpu.h>
+#include <linux/memblock.h>
+#include <linux/mmzone.h>
+#include <linux/gfp.h>
+
+#include <asm/head.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/oplib.h>
+#include <asm/iommu.h>
+#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/dma.h>
+#include <asm/starfire.h>
+#include <asm/tlb.h>
+#include <asm/spitfire.h>
+#include <asm/sections.h>
+#include <asm/tsb.h>
+#include <asm/hypervisor.h>
+#include <asm/prom.h>
+#include <asm/mdesc.h>
+#include <asm/cpudata.h>
+#include <asm/setup.h>
+#include <asm/irq.h>
+
+#include "init_64.h"
+
+unsigned long kern_linear_pte_xor[4] __read_mostly;
+static unsigned long page_cache4v_flag;
+
+/* A bitmap, two bits for every 256MB of physical memory.  These two
+ * bits determine what page size we use for kernel linear
+ * translations.  They form an index into kern_linear_pte_xor[].  The
+ * value in the indexed slot is XOR'd with the TLB miss virtual
+ * address to form the resulting TTE.  The mapping is:
+ *
+ *	0	==>	4MB
+ *	1	==>	256MB
+ *	2	==>	2GB
+ *	3	==>	16GB
+ *
+ * All sun4v chips support 256MB pages.  Only SPARC-T4 and later
+ * support 2GB pages, and hopefully future cpus will support the 16GB
+ * pages as well.  For slots 2 and 3, we encode a 256MB TTE xor there
+ * if these larger page sizes are not supported by the cpu.
+ *
+ * It would be nice to determine this from the machine description
+ * 'cpu' properties, but we need to have this table set up before the
+ * MDESC is initialized.
+ */
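+
+/* Illustrative sketch (not part of the original source): conceptually the
+ * linear-mapping TLB miss path does
+ *
+ *	idx = two_bits_for(vaddr);		hypothetical helper
+ *	tte = vaddr ^ kern_linear_pte_xor[idx];
+ *
+ * so slot 0 yields a 4MB TTE, slot 1 a 256MB TTE, and so on; the real
+ * lookup lives in the assembler trap handlers.
+ */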
+
+#ifndef CONFIG_DEBUG_PAGEALLOC
+/* A special kernel TSB for 4MB, 256MB, 2GB and 16GB linear mappings.
+ * Space is allocated for this right after the trap table in
+ * arch/sparc64/kernel/head.S
+ */
+extern struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+#endif
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+
+static unsigned long cpu_pgsz_mask;
+
+#define MAX_BANKS	1024
+
+static struct linux_prom64_registers pavail[MAX_BANKS];
+static int pavail_ents;
+
+u64 numa_latency[MAX_NUMNODES][MAX_NUMNODES];
+
+static int cmp_p64(const void *a, const void *b)
+{
+	const struct linux_prom64_registers *x = a, *y = b;
+
+	if (x->phys_addr > y->phys_addr)
+		return 1;
+	if (x->phys_addr < y->phys_addr)
+		return -1;
+	return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+				   struct linux_prom64_registers *regs,
+				   int *num_ents)
+{
+	phandle node = prom_finddevice("/memory");
+	int prop_size = prom_getproplen(node, property);
+	int ents, ret, i;
+
+	ents = prop_size / sizeof(struct linux_prom64_registers);
+	if (ents > MAX_BANKS) {
+		prom_printf("The machine has more %s property entries than "
+			    "this kernel can support (%d).\n",
+			    property, MAX_BANKS);
+		prom_halt();
+	}
+
+	ret = prom_getproperty(node, property, (char *) regs, prop_size);
+	if (ret == -1) {
+		prom_printf("Couldn't get %s property from /memory.\n",
+				property);
+		prom_halt();
+	}
+
+	/* Sanitize what we got from the firmware, by page aligning
+	 * everything.
+	 */
+	for (i = 0; i < ents; i++) {
+		unsigned long base, size;
+
+		base = regs[i].phys_addr;
+		size = regs[i].reg_size;
+
+		size &= PAGE_MASK;
+		if (base & ~PAGE_MASK) {
+			unsigned long new_base = PAGE_ALIGN(base);
+
+			size -= new_base - base;
+			if ((long) size < 0L)
+				size = 0UL;
+			base = new_base;
+		}
+		if (size == 0UL) {
+			/* If it is empty, simply get rid of it.
+			 * This simplifies the logic of the other
+			 * functions that process these arrays.
+			 */
+			memmove(&regs[i], &regs[i + 1],
+				(ents - i - 1) * sizeof(regs[0]));
+			i--;
+			ents--;
+			continue;
+		}
+		regs[i].phys_addr = base;
+		regs[i].reg_size = size;
+	}
+
+	*num_ents = ents;
+
+	sort(regs, ents, sizeof(struct linux_prom64_registers),
+	     cmp_p64, NULL);
+}
+
+/* Kernel physical address base and size in bytes.  */
+unsigned long kern_base __read_mostly;
+unsigned long kern_size __read_mostly;
+
+/* Initial ramdisk setup */
+extern unsigned long sparc_ramdisk_image64;
+extern unsigned int sparc_ramdisk_image;
+extern unsigned int sparc_ramdisk_size;
+
+struct page *mem_map_zero __read_mostly;
+EXPORT_SYMBOL(mem_map_zero);
+
+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
+
+int num_kernel_image_mappings;
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+atomic_t dcpage_flushes = ATOMIC_INIT(0);
+#ifdef CONFIG_SMP
+atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
+#endif
+#endif
+
+inline void flush_dcache_page_impl(struct page *page)
+{
+	BUG_ON(tlb_type == hypervisor);
+#ifdef CONFIG_DEBUG_DCFLUSH
+	atomic_inc(&dcpage_flushes);
+#endif
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	__flush_dcache_page(page_address(page),
+			    ((tlb_type == spitfire) &&
+			     page_mapping_file(page) != NULL));
+#else
+	if (page_mapping_file(page) != NULL &&
+	    tlb_type == spitfire)
+		__flush_icache_page(__pa(page_address(page)));
+#endif
+}
+
+#define PG_dcache_dirty		PG_arch_1
+#define PG_dcache_cpu_shift	32UL
+#define PG_dcache_cpu_mask	\
+	((1UL<<ilog2(roundup_pow_of_two(NR_CPUS)))-1UL)
+
+#define dcache_dirty_cpu(page) \
+	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
+
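+/* Illustrative sketch (not part of the original source): the casx loops in
+ * the two helpers below are roughly equivalent to
+ *
+ *	do {
+ *		old = page->flags;
+ *		new = (old & non_cpu_bits) | mask;
+ *	} while (cmpxchg(&page->flags, old, new) != old);
+ *
+ * i.e. they atomically record (or clear) the dirtying cpu and the
+ * PG_dcache_dirty bit without disturbing the other page flags.
+ */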
+static inline void set_dcache_dirty(struct page *page, int this_cpu)
+{
+	unsigned long mask = this_cpu;
+	unsigned long non_cpu_bits;
+
+	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
+	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
+
+	__asm__ __volatile__("1:\n\t"
+			     "ldx	[%2], %%g7\n\t"
+			     "and	%%g7, %1, %%g1\n\t"
+			     "or	%%g1, %0, %%g1\n\t"
+			     "casx	[%2], %%g7, %%g1\n\t"
+			     "cmp	%%g7, %%g1\n\t"
+			     "bne,pn	%%xcc, 1b\n\t"
+			     " nop"
+			     : /* no outputs */
+			     : "r" (mask), "r" (non_cpu_bits), "r" (&page->flags)
+			     : "g1", "g7");
+}
+
+static inline void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
+{
+	unsigned long mask = (1UL << PG_dcache_dirty);
+
+	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
+			     "1:\n\t"
+			     "ldx	[%2], %%g7\n\t"
+			     "srlx	%%g7, %4, %%g1\n\t"
+			     "and	%%g1, %3, %%g1\n\t"
+			     "cmp	%%g1, %0\n\t"
+			     "bne,pn	%%icc, 2f\n\t"
+			     " andn	%%g7, %1, %%g1\n\t"
+			     "casx	[%2], %%g7, %%g1\n\t"
+			     "cmp	%%g7, %%g1\n\t"
+			     "bne,pn	%%xcc, 1b\n\t"
+			     " nop\n"
+			     "2:"
+			     : /* no outputs */
+			     : "r" (cpu), "r" (mask), "r" (&page->flags),
+			       "i" (PG_dcache_cpu_mask),
+			       "i" (PG_dcache_cpu_shift)
+			     : "g1", "g7");
+}
+
+static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long pte)
+{
+	unsigned long tsb_addr = (unsigned long) ent;
+
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+		tsb_addr = __pa(tsb_addr);
+
+	__tsb_insert(tsb_addr, tag, pte);
+}
+
+unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
+
+static void flush_dcache(unsigned long pfn)
+{
+	struct page *page;
+
+	page = pfn_to_page(pfn);
+	if (page) {
+		unsigned long pg_flags;
+
+		pg_flags = page->flags;
+		if (pg_flags & (1UL << PG_dcache_dirty)) {
+			int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
+				   PG_dcache_cpu_mask);
+			int this_cpu = get_cpu();
+
+			/* This is just to optimize away some function calls
+			 * in the SMP case.
+			 */
+			if (cpu == this_cpu)
+				flush_dcache_page_impl(page);
+			else
+				smp_flush_dcache_page_impl(page, cpu);
+
+			clear_dcache_dirty_cpu(page, cpu);
+
+			put_cpu();
+		}
+	}
+}
+
+/* mm->context.lock must be held */
+static void __update_mmu_tsb_insert(struct mm_struct *mm, unsigned long tsb_index,
+				    unsigned long tsb_hash_shift, unsigned long address,
+				    unsigned long tte)
+{
+	struct tsb *tsb = mm->context.tsb_block[tsb_index].tsb;
+	unsigned long tag;
+
+	if (unlikely(!tsb))
+		return;
+
+	tsb += ((address >> tsb_hash_shift) &
+		(mm->context.tsb_block[tsb_index].tsb_nentries - 1UL));
+	tag = (address >> 22UL);
+	tsb_insert(tsb, tag, tte);
+}
+
+#ifdef CONFIG_HUGETLB_PAGE
+static void __init add_huge_page_size(unsigned long size)
+{
+	unsigned int order;
+
+	if (size_to_hstate(size))
+		return;
+
+	order = ilog2(size) - PAGE_SHIFT;
+	hugetlb_add_hstate(order);
+}
+
+static int __init hugetlbpage_init(void)
+{
+	add_huge_page_size(1UL << HPAGE_64K_SHIFT);
+	add_huge_page_size(1UL << HPAGE_SHIFT);
+	add_huge_page_size(1UL << HPAGE_256MB_SHIFT);
+	add_huge_page_size(1UL << HPAGE_2GB_SHIFT);
+
+	return 0;
+}
+
+arch_initcall(hugetlbpage_init);
+
+static void __init pud_huge_patch(void)
+{
+	struct pud_huge_patch_entry *p;
+	unsigned long addr;
+
+	p = &__pud_huge_patch;
+	addr = p->addr;
+	*(unsigned int *)addr = p->insn;
+
+	__asm__ __volatile__("flush %0" : : "r" (addr));
+}
+
+static int __init setup_hugepagesz(char *string)
+{
+	unsigned long long hugepage_size;
+	unsigned int hugepage_shift;
+	unsigned short hv_pgsz_idx;
+	unsigned int hv_pgsz_mask;
+	int rc = 0;
+
+	hugepage_size = memparse(string, &string);
+	hugepage_shift = ilog2(hugepage_size);
+
+	switch (hugepage_shift) {
+	case HPAGE_16GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_16GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_16GB;
+		pud_huge_patch();
+		break;
+	case HPAGE_2GB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_2GB;
+		hv_pgsz_idx = HV_PGSZ_IDX_2GB;
+		break;
+	case HPAGE_256MB_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_256MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_256MB;
+		break;
+	case HPAGE_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_4MB;
+		hv_pgsz_idx = HV_PGSZ_IDX_4MB;
+		break;
+	case HPAGE_64K_SHIFT:
+		hv_pgsz_mask = HV_PGSZ_MASK_64K;
+		hv_pgsz_idx = HV_PGSZ_IDX_64K;
+		break;
+	default:
+		hv_pgsz_mask = 0;
+	}
+
+	if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) {
+		hugetlb_bad_size();
+		pr_err("hugepagesz=%llu not supported by MMU.\n",
+			hugepage_size);
+		goto out;
+	}
+
+	add_huge_page_size(hugepage_size);
+	rc = 1;
+
+out:
+	return rc;
+}
+__setup("hugepagesz=", setup_hugepagesz);
+#endif	/* CONFIG_HUGETLB_PAGE */
+
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+	struct mm_struct *mm;
+	unsigned long flags;
+	bool is_huge_tsb;
+	pte_t pte = *ptep;
+
+	if (tlb_type != hypervisor) {
+		unsigned long pfn = pte_pfn(pte);
+
+		if (pfn_valid(pfn))
+			flush_dcache(pfn);
+	}
+
+	mm = vma->vm_mm;
+
+	/* Don't insert a non-valid PTE into the TSB, we'll deadlock.  */
+	if (!pte_accessible(mm, pte))
+		return;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	is_huge_tsb = false;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (mm->context.hugetlb_pte_count || mm->context.thp_pte_count) {
+		unsigned long hugepage_size = PAGE_SIZE;
+
+		if (is_vm_hugetlb_page(vma))
+			hugepage_size = huge_page_size(hstate_vma(vma));
+
+		if (hugepage_size >= PUD_SIZE) {
+			unsigned long mask = 0x1ffc00000UL;
+
+			/* Transfer bits [32:22] from address to resolve
+			 * at 4M granularity.
+			 */
+			pte_val(pte) &= ~mask;
+			pte_val(pte) |= (address & mask);
+		} else if (hugepage_size >= PMD_SIZE) {
+			/* We are fabricating 8MB pages using 4MB
+			 * real hw pages.
+			 */
+			pte_val(pte) |= (address & (1UL << REAL_HPAGE_SHIFT));
+		}
+
+		if (hugepage_size >= PMD_SIZE) {
+			__update_mmu_tsb_insert(mm, MM_TSB_HUGE,
+				REAL_HPAGE_SHIFT, address, pte_val(pte));
+			is_huge_tsb = true;
+		}
+	}
+#endif
+	if (!is_huge_tsb)
+		__update_mmu_tsb_insert(mm, MM_TSB_BASE, PAGE_SHIFT,
+					address, pte_val(pte));
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
+void flush_dcache_page(struct page *page)
+{
+	struct address_space *mapping;
+	int this_cpu;
+
+	if (tlb_type == hypervisor)
+		return;
+
+	/* Do not bother with the expensive D-cache flush if it
+	 * is merely the zero page.  The 'bigcore' testcase in GDB
+	 * causes this case to run millions of times.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	this_cpu = get_cpu();
+
+	mapping = page_mapping_file(page);
+	if (mapping && !mapping_mapped(mapping)) {
+		int dirty = test_bit(PG_dcache_dirty, &page->flags);
+		if (dirty) {
+			int dirty_cpu = dcache_dirty_cpu(page);
+
+			if (dirty_cpu == this_cpu)
+				goto out;
+			smp_flush_dcache_page_impl(page, dirty_cpu);
+		}
+		set_dcache_dirty(page, this_cpu);
+	} else {
+		/* We could delay the flush for the !page_mapping
+		 * case too.  But that case is for exec env/arg
+		 * pages and those are 99% certain to get
+		 * faulted into the tlb (and thus flushed) anyway.
+		 */
+		flush_dcache_page_impl(page);
+	}
+
+out:
+	put_cpu();
+}
+EXPORT_SYMBOL(flush_dcache_page);
+
+void __kprobes flush_icache_range(unsigned long start, unsigned long end)
+{
+	/* Cheetah and Hypervisor platform cpus have coherent I-cache. */
+	if (tlb_type == spitfire) {
+		unsigned long kaddr;
+
+		/* This code only runs on Spitfire cpus so this is
+		 * why we can assume _PAGE_PADDR_4U.
+		 */
+		for (kaddr = start; kaddr < end; kaddr += PAGE_SIZE) {
+			unsigned long paddr, mask = _PAGE_PADDR_4U;
+
+			if (kaddr >= PAGE_OFFSET)
+				paddr = kaddr & mask;
+			else {
+				pgd_t *pgdp = pgd_offset_k(kaddr);
+				pud_t *pudp = pud_offset(pgdp, kaddr);
+				pmd_t *pmdp = pmd_offset(pudp, kaddr);
+				pte_t *ptep = pte_offset_kernel(pmdp, kaddr);
+
+				paddr = pte_val(*ptep) & mask;
+			}
+			__flush_icache_page(paddr);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void mmu_info(struct seq_file *m)
+{
+	static const char *pgsz_strings[] = {
+		"8K", "64K", "512K", "4MB", "32MB",
+		"256MB", "2GB", "16GB",
+	};
+	int i, printed;
+
+	if (tlb_type == cheetah)
+		seq_printf(m, "MMU Type\t: Cheetah\n");
+	else if (tlb_type == cheetah_plus)
+		seq_printf(m, "MMU Type\t: Cheetah+\n");
+	else if (tlb_type == spitfire)
+		seq_printf(m, "MMU Type\t: Spitfire\n");
+	else if (tlb_type == hypervisor)
+		seq_printf(m, "MMU Type\t: Hypervisor (sun4v)\n");
+	else
+		seq_printf(m, "MMU Type\t: ???\n");
+
+	seq_printf(m, "MMU PGSZs\t: ");
+	printed = 0;
+	for (i = 0; i < ARRAY_SIZE(pgsz_strings); i++) {
+		if (cpu_pgsz_mask & (1UL << i)) {
+			seq_printf(m, "%s%s",
+				   printed ? "," : "", pgsz_strings[i]);
+			printed++;
+		}
+	}
+	seq_putc(m, '\n');
+
+#ifdef CONFIG_DEBUG_DCFLUSH
+	seq_printf(m, "DCPageFlushes\t: %d\n",
+		   atomic_read(&dcpage_flushes));
+#ifdef CONFIG_SMP
+	seq_printf(m, "DCPageFlushesXC\t: %d\n",
+		   atomic_read(&dcpage_flushes_xcall));
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_DEBUG_DCFLUSH */
+}
+
+struct linux_prom_translation prom_trans[512] __read_mostly;
+unsigned int prom_trans_ents __read_mostly;
+
+unsigned long kern_locked_tte_data;
+
+/* The obp translations are saved based on the 8K page size, since obp can
+ * use a mixture of page sizes.  Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in ktlb.S.
+ */
+static inline int in_obp_range(unsigned long vaddr)
+{
+	return (vaddr >= LOW_OBP_ADDRESS &&
+		vaddr < HI_OBP_ADDRESS);
+}
+
+static int cmp_ptrans(const void *a, const void *b)
+{
+	const struct linux_prom_translation *x = a, *y = b;
+
+	if (x->virt > y->virt)
+		return 1;
+	if (x->virt < y->virt)
+		return -1;
+	return 0;
+}
+
+/* Read OBP translations property into 'prom_trans[]'.  */
+static void __init read_obp_translations(void)
+{
+	int n, node, ents, first, last, i;
+
+	node = prom_finddevice("/virtual-memory");
+	n = prom_getproplen(node, "translations");
+	if (unlikely(n == 0 || n == -1)) {
+		prom_printf("prom_mappings: Couldn't get size.\n");
+		prom_halt();
+	}
+	if (unlikely(n > sizeof(prom_trans))) {
+		prom_printf("prom_mappings: Size %d is too big.\n", n);
+		prom_halt();
+	}
+
+	if ((n = prom_getproperty(node, "translations",
+				  (char *)&prom_trans[0],
+				  sizeof(prom_trans))) == -1) {
+		prom_printf("prom_mappings: Couldn't get property.\n");
+		prom_halt();
+	}
+
+	n = n / sizeof(struct linux_prom_translation);
+
+	ents = n;
+
+	sort(prom_trans, ents, sizeof(struct linux_prom_translation),
+	     cmp_ptrans, NULL);
+
+	/* Now kick out all the non-OBP entries.  */
+	for (i = 0; i < ents; i++) {
+		if (in_obp_range(prom_trans[i].virt))
+			break;
+	}
+	first = i;
+	for (; i < ents; i++) {
+		if (!in_obp_range(prom_trans[i].virt))
+			break;
+	}
+	last = i;
+
+	for (i = 0; i < (last - first); i++) {
+		struct linux_prom_translation *src = &prom_trans[i + first];
+		struct linux_prom_translation *dest = &prom_trans[i];
+
+		*dest = *src;
+	}
+	for (; i < ents; i++) {
+		struct linux_prom_translation *dest = &prom_trans[i];
+		dest->virt = dest->size = dest->data = 0x0UL;
+	}
+
+	prom_trans_ents = last - first;
+
+	if (tlb_type == spitfire) {
+		/* Clear diag TTE bits. */
+		for (i = 0; i < prom_trans_ents; i++)
+			prom_trans[i].data &= ~0x0003fe0000000000UL;
+	}
+
+	/* Force execute bit on.  */
+	for (i = 0; i < prom_trans_ents; i++)
+		prom_trans[i].data |= (tlb_type == hypervisor ?
+				       _PAGE_EXEC_4V : _PAGE_EXEC_4U);
+}
+
+static void __init hypervisor_tlb_lock(unsigned long vaddr,
+				       unsigned long pte,
+				       unsigned long mmu)
+{
+	unsigned long ret = sun4v_mmu_map_perm_addr(vaddr, 0, pte, mmu);
+
+	if (ret != 0) {
+		prom_printf("hypervisor_tlb_lock[%lx:%x:%lx:%lx]: "
+			    "errors with %lx\n", vaddr, 0, pte, mmu, ret);
+		prom_halt();
+	}
+}
+
+static unsigned long kern_large_tte(unsigned long paddr);
+
+static void __init remap_kernel(void)
+{
+	unsigned long phys_page, tte_vaddr, tte_data;
+	int i, tlb_ent = sparc64_highest_locked_tlbent();
+
+	tte_vaddr = (unsigned long) KERNBASE;
+	phys_page = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
+	tte_data = kern_large_tte(phys_page);
+
+	kern_locked_tte_data = tte_data;
+
+	/* Now lock us into the TLBs via Hypervisor or OBP. */
+	if (tlb_type == hypervisor) {
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_DMMU);
+			hypervisor_tlb_lock(tte_vaddr, tte_data, HV_MMU_IMMU);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
+		}
+	} else {
+		for (i = 0; i < num_kernel_image_mappings; i++) {
+			prom_dtlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			prom_itlb_load(tlb_ent - i, tte_data, tte_vaddr);
+			tte_vaddr += 0x400000;
+			tte_data += 0x400000;
+		}
+		sparc64_highest_unlocked_tlb_ent = tlb_ent - i;
+	}
+	if (tlb_type == cheetah_plus) {
+		sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+					    CTX_CHEETAH_PLUS_NUC);
+		sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+		sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
+	}
+}
+
+
+static void __init inherit_prom_mappings(void)
+{
+	/* Now fixup OBP's idea about where we really are mapped. */
+	printk("Remapping the kernel... ");
+	remap_kernel();
+	printk("done.\n");
+}
+
+void prom_world(int enter)
+{
+	if (!enter)
+		set_fs(get_fs());
+
+	__asm__ __volatile__("flushw");
+}
+
+void __flush_dcache_range(unsigned long start, unsigned long end)
+{
+	unsigned long va;
+
+	if (tlb_type == spitfire) {
+		int n = 0;
+
+		for (va = start; va < end; va += 32) {
+			spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+			if (++n >= 512)
+				break;
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		start = __pa(start);
+		end = __pa(end);
+		for (va = start; va < end; va += 32)
+			__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+					     "membar #Sync"
+					     : /* no outputs */
+					     : "r" (va),
+					       "i" (ASI_DCACHE_INVALIDATE));
+	}
+}
+EXPORT_SYMBOL(__flush_dcache_range);
+
+/* get_new_mmu_context() uses "cache + 1".  */
+DEFINE_SPINLOCK(ctx_alloc_lock);
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
+#define MAX_CTX_NR	(1UL << CTX_NR_BITS)
+#define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
+DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
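+
+/* Descriptive note (not part of the original source): an mm's
+ * sparc64_ctx_val packs a generation number in the bits covered by
+ * CTX_VERSION_MASK and the context number proper in the low CTX_NR_BITS
+ * bits, roughly
+ *
+ *	ctx = (tlb_context_cache & CTX_VERSION_MASK) | ctx_nr;
+ *
+ * get_new_mmu_context() hands out context numbers from mmu_context_bmap,
+ * and mmu_context_wrap() bumps the version when the number space wraps.
+ */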
+
+static void mmu_context_wrap(void)
+{
+	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+	unsigned long new_ver, new_ctx, old_ctx;
+	struct mm_struct *mm;
+	int cpu;
+
+	bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+	/* Reserve kernel context */
+	set_bit(0, mmu_context_bmap);
+
+	new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+	if (unlikely(new_ver == 0))
+		new_ver = CTX_FIRST_VERSION;
+	tlb_context_cache = new_ver;
+
+	/*
+	 * Make sure that any new mm that is added into per_cpu_secondary_mm
+	 * goes through the get_new_mmu_context() path.
+	 */
+	mb();
+
+	/*
+	 * Update the version to the current one on those CPUs that had valid
+	 * secondary contexts.
+	 */
+	for_each_online_cpu(cpu) {
+		/*
+		 * If a new mm is stored after we took this mm from the array,
+		 * it will go into get_new_mmu_context() path, because we
+		 * already bumped the version in tlb_context_cache.
+		 */
+		mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+		if (unlikely(!mm || mm == &init_mm))
+			continue;
+
+		old_ctx = mm->context.sparc64_ctx_val;
+		if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+			set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+			mm->context.sparc64_ctx_val = new_ctx;
+		}
+	}
+}
+
+/* Caller does TLB context flushing on local CPU if necessary.
+ * The caller also ensures that CTX_VALID(mm->context) is false.
+ *
+ * We must be careful about boundary cases so that we never
+ * let the user have CTX 0 (nucleus) or we ever use a CTX
+ * version of zero (and thus NO_CONTEXT would not be caught
+ * by version mis-match tests in mmu_context.h).
+ *
+ * Always invoked with interrupts disabled.
+ */
+void get_new_mmu_context(struct mm_struct *mm)
+{
+	unsigned long ctx, new_ctx;
+	unsigned long orig_pgsz_bits;
+
+	spin_lock(&ctx_alloc_lock);
+retry:
+	/* wrap might have happened, test again if our context became valid */
+	if (unlikely(CTX_VALID(mm->context)))
+		goto out;
+	orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
+	ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
+	new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
+	if (new_ctx >= (1 << CTX_NR_BITS)) {
+		new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
+		if (new_ctx >= ctx) {
+			mmu_context_wrap();
+			goto retry;
+		}
+	}
+	if (mm->context.sparc64_ctx_val)
+		cpumask_clear(mm_cpumask(mm));
+	mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
+	new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
+	tlb_context_cache = new_ctx;
+	mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
+	spin_unlock(&ctx_alloc_lock);
+}
+
+static int numa_enabled = 1;
+static int numa_debug;
+
+static int __init early_numa(char *p)
+{
+	if (!p)
+		return 0;
+
+	if (strstr(p, "off"))
+		numa_enabled = 0;
+
+	if (strstr(p, "debug"))
+		numa_debug = 1;
+
+	return 0;
+}
+early_param("numa", early_numa);
+
+#define numadbg(f, a...) \
+do {	if (numa_debug) \
+		printk(KERN_INFO f, ## a); \
+} while (0)
+
+static void __init find_ramdisk(unsigned long phys_base)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (sparc_ramdisk_image || sparc_ramdisk_image64) {
+		unsigned long ramdisk_image;
+
+		/* Older versions of the bootloader only supported a
+		 * 32-bit physical address for the ramdisk image
+		 * location, stored at sparc_ramdisk_image.  Newer
+		 * SILO versions set sparc_ramdisk_image to zero and
+		 * provide a full 64-bit physical address at
+		 * sparc_ramdisk_image64.
+		 */
+		ramdisk_image = sparc_ramdisk_image;
+		if (!ramdisk_image)
+			ramdisk_image = sparc_ramdisk_image64;
+
+		/* Another bootloader quirk.  The bootloader normalizes
+		 * the physical address to KERNBASE, so we have to
+		 * factor that back out and add in the lowest valid
+		 * physical page address to get the true physical address.
+		 */
+		ramdisk_image -= KERNBASE;
+		ramdisk_image += phys_base;
+
+		numadbg("Found ramdisk at physical address 0x%lx, size %u\n",
+			ramdisk_image, sparc_ramdisk_size);
+
+		initrd_start = ramdisk_image;
+		initrd_end = ramdisk_image + sparc_ramdisk_size;
+
+		memblock_reserve(initrd_start, sparc_ramdisk_size);
+
+		initrd_start += PAGE_OFFSET;
+		initrd_end += PAGE_OFFSET;
+	}
+#endif
+}
+
+struct node_mem_mask {
+	unsigned long mask;
+	unsigned long match;
+};
+static struct node_mem_mask node_masks[MAX_NUMNODES];
+static int num_node_masks;
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
+struct mdesc_mlgroup {
+	u64	node;
+	u64	latency;
+	u64	match;
+	u64	mask;
+};
+
+static struct mdesc_mlgroup *mlgroups;
+static int num_mlgroups;
+
+int numa_cpu_lookup_table[NR_CPUS];
+cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
+
+struct mdesc_mblock {
+	u64	base;
+	u64	size;
+	u64	offset; /* RA-to-PA */
+};
+static struct mdesc_mblock *mblocks;
+static int num_mblocks;
+
+static struct mdesc_mblock * __init addr_to_mblock(unsigned long addr)
+{
+	struct mdesc_mblock *m = NULL;
+	int i;
+
+	for (i = 0; i < num_mblocks; i++) {
+		m = &mblocks[i];
+
+		if (addr >= m->base &&
+		    addr < (m->base + m->size)) {
+			break;
+		}
+	}
+
+	return m;
+}
+
+static u64 __init memblock_nid_range_sun4u(u64 start, u64 end, int *nid)
+{
+	int prev_nid, new_nid;
+
+	prev_nid = -1;
+	for ( ; start < end; start += PAGE_SIZE) {
+		for (new_nid = 0; new_nid < num_node_masks; new_nid++) {
+			struct node_mem_mask *p = &node_masks[new_nid];
+
+			if ((start & p->mask) == p->match) {
+				if (prev_nid == -1)
+					prev_nid = new_nid;
+				break;
+			}
+		}
+
+		if (new_nid == num_node_masks) {
+			prev_nid = 0;
+			WARN_ONCE(1, "addr[%Lx] doesn't match a NUMA node rule. Some memory will be owned by node 0.",
+				  start);
+			break;
+		}
+
+		if (prev_nid != new_nid)
+			break;
+	}
+	*nid = prev_nid;
+
+	return start > end ? end : start;
+}
+
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
+{
+	u64 ret_end, pa_start, m_mask, m_match, m_end;
+	struct mdesc_mblock *mblock;
+	int _nid, i;
+
+	if (tlb_type != hypervisor)
+		return memblock_nid_range_sun4u(start, end, nid);
+
+	mblock = addr_to_mblock(start);
+	if (!mblock) {
+		WARN_ONCE(1, "memblock_nid_range: Can't find mblock addr[%Lx]",
+			  start);
+
+		_nid = 0;
+		ret_end = end;
+		goto done;
+	}
+
+	pa_start = start + mblock->offset;
+	m_match = 0;
+	m_mask = 0;
+
+	for (_nid = 0; _nid < num_node_masks; _nid++) {
+		struct node_mem_mask *const m = &node_masks[_nid];
+
+		if ((pa_start & m->mask) == m->match) {
+			m_match = m->match;
+			m_mask = m->mask;
+			break;
+		}
+	}
+
+	if (num_node_masks == _nid) {
+		/* We could not find a NUMA group, so default to 0, but
+		 * search for a latency group so that we can calculate the
+		 * correct end address to return.
+		 */
+		_nid = 0;
+
+		for (i = 0; i < num_mlgroups; i++) {
+			struct mdesc_mlgroup *const m = &mlgroups[i];
+
+			if ((pa_start & m->mask) == m->match) {
+				m_match = m->match;
+				m_mask = m->mask;
+				break;
+			}
+		}
+
+		if (i == num_mlgroups) {
+			WARN_ONCE(1, "memblock_nid_range: Can't find latency group addr[%Lx]",
+				  start);
+
+			ret_end = end;
+			goto done;
+		}
+	}
+
+	/*
+	 * Each latency group has match and mask, and each memory block has an
+	 * offset.  An address belongs to a latency group if its address matches
+	 * the following formula: ((addr + offset) & mask) == match
+	 * It is, however, slow to check every single page against a
+	 * particular latency group.  As an optimization we calculate the end
+	 * value using bit arithmetic.
+	 */
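+	/* Descriptive note (not part of the original source):
+	 * (1UL << __ffs(m_mask)) is the size of one contiguous stripe owned
+	 * by this group, and pa_start & ~((1UL << fls64(m_mask)) - 1) keeps
+	 * the address bits above the mask, so m_end lands at the end of the
+	 * stripe containing pa_start; subtracting mblock->offset converts
+	 * the result back from PA to RA.
+	 */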
+	m_end = m_match + (1ul << __ffs(m_mask)) - mblock->offset;
+	m_end += pa_start & ~((1ul << fls64(m_mask)) - 1);
+	ret_end = m_end > end ? end : m_end;
+
+done:
+	*nid = _nid;
+	return ret_end;
+}
+#endif
+
+/* This must be invoked after performing all of the necessary
+ * memblock_set_node() calls for 'nid'.  We need to be able to get
+ * correct data from get_pfn_range_for_nid().
+ */
+static void __init allocate_node_data(int nid)
+{
+	struct pglist_data *p;
+	unsigned long start_pfn, end_pfn;
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	unsigned long paddr;
+
+	paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
+	if (!paddr) {
+		prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
+		prom_halt();
+	}
+	NODE_DATA(nid) = __va(paddr);
+	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
+
+	NODE_DATA(nid)->node_id = nid;
+#endif
+
+	p = NODE_DATA(nid);
+
+	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
+	p->node_start_pfn = start_pfn;
+	p->node_spanned_pages = end_pfn - start_pfn;
+}
+
+static void init_node_masks_nonnuma(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int i;
+#endif
+
+	numadbg("Initializing tables for non-numa.\n");
+
+	node_masks[0].mask = 0;
+	node_masks[0].match = 0;
+	num_node_masks = 1;
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	for (i = 0; i < NR_CPUS; i++)
+		numa_cpu_lookup_table[i] = 0;
+
+	cpumask_setall(&numa_cpumask_lookup_table[0]);
+#endif
+}
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+struct pglist_data *node_data[MAX_NUMNODES];
+
+EXPORT_SYMBOL(numa_cpu_lookup_table);
+EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_data);
+
+static int scan_pio_for_cfg_handle(struct mdesc_handle *md, u64 pio,
+				   u32 cfg_handle)
+{
+	u64 arc;
+
+	mdesc_for_each_arc(arc, md, pio, MDESC_ARC_TYPE_FWD) {
+		u64 target = mdesc_arc_target(md, arc);
+		const u64 *val;
+
+		val = mdesc_get_property(md, target,
+					 "cfg-handle", NULL);
+		if (val && *val == cfg_handle)
+			return 0;
+	}
+	return -ENODEV;
+}
+
+static int scan_arcs_for_cfg_handle(struct mdesc_handle *md, u64 grp,
+				    u32 cfg_handle)
+{
+	u64 arc, candidate, best_latency = ~(u64)0;
+
+	candidate = MDESC_NODE_NULL;
+	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
+		u64 target = mdesc_arc_target(md, arc);
+		const char *name = mdesc_node_name(md, target);
+		const u64 *val;
+
+		if (strcmp(name, "pio-latency-group"))
+			continue;
+
+		val = mdesc_get_property(md, target, "latency", NULL);
+		if (!val)
+			continue;
+
+		if (*val < best_latency) {
+			candidate = target;
+			best_latency = *val;
+		}
+	}
+
+	if (candidate == MDESC_NODE_NULL)
+		return -ENODEV;
+
+	return scan_pio_for_cfg_handle(md, candidate, cfg_handle);
+}
+
+int of_node_to_nid(struct device_node *dp)
+{
+	const struct linux_prom64_registers *regs;
+	struct mdesc_handle *md;
+	u32 cfg_handle;
+	int count, nid;
+	u64 grp;
+
+	/* This is the right thing to do on currently supported
+	 * SUN4U NUMA platforms as well, as the PCI controller does
+	 * not sit behind any particular memory controller.
+	 */
+	if (!mlgroups)
+		return -1;
+
+	regs = of_get_property(dp, "reg", NULL);
+	if (!regs)
+		return -1;
+
+	cfg_handle = (regs->phys_addr >> 32UL) & 0x0fffffff;
+
+	md = mdesc_grab();
+
+	count = 0;
+	nid = -1;
+	mdesc_for_each_node_by_name(md, grp, "group") {
+		if (!scan_arcs_for_cfg_handle(md, grp, cfg_handle)) {
+			nid = count;
+			break;
+		}
+		count++;
+	}
+
+	mdesc_release(md);
+
+	return nid;
+}
+
+static void __init add_node_ranges(void)
+{
+	struct memblock_region *reg;
+	unsigned long prev_max;
+
+memblock_resized:
+	prev_max = memblock.memory.max;
+
+	for_each_memblock(memory, reg) {
+		unsigned long size = reg->size;
+		unsigned long start, end;
+
+		start = reg->base;
+		end = start + size;
+		while (start < end) {
+			unsigned long this_end;
+			int nid;
+
+			this_end = memblock_nid_range(start, end, &nid);
+
+			numadbg("Setting memblock NUMA node nid[%d] "
+				"start[%lx] end[%lx]\n",
+				nid, start, this_end);
+
+			memblock_set_node(start, this_end - start,
+					  &memblock.memory, nid);
+			if (memblock.memory.max != prev_max)
+				goto memblock_resized;
+			start = this_end;
+		}
+	}
+}
+
+static int __init grab_mlgroups(struct mdesc_handle *md)
+{
+	unsigned long paddr;
+	int count = 0;
+	u64 node;
+
+	mdesc_for_each_node_by_name(md, node, "memory-latency-group")
+		count++;
+	if (!count)
+		return -ENOENT;
+
+	paddr = memblock_alloc(count * sizeof(struct mdesc_mlgroup),
+			  SMP_CACHE_BYTES);
+	if (!paddr)
+		return -ENOMEM;
+
+	mlgroups = __va(paddr);
+	num_mlgroups = count;
+
+	count = 0;
+	mdesc_for_each_node_by_name(md, node, "memory-latency-group") {
+		struct mdesc_mlgroup *m = &mlgroups[count++];
+		const u64 *val;
+
+		m->node = node;
+
+		val = mdesc_get_property(md, node, "latency", NULL);
+		m->latency = *val;
+		val = mdesc_get_property(md, node, "address-match", NULL);
+		m->match = *val;
+		val = mdesc_get_property(md, node, "address-mask", NULL);
+		m->mask = *val;
+
+		numadbg("MLGROUP[%d]: node[%llx] latency[%llx] "
+			"match[%llx] mask[%llx]\n",
+			count - 1, m->node, m->latency, m->match, m->mask);
+	}
+
+	return 0;
+}
+
+static int __init grab_mblocks(struct mdesc_handle *md)
+{
+	unsigned long paddr;
+	int count = 0;
+	u64 node;
+
+	mdesc_for_each_node_by_name(md, node, "mblock")
+		count++;
+	if (!count)
+		return -ENOENT;
+
+	paddr = memblock_alloc(count * sizeof(struct mdesc_mblock),
+			  SMP_CACHE_BYTES);
+	if (!paddr)
+		return -ENOMEM;
+
+	mblocks = __va(paddr);
+	num_mblocks = count;
+
+	count = 0;
+	mdesc_for_each_node_by_name(md, node, "mblock") {
+		struct mdesc_mblock *m = &mblocks[count++];
+		const u64 *val;
+
+		val = mdesc_get_property(md, node, "base", NULL);
+		m->base = *val;
+		val = mdesc_get_property(md, node, "size", NULL);
+		m->size = *val;
+		val = mdesc_get_property(md, node,
+					 "address-congruence-offset", NULL);
+
+		/* The address-congruence-offset property is optional.
+		 * Explicitly zero it to identify this case.
+		 */
+		if (val)
+			m->offset = *val;
+		else
+			m->offset = 0UL;
+
+		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
+			count - 1, m->base, m->size, m->offset);
+	}
+
+	return 0;
+}
+
+static void __init numa_parse_mdesc_group_cpus(struct mdesc_handle *md,
+					       u64 grp, cpumask_t *mask)
+{
+	u64 arc;
+
+	cpumask_clear(mask);
+
+	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_BACK) {
+		u64 target = mdesc_arc_target(md, arc);
+		const char *name = mdesc_node_name(md, target);
+		const u64 *id;
+
+		if (strcmp(name, "cpu"))
+			continue;
+		id = mdesc_get_property(md, target, "id", NULL);
+		if (*id < nr_cpu_ids)
+			cpumask_set_cpu(*id, mask);
+	}
+}
+
+static struct mdesc_mlgroup * __init find_mlgroup(u64 node)
+{
+	int i;
+
+	for (i = 0; i < num_mlgroups; i++) {
+		struct mdesc_mlgroup *m = &mlgroups[i];
+		if (m->node == node)
+			return m;
+	}
+	return NULL;
+}
+
+int __node_distance(int from, int to)
+{
+	if ((from >= MAX_NUMNODES) || (to >= MAX_NUMNODES)) {
+		pr_warn("Returning default NUMA distance value for %d->%d\n",
+			from, to);
+		return (from == to) ? LOCAL_DISTANCE : REMOTE_DISTANCE;
+	}
+	return numa_latency[from][to];
+}
+EXPORT_SYMBOL(__node_distance);
+
+static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		struct node_mem_mask *n = &node_masks[i];
+
+		if ((grp->mask == n->mask) && (grp->match == n->match))
+			break;
+	}
+	return i;
+}
+
+static void __init find_numa_latencies_for_group(struct mdesc_handle *md,
+						 u64 grp, int index)
+{
+	u64 arc;
+
+	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
+		int tnode;
+		u64 target = mdesc_arc_target(md, arc);
+		struct mdesc_mlgroup *m = find_mlgroup(target);
+
+		if (!m)
+			continue;
+		tnode = find_best_numa_node_for_mlgroup(m);
+		if (tnode == MAX_NUMNODES)
+			continue;
+		numa_latency[index][tnode] = m->latency;
+	}
+}
+
+static int __init numa_attach_mlgroup(struct mdesc_handle *md, u64 grp,
+				      int index)
+{
+	struct mdesc_mlgroup *candidate = NULL;
+	u64 arc, best_latency = ~(u64)0;
+	struct node_mem_mask *n;
+
+	mdesc_for_each_arc(arc, md, grp, MDESC_ARC_TYPE_FWD) {
+		u64 target = mdesc_arc_target(md, arc);
+		struct mdesc_mlgroup *m = find_mlgroup(target);
+		if (!m)
+			continue;
+		if (m->latency < best_latency) {
+			candidate = m;
+			best_latency = m->latency;
+		}
+	}
+	if (!candidate)
+		return -ENOENT;
+
+	if (num_node_masks != index) {
+		printk(KERN_ERR "Inconsistent NUMA state, "
+		       "index[%d] != num_node_masks[%d]\n",
+		       index, num_node_masks);
+		return -EINVAL;
+	}
+
+	n = &node_masks[num_node_masks++];
+
+	n->mask = candidate->mask;
+	n->match = candidate->match;
+
+	numadbg("NUMA NODE[%d]: mask[%lx] match[%lx] (latency[%llx])\n",
+		index, n->mask, n->match, candidate->latency);
+
+	return 0;
+}
+
+static int __init numa_parse_mdesc_group(struct mdesc_handle *md, u64 grp,
+					 int index)
+{
+	cpumask_t mask;
+	int cpu;
+
+	numa_parse_mdesc_group_cpus(md, grp, &mask);
+
+	for_each_cpu(cpu, &mask)
+		numa_cpu_lookup_table[cpu] = index;
+	cpumask_copy(&numa_cpumask_lookup_table[index], &mask);
+
+	if (numa_debug) {
+		printk(KERN_INFO "NUMA GROUP[%d]: cpus [ ", index);
+		for_each_cpu(cpu, &mask)
+			printk("%d ", cpu);
+		printk("]\n");
+	}
+
+	return numa_attach_mlgroup(md, grp, index);
+}
+
+static int __init numa_parse_mdesc(void)
+{
+	struct mdesc_handle *md = mdesc_grab();
+	int i, j, err, count;
+	u64 node;
+
+	node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+	if (node == MDESC_NODE_NULL) {
+		mdesc_release(md);
+		return -ENOENT;
+	}
+
+	err = grab_mblocks(md);
+	if (err < 0)
+		goto out;
+
+	err = grab_mlgroups(md);
+	if (err < 0)
+		goto out;
+
+	count = 0;
+	mdesc_for_each_node_by_name(md, node, "group") {
+		err = numa_parse_mdesc_group(md, node, count);
+		if (err < 0)
+			break;
+		count++;
+	}
+
+	count = 0;
+	mdesc_for_each_node_by_name(md, node, "group") {
+		find_numa_latencies_for_group(md, node, count);
+		count++;
+	}
+
+	/* Normalize numa latency matrix according to ACPI SLIT spec. */
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		u64 self_latency = numa_latency[i][i];
+
+		for (j = 0; j < MAX_NUMNODES; j++) {
+			numa_latency[i][j] =
+				(numa_latency[i][j] * LOCAL_DISTANCE) /
+				self_latency;
+		}
+	}
+
+	add_node_ranges();
+
+	for (i = 0; i < num_node_masks; i++) {
+		allocate_node_data(i);
+		node_set_online(i);
+	}
+
+	err = 0;
+out:
+	mdesc_release(md);
+	return err;
+}
+
+static int __init numa_parse_jbus(void)
+{
+	unsigned long cpu, index;
+
+	/* NUMA node id is encoded in bits 36 and higher, and there is
+	 * a 1-to-1 mapping from CPU ID to NUMA node ID.
+	 */
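+	/* For example (descriptive note, not part of the original source):
+	 * cpu 1 owns the physical addresses whose bits above bit 35 equal 1,
+	 * i.e. the 64GB stripe starting at 1UL << 36.
+	 */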
+	index = 0;
+	for_each_present_cpu(cpu) {
+		numa_cpu_lookup_table[cpu] = index;
+		cpumask_copy(&numa_cpumask_lookup_table[index], cpumask_of(cpu));
+		node_masks[index].mask = ~((1UL << 36UL) - 1UL);
+		node_masks[index].match = cpu << 36UL;
+
+		index++;
+	}
+	num_node_masks = index;
+
+	add_node_ranges();
+
+	for (index = 0; index < num_node_masks; index++) {
+		allocate_node_data(index);
+		node_set_online(index);
+	}
+
+	return 0;
+}
+
+static int __init numa_parse_sun4u(void)
+{
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		unsigned long ver;
+
+		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
+		if ((ver >> 32UL) == __JALAPENO_ID ||
+		    (ver >> 32UL) == __SERRANO_ID)
+			return numa_parse_jbus();
+	}
+	return -1;
+}
+
+static int __init bootmem_init_numa(void)
+{
+	int i, j;
+	int err = -1;
+
+	numadbg("bootmem_init_numa()\n");
+
+	/* Some sane defaults for numa latency values */
+	for (i = 0; i < MAX_NUMNODES; i++) {
+		for (j = 0; j < MAX_NUMNODES; j++)
+			numa_latency[i][j] = (i == j) ?
+				LOCAL_DISTANCE : REMOTE_DISTANCE;
+	}
+
+	if (numa_enabled) {
+		if (tlb_type == hypervisor)
+			err = numa_parse_mdesc();
+		else
+			err = numa_parse_sun4u();
+	}
+	return err;
+}
+
+#else
+
+static int bootmem_init_numa(void)
+{
+	return -1;
+}
+
+#endif
+
+static void __init bootmem_init_nonnuma(void)
+{
+	unsigned long top_of_ram = memblock_end_of_DRAM();
+	unsigned long total_ram = memblock_phys_mem_size();
+
+	numadbg("bootmem_init_nonnuma()\n");
+
+	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
+	       top_of_ram, total_ram);
+	printk(KERN_INFO "Memory hole size: %ldMB\n",
+	       (top_of_ram - total_ram) >> 20);
+
+	init_node_masks_nonnuma();
+	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+	allocate_node_data(0);
+	node_set_online(0);
+}
+
+static unsigned long __init bootmem_init(unsigned long phys_base)
+{
+	unsigned long end_pfn;
+
+	end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
+	max_pfn = max_low_pfn = end_pfn;
+	min_low_pfn = (phys_base >> PAGE_SHIFT);
+
+	if (bootmem_init_numa() < 0)
+		bootmem_init_nonnuma();
+
+	/* Dump memblock with node info. */
+	memblock_dump_all();
+
+	/* XXX cpu notifier XXX */
+
+	sparse_memory_present_with_active_regions(MAX_NUMNODES);
+	sparse_init();
+
+	return end_pfn;
+}
+
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
+static unsigned long max_phys_bits = 40;
+
+bool kern_addr_valid(unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
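+	/* A negative address lies in the upper half of the VA space,
+	 * which holds the kernel linear mapping; translate it straight
+	 * to a physical address and validate that.
+	 */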
+	if ((long)addr < 0L) {
+		unsigned long pa = __pa(addr);
+
+		if ((pa >> max_phys_bits) != 0UL)
+			return false;
+
+		return pfn_valid(pa >> PAGE_SHIFT);
+	}
+
+	if (addr >= (unsigned long) KERNBASE &&
+	    addr < (unsigned long)&_end)
+		return true;
+
+	pgd = pgd_offset_k(addr);
+	if (pgd_none(*pgd))
+		return 0;
+
+	pud = pud_offset(pgd, addr);
+	if (pud_none(*pud))
+		return 0;
+
+	if (pud_large(*pud))
+		return pfn_valid(pud_pfn(*pud));
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd))
+		return 0;
+
+	if (pmd_large(*pmd))
+		return pfn_valid(pmd_pfn(*pmd));
+
+	pte = pte_offset_kernel(pmd, addr);
+	if (pte_none(*pte))
+		return 0;
+
+	return pfn_valid(pte_pfn(*pte));
+}
+EXPORT_SYMBOL(kern_addr_valid);
+
+static unsigned long __ref kernel_map_hugepud(unsigned long vstart,
+					      unsigned long vend,
+					      pud_t *pud)
+{
+	const unsigned long mask16gb = (1UL << 34) - 1UL;
+	u64 pte_val = vstart;
+
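+	/* XORing the linear virtual address with kern_linear_pte_xor[]
+	 * cancels the PAGE_OFFSET component and directly yields the TTE
+	 * value: the physical address plus size and protection bits.
+	 */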
+	/* Each PUD is 8GB */
+	if ((vstart & mask16gb) ||
+	    (vend - vstart <= mask16gb)) {
+		pte_val ^= kern_linear_pte_xor[2];
+		pud_val(*pud) = pte_val | _PAGE_PUD_HUGE;
+
+		return vstart + PUD_SIZE;
+	}
+
+	pte_val ^= kern_linear_pte_xor[3];
+	pte_val |= _PAGE_PUD_HUGE;
+
+	vend = vstart + mask16gb + 1UL;
+	while (vstart < vend) {
+		pud_val(*pud) = pte_val;
+
+		pte_val += PUD_SIZE;
+		vstart += PUD_SIZE;
+		pud++;
+	}
+	return vstart;
+}
+
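+/* A chunk of the linear mapping may use a huge PUD entry only when huge
+ * mappings are allowed (guard), the start address is PUD aligned and at
+ * least PUD_SIZE bytes remain to be mapped.
+ */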
+static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,
+				   bool guard)
+{
+	if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)
+		return true;
+
+	return false;
+}
+
+static unsigned long __ref kernel_map_hugepmd(unsigned long vstart,
+					      unsigned long vend,
+					      pmd_t *pmd)
+{
+	const unsigned long mask256mb = (1UL << 28) - 1UL;
+	const unsigned long mask2gb = (1UL << 31) - 1UL;
+	u64 pte_val = vstart;
+
+	/* Each PMD is 8MB */
+	if ((vstart & mask256mb) ||
+	    (vend - vstart <= mask256mb)) {
+		pte_val ^= kern_linear_pte_xor[0];
+		pmd_val(*pmd) = pte_val | _PAGE_PMD_HUGE;
+
+		return vstart + PMD_SIZE;
+	}
+
+	if ((vstart & mask2gb) ||
+	    (vend - vstart <= mask2gb)) {
+		pte_val ^= kern_linear_pte_xor[1];
+		pte_val |= _PAGE_PMD_HUGE;
+		vend = vstart + mask256mb + 1UL;
+	} else {
+		pte_val ^= kern_linear_pte_xor[2];
+		pte_val |= _PAGE_PMD_HUGE;
+		vend = vstart + mask2gb + 1UL;
+	}
+
+	while (vstart < vend) {
+		pmd_val(*pmd) = pte_val;
+
+		pte_val += PMD_SIZE;
+		vstart += PMD_SIZE;
+		pmd++;
+	}
+
+	return vstart;
+}
+
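+/* Same test as for PUDs: a huge PMD entry is usable only when huge
+ * mappings are allowed, the start is PMD aligned and at least PMD_SIZE
+ * bytes remain.
+ */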
+static bool kernel_can_map_hugepmd(unsigned long vstart, unsigned long vend,
+				   bool guard)
+{
+	if (guard && !(vstart & ~PMD_MASK) && (vend - vstart) >= PMD_SIZE)
+		return true;
+
+	return false;
+}
+
+static unsigned long __ref kernel_map_range(unsigned long pstart,
+					    unsigned long pend, pgprot_t prot,
+					    bool use_huge)
+{
+	unsigned long vstart = PAGE_OFFSET + pstart;
+	unsigned long vend = PAGE_OFFSET + pend;
+	unsigned long alloc_bytes = 0UL;
+
+	if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+		prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
+			    vstart, vend);
+		prom_halt();
+	}
+
+	while (vstart < vend) {
+		unsigned long this_end, paddr = __pa(vstart);
+		pgd_t *pgd = pgd_offset_k(vstart);
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		if (pgd_none(*pgd)) {
+			pud_t *new;
+
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pgd_populate(&init_mm, pgd, new);
+		}
+		pud = pud_offset(pgd, vstart);
+		if (pud_none(*pud)) {
+			pmd_t *new;
+
+			if (kernel_can_map_hugepud(vstart, vend, use_huge)) {
+				vstart = kernel_map_hugepud(vstart, vend, pud);
+				continue;
+			}
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pud_populate(&init_mm, pud, new);
+		}
+
+		pmd = pmd_offset(pud, vstart);
+		if (pmd_none(*pmd)) {
+			pte_t *new;
+
+			if (kernel_can_map_hugepmd(vstart, vend, use_huge)) {
+				vstart = kernel_map_hugepmd(vstart, vend, pmd);
+				continue;
+			}
+			new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+			alloc_bytes += PAGE_SIZE;
+			pmd_populate_kernel(&init_mm, pmd, new);
+		}
+
+		pte = pte_offset_kernel(pmd, vstart);
+		this_end = (vstart + PMD_SIZE) & PMD_MASK;
+		if (this_end > vend)
+			this_end = vend;
+
+		while (vstart < this_end) {
+			pte_val(*pte) = (paddr | pgprot_val(prot));
+
+			vstart += PAGE_SIZE;
+			paddr += PAGE_SIZE;
+			pte++;
+		}
+	}
+
+	return alloc_bytes;
+}
+
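+/* Set the invalid bit in the tag of every kernel TSB entry so that no
+ * stale translations can match.
+ */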
+static void __init flush_all_kernel_tsbs(void)
+{
+	int i;
+
+	for (i = 0; i < KERNEL_TSB_NENTRIES; i++) {
+		struct tsb *ent = &swapper_tsb[i];
+
+		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+	}
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	for (i = 0; i < KERNEL_TSB4M_NENTRIES; i++) {
+		struct tsb *ent = &swapper_4m_tsb[i];
+
+		ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+	}
+#endif
+}
+
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+	unsigned long i, mem_alloced = 0UL;
+	bool use_huge = true;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	use_huge = false;
+#endif
+	for (i = 0; i < pall_ents; i++) {
+		unsigned long phys_start, phys_end;
+
+		phys_start = pall[i].phys_addr;
+		phys_end = phys_start + pall[i].reg_size;
+
+		mem_alloced += kernel_map_range(phys_start, phys_end,
+						PAGE_KERNEL, use_huge);
+	}
+
+	printk("Allocated %ld bytes for kernel page tables.\n",
+	       mem_alloced);
+
+	kvmap_linear_patch[0] = 0x01000000; /* nop */
+	flushi(&kvmap_linear_patch[0]);
+
+	flush_all_kernel_tsbs();
+
+	__flush_tlb_all();
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+	unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+	kernel_map_range(phys_start, phys_end,
+			 (enable ? PAGE_KERNEL : __pgprot(0)), false);
+
+	flush_tsb_kernel_range(PAGE_OFFSET + phys_start,
+			       PAGE_OFFSET + phys_end);
+
+	/* We should perform an IPI and flush all TLBs here,
+	 * but that can deadlock, so only flush the current cpu's TLB.
+	 */
+	__flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+				 PAGE_OFFSET + phys_end);
+}
+#endif
+
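+/* Return the physical base of the first available memory region that is
+ * at least @size bytes long, or ~0UL if no such region exists.
+ */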
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+	int i;
+
+	for (i = 0; i < pavail_ents; i++) {
+		if (pavail[i].reg_size >= size)
+			return pavail[i].phys_addr;
+	}
+
+	return ~0UL;
+}
+
+unsigned long PAGE_OFFSET;
+EXPORT_SYMBOL(PAGE_OFFSET);
+
+unsigned long VMALLOC_END   = 0x0000010000000000UL;
+EXPORT_SYMBOL(VMALLOC_END);
+
+unsigned long sparc64_va_hole_top =    0xfffff80000000000UL;
+unsigned long sparc64_va_hole_bottom = 0x0000080000000000UL;
+
+static void __init setup_page_offset(void)
+{
+	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		/* Cheetah/Panther support a full 64-bit virtual
+		 * address, so we can use all that our page tables
+		 * support.
+		 */
+		sparc64_va_hole_top =    0xfff0000000000000UL;
+		sparc64_va_hole_bottom = 0x0010000000000000UL;
+
+		max_phys_bits = 42;
+	} else if (tlb_type == hypervisor) {
+		switch (sun4v_chip_type) {
+		case SUN4V_CHIP_NIAGARA1:
+		case SUN4V_CHIP_NIAGARA2:
+			/* T1 and T2 support 48-bit virtual addresses.  */
+			sparc64_va_hole_top =    0xffff800000000000UL;
+			sparc64_va_hole_bottom = 0x0000800000000000UL;
+
+			max_phys_bits = 39;
+			break;
+		case SUN4V_CHIP_NIAGARA3:
+			/* T3 supports 48-bit virtual addresses.  */
+			sparc64_va_hole_top =    0xffff800000000000UL;
+			sparc64_va_hole_bottom = 0x0000800000000000UL;
+
+			max_phys_bits = 43;
+			break;
+		case SUN4V_CHIP_NIAGARA4:
+		case SUN4V_CHIP_NIAGARA5:
+		case SUN4V_CHIP_SPARC64X:
+		case SUN4V_CHIP_SPARC_M6:
+			/* T4 and later support 52-bit virtual addresses.  */
+			sparc64_va_hole_top =    0xfff8000000000000UL;
+			sparc64_va_hole_bottom = 0x0008000000000000UL;
+			max_phys_bits = 47;
+			break;
+		case SUN4V_CHIP_SPARC_M7:
+		case SUN4V_CHIP_SPARC_SN:
+			/* M7 and later support 52-bit virtual addresses.  */
+			sparc64_va_hole_top =    0xfff8000000000000UL;
+			sparc64_va_hole_bottom = 0x0008000000000000UL;
+			max_phys_bits = 49;
+			break;
+		case SUN4V_CHIP_SPARC_M8:
+		default:
+			/* M8 and later support 54-bit virtual addresses.
+			 * However, we restrict M8 and later to 53 VA bits
+			 * because a 4-level page table cannot support more
+			 * than 53 VA bits.
+			 */
+			sparc64_va_hole_top =    0xfff0000000000000UL;
+			sparc64_va_hole_bottom = 0x0010000000000000UL;
+			max_phys_bits = 51;
+			break;
+		}
+	}
+
+	if (max_phys_bits > MAX_PHYS_ADDRESS_BITS) {
+		prom_printf("MAX_PHYS_ADDRESS_BITS is too small, need %lu\n",
+			    max_phys_bits);
+		prom_halt();
+	}
+
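+	/* The linear mapping starts at the first virtual address above
+	 * the VA hole; vmalloc ends three quarters of the way up to the
+	 * bottom of the hole.
+	 */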
+	PAGE_OFFSET = sparc64_va_hole_top;
+	VMALLOC_END = ((sparc64_va_hole_bottom >> 1) +
+		       (sparc64_va_hole_bottom >> 2));
+
+	pr_info("MM: PAGE_OFFSET is 0x%016lx (max_phys_bits == %lu)\n",
+		PAGE_OFFSET, max_phys_bits);
+	pr_info("MM: VMALLOC [0x%016lx --> 0x%016lx]\n",
+		VMALLOC_START, VMALLOC_END);
+	pr_info("MM: VMEMMAP [0x%016lx --> 0x%016lx]\n",
+		VMEMMAP_BASE, VMEMMAP_BASE << 1);
+}
+
+static void __init tsb_phys_patch(void)
+{
+	struct tsb_ldquad_phys_patch_entry *pquad;
+	struct tsb_phys_patch_entry *p;
+
+	pquad = &__tsb_ldquad_phys_patch;
+	while (pquad < &__tsb_ldquad_phys_patch_end) {
+		unsigned long addr = pquad->addr;
+
+		if (tlb_type == hypervisor)
+			*(unsigned int *) addr = pquad->sun4v_insn;
+		else
+			*(unsigned int *) addr = pquad->sun4u_insn;
+		wmb();
+		__asm__ __volatile__("flush	%0"
+				     : /* no outputs */
+				     : "r" (addr));
+
+		pquad++;
+	}
+
+	p = &__tsb_phys_patch;
+	while (p < &__tsb_phys_patch_end) {
+		unsigned long addr = p->addr;
+
+		*(unsigned int *) addr = p->insn;
+		wmb();
+		__asm__ __volatile__("flush	%0"
+				     : /* no outputs */
+				     : "r" (addr));
+
+		p++;
+	}
+}
+
+/* Don't mark as init, we give this to the Hypervisor.  */
+#ifndef CONFIG_DEBUG_PAGEALLOC
+#define NUM_KTSB_DESCR	2
+#else
+#define NUM_KTSB_DESCR	1
+#endif
+static struct hv_tsb_descr ktsb_descr[NUM_KTSB_DESCR];
+
+/* The swapper TSBs are loaded with a base sequence of:
+ *
+ *	sethi	%uhi(SYMBOL), REG1
+ *	sethi	%hi(SYMBOL), REG2
+ *	or	REG1, %ulo(SYMBOL), REG1
+ *	or	REG2, %lo(SYMBOL), REG2
+ *	sllx	REG1, 32, REG1
+ *	or	REG1, REG2, REG1
+ *
+ * When we use physical addressing for the TSB accesses, we patch the
+ * first four instructions in the above sequence.
+ */
+
+static void patch_one_ktsb_phys(unsigned int *start, unsigned int *end, unsigned long pa)
+{
+	unsigned long high_bits, low_bits;
+
+	high_bits = (pa >> 32) & 0xffffffff;
+	low_bits = (pa >> 0) & 0xffffffff;
+
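+	/* Each table entry holds the address of one sethi/sethi/or/or
+	 * sequence; patch the 22-bit and 10-bit immediates with the TSB
+	 * physical address and flush every modified instruction.
+	 */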
+	while (start < end) {
+		unsigned int *ia = (unsigned int *)(unsigned long)*start;
+
+		ia[0] = (ia[0] & ~0x3fffff) | (high_bits >> 10);
+		__asm__ __volatile__("flush	%0" : : "r" (ia));
+
+		ia[1] = (ia[1] & ~0x3fffff) | (low_bits >> 10);
+		__asm__ __volatile__("flush	%0" : : "r" (ia + 1));
+
+		ia[2] = (ia[2] & ~0x1fff) | (high_bits & 0x3ff);
+		__asm__ __volatile__("flush	%0" : : "r" (ia + 2));
+
+		ia[3] = (ia[3] & ~0x1fff) | (low_bits & 0x3ff);
+		__asm__ __volatile__("flush	%0" : : "r" (ia + 3));
+
+		start++;
+	}
+}
+
+static void ktsb_phys_patch(void)
+{
+	extern unsigned int __swapper_tsb_phys_patch;
+	extern unsigned int __swapper_tsb_phys_patch_end;
+	unsigned long ktsb_pa;
+
+	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
+	patch_one_ktsb_phys(&__swapper_tsb_phys_patch,
+			    &__swapper_tsb_phys_patch_end, ktsb_pa);
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	{
+	extern unsigned int __swapper_4m_tsb_phys_patch;
+	extern unsigned int __swapper_4m_tsb_phys_patch_end;
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+	patch_one_ktsb_phys(&__swapper_4m_tsb_phys_patch,
+			    &__swapper_4m_tsb_phys_patch_end, ktsb_pa);
+	}
+#endif
+}
+
+static void __init sun4v_ktsb_init(void)
+{
+	unsigned long ktsb_pa;
+
+	/* First KTSB for PAGE_SIZE mappings.  */
+	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
+
+	switch (PAGE_SIZE) {
+	case 8 * 1024:
+	default:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_8K;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_8K;
+		break;
+
+	case 64 * 1024:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_64K;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_64K;
+		break;
+
+	case 512 * 1024:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_512K;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_512K;
+		break;
+
+	case 4 * 1024 * 1024:
+		ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
+		ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
+		break;
+	}
+
+	ktsb_descr[0].assoc = 1;
+	ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
+	ktsb_descr[0].ctx_idx = 0;
+	ktsb_descr[0].tsb_base = ktsb_pa;
+	ktsb_descr[0].resv = 0;
+
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	/* Second KTSB for 4MB/256MB/2GB/16GB mappings.  */
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+
+	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
+	ktsb_descr[1].pgsz_mask = ((HV_PGSZ_MASK_4MB |
+				    HV_PGSZ_MASK_256MB |
+				    HV_PGSZ_MASK_2GB |
+				    HV_PGSZ_MASK_16GB) &
+				   cpu_pgsz_mask);
+	ktsb_descr[1].assoc = 1;
+	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
+	ktsb_descr[1].ctx_idx = 0;
+	ktsb_descr[1].tsb_base = ktsb_pa;
+	ktsb_descr[1].resv = 0;
+#endif
+}
+
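+/* Hand the kernel TSB descriptors to the hypervisor for context 0. */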
+void sun4v_ktsb_register(void)
+{
+	unsigned long pa, ret;
+
+	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
+
+	ret = sun4v_mmu_tsb_ctx0(NUM_KTSB_DESCR, pa);
+	if (ret != 0) {
+		prom_printf("hypervisor_mmu_tsb_ctx0[%lx]: "
+			    "errors with %lx\n", pa, ret);
+		prom_halt();
+	}
+}
+
+static void __init sun4u_linear_pte_xor_finalize(void)
+{
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	/* This is where we would add Panther support for
+	 * 32MB and 256MB pages.
+	 */
+#endif
+}
+
+static void __init sun4v_linear_pte_xor_finalize(void)
+{
+	unsigned long pagecv_flag;
+
+	/* Bit 9 of the TTE is no longer the CV bit on the M7 processor;
+	 * it instead enables MCD errors. Do not set bit 9 on these
+	 * processors.
+	 */
+	switch (sun4v_chip_type) {
+	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
+	case SUN4V_CHIP_SPARC_SN:
+		pagecv_flag = 0x00;
+		break;
+	default:
+		pagecv_flag = _PAGE_CV_4V;
+		break;
+	}
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
+		kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
+			PAGE_OFFSET;
+		kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
+					   _PAGE_P_4V | _PAGE_W_4V);
+	} else {
+		kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
+	}
+
+	if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
+		kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
+			PAGE_OFFSET;
+		kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
+					   _PAGE_P_4V | _PAGE_W_4V);
+	} else {
+		kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
+	}
+
+	if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
+		kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
+			PAGE_OFFSET;
+		kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
+					   _PAGE_P_4V | _PAGE_W_4V);
+	} else {
+		kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
+	}
+#endif
+}
+
+/* paging_init() sets up the page tables */
+
+static unsigned long last_valid_pfn;
+
+static void sun4u_pgprot_init(void);
+static void sun4v_pgprot_init(void);
+
+static phys_addr_t __init available_memory(void)
+{
+	phys_addr_t available = 0ULL;
+	phys_addr_t pa_start, pa_end;
+	u64 i;
+
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
+				&pa_end, NULL)
+		available = available + (pa_end  - pa_start);
+
+	return available;
+}
+
+#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U	 (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V	 (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
+/* We need to exclude reserved regions. This exclusion will include
+ * vmlinux and initrd. To be more precise the initrd size could be used to
+ * compute a new lower limit because it is freed later during initialization.
+ */
+static void __init reduce_memory(phys_addr_t limit_ram)
+{
+	phys_addr_t avail_ram = available_memory();
+	phys_addr_t pa_start, pa_end;
+	u64 i;
+
+	if (limit_ram >= avail_ram)
+		return;
+
+	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
+				&pa_end, NULL) {
+		phys_addr_t region_size = pa_end - pa_start;
+		phys_addr_t clip_start = pa_start;
+
+		avail_ram = avail_ram - region_size;
+		/* Are we consuming too much? */
+		if (avail_ram < limit_ram) {
+			phys_addr_t give_back = limit_ram - avail_ram;
+
+			region_size = region_size - give_back;
+			clip_start = clip_start + give_back;
+		}
+
+		memblock_remove(clip_start, region_size);
+
+		if (avail_ram <= limit_ram)
+			break;
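+		/* memblock_remove() changed the memory region layout, so
+		 * restart the free range walk from the beginning.
+		 */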
+		i = 0UL;
+	}
+}
+
+void __init paging_init(void)
+{
+	unsigned long end_pfn, shift, phys_base;
+	unsigned long real_end, i;
+
+	setup_page_offset();
+
+	/* These build time checks make sure that the dcache_dirty_cpu()
+	 * page->flags usage will work.
+	 *
+	 * When a page gets marked as dcache-dirty, we store the
+	 * cpu number starting at bit 32 in the page->flags.  Also,
+	 * functions like clear_dcache_dirty_cpu use the cpu mask
+	 * in 13-bit signed-immediate instruction fields.
+	 */
+
+	/*
+	 * Page flags must not reach into upper 32 bits that are used
+	 * for the cpu number
+	 */
+	BUILD_BUG_ON(NR_PAGEFLAGS > 32);
+
+	/*
+	 * The bit fields placed in the high range must not reach below
+	 * the 32 bit boundary. Otherwise we cannot place the cpu field
+	 * at the 32 bit boundary.
+	 */
+	BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
+		ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
+
+	BUILD_BUG_ON(NR_CPUS > 4096);
+
+	kern_base = (prom_boot_mapping_phys_low >> ILOG2_4MB) << ILOG2_4MB;
+	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
+
+	/* Invalidate both kernel TSBs.  */
+	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
+#endif
+
+	/* TTE.cv bit on sparc v9 occupies the same position as TTE.mcde
+	 * bit on M7 processor. This is a conflicting usage of the same
+	 * bit. Enabling TTE.cv on M7 would turn on Memory Corruption
+	 * Detection error on all pages and this will lead to problems
+	 * later. Kernel does not run with MCD enabled and hence rest
+	 * of the required steps to fully configure memory corruption
+	 * detection are not taken. We need to ensure TTE.mcde is not
+	 * set on M7 processor. Compute the value of cacheability
+	 * flag for use later taking this into consideration.
+	 */
+	switch (sun4v_chip_type) {
+	case SUN4V_CHIP_SPARC_M7:
+	case SUN4V_CHIP_SPARC_M8:
+	case SUN4V_CHIP_SPARC_SN:
+		page_cache4v_flag = _PAGE_CP_4V;
+		break;
+	default:
+		page_cache4v_flag = _PAGE_CACHE_4V;
+		break;
+	}
+
+	if (tlb_type == hypervisor)
+		sun4v_pgprot_init();
+	else
+		sun4u_pgprot_init();
+
+	if (tlb_type == cheetah_plus ||
+	    tlb_type == hypervisor) {
+		tsb_phys_patch();
+		ktsb_phys_patch();
+	}
+
+	if (tlb_type == hypervisor)
+		sun4v_patch_tlb_handlers();
+
+	/* Find available physical memory...
+	 *
+	 * Read it twice in order to work around a bug in openfirmware.
+	 * The call to grab this table itself can cause openfirmware to
+	 * allocate memory, which in turn can take away some space from
+	 * the list of available memory.  Reading it twice makes sure
+	 * we really do get the final value.
+	 */
+	read_obp_translations();
+	read_obp_memory("reg", &pall[0], &pall_ents);
+	read_obp_memory("available", &pavail[0], &pavail_ents);
+	read_obp_memory("available", &pavail[0], &pavail_ents);
+
+	phys_base = 0xffffffffffffffffUL;
+	for (i = 0; i < pavail_ents; i++) {
+		phys_base = min(phys_base, pavail[i].phys_addr);
+		memblock_add(pavail[i].phys_addr, pavail[i].reg_size);
+	}
+
+	memblock_reserve(kern_base, kern_size);
+
+	find_ramdisk(phys_base);
+
+	if (cmdline_memory_size)
+		reduce_memory(cmdline_memory_size);
+
+	memblock_allow_resize();
+	memblock_dump_all();
+
+	set_bit(0, mmu_context_bmap);
+
+	shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+
+	real_end = (unsigned long)_end;
+	num_kernel_image_mappings = DIV_ROUND_UP(real_end - KERNBASE, 1 << ILOG2_4MB);
+	printk("Kernel: Using %d locked TLB entries for main kernel image.\n",
+	       num_kernel_image_mappings);
+
+	/* Set kernel pgd to upper alias so physical page computations
+	 * work.
+	 */
+	init_mm.pgd += ((shift) / (sizeof(pgd_t)));
+	
+	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
+
+	inherit_prom_mappings();
+	
+	/* Ok, we can use our TLB miss and window trap handlers safely.  */
+	setup_tba();
+
+	__flush_tlb_all();
+
+	prom_build_devicetree();
+	of_populate_present_mask();
+#ifndef CONFIG_SMP
+	of_fill_in_cpu_data();
+#endif
+
+	if (tlb_type == hypervisor) {
+		sun4v_mdesc_init();
+		mdesc_populate_present_mask(cpu_all_mask);
+#ifndef CONFIG_SMP
+		mdesc_fill_in_cpu_data(cpu_all_mask);
+#endif
+		mdesc_get_page_sizes(cpu_all_mask, &cpu_pgsz_mask);
+
+		sun4v_linear_pte_xor_finalize();
+
+		sun4v_ktsb_init();
+		sun4v_ktsb_register();
+	} else {
+		unsigned long impl, ver;
+
+		cpu_pgsz_mask = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K |
+				 HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB);
+
+		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
+		impl = ((ver >> 32) & 0xffff);
+		if (impl == PANTHER_IMPL)
+			cpu_pgsz_mask |= (HV_PGSZ_MASK_32MB |
+					  HV_PGSZ_MASK_256MB);
+
+		sun4u_linear_pte_xor_finalize();
+	}
+
+	/* Flush the TLBs and the 4M TSB so that the updated linear
+	 * pte XOR settings are realized for all mappings.
+	 */
+	__flush_tlb_all();
+#ifndef CONFIG_DEBUG_PAGEALLOC
+	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
+#endif
+	__flush_tlb_all();
+
+	/* Setup bootmem... */
+	last_valid_pfn = end_pfn = bootmem_init(phys_base);
+
+	kernel_physical_mapping_init();
+
+	{
+		unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+		max_zone_pfns[ZONE_NORMAL] = end_pfn;
+
+		free_area_init_nodes(max_zone_pfns);
+	}
+
+	printk("Booting Linux...\n");
+}
+
+int page_in_phys_avail(unsigned long paddr)
+{
+	int i;
+
+	paddr &= PAGE_MASK;
+
+	for (i = 0; i < pavail_ents; i++) {
+		unsigned long start, end;
+
+		start = pavail[i].phys_addr;
+		end = start + pavail[i].reg_size;
+
+		if (paddr >= start && paddr < end)
+			return 1;
+	}
+	if (paddr >= kern_base && paddr < (kern_base + kern_size))
+		return 1;
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (paddr >= __pa(initrd_start) &&
+	    paddr < __pa(PAGE_ALIGN(initrd_end)))
+		return 1;
+#endif
+
+	return 0;
+}
+
+static void __init register_page_bootmem_info(void)
+{
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+	int i;
+
+	for_each_online_node(i)
+		if (NODE_DATA(i)->node_spanned_pages)
+			register_page_bootmem_info_node(NODE_DATA(i));
+#endif
+}
+void __init mem_init(void)
+{
+	high_memory = __va(last_valid_pfn << PAGE_SHIFT);
+
+	free_all_bootmem();
+
+	/*
+	 * Must be done after boot memory is put on freelist, because here we
+	 * might set fields in deferred struct pages that have not yet been
+	 * initialized, and free_all_bootmem() initializes all the reserved
+	 * deferred pages for us.
+	 */
+	register_page_bootmem_info();
+
+	/*
+	 * Set up the zero page, mark it reserved, so that page count
+	 * is not manipulated when freeing the page from user ptes.
+	 */
+	mem_map_zero = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
+	if (mem_map_zero == NULL) {
+		prom_printf("paging_init: Cannot alloc zero page.\n");
+		prom_halt();
+	}
+	mark_page_reserved(mem_map_zero);
+
+	mem_init_print_info(NULL);
+
+	if (tlb_type == cheetah || tlb_type == cheetah_plus)
+		cheetah_ecache_flush_init();
+}
+
+void free_initmem(void)
+{
+	unsigned long addr, initend;
+	int do_free = 1;
+
+	/* If the physical memory maps were trimmed by kernel command
+	 * line options, don't even try freeing this initmem stuff up.
+	 * The kernel image could have been in the trimmed out region
+	 * and if so the freeing below will free invalid page structs.
+	 */
+	if (cmdline_memory_size)
+		do_free = 0;
+
+	/*
+	 * The init section is aligned to 8k in vmlinux.lds. Page align for >8k pagesizes.
+	 */
+	addr = PAGE_ALIGN((unsigned long)(__init_begin));
+	initend = (unsigned long)(__init_end) & PAGE_MASK;
+	for (; addr < initend; addr += PAGE_SIZE) {
+		unsigned long page;
+
+		page = (addr +
+			((unsigned long) __va(kern_base)) -
+			((unsigned long) KERNBASE));
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
+
+		if (do_free)
+			free_reserved_page(virt_to_page(page));
+	}
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+	free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM,
+			   "initrd");
+}
+#endif
+
+pgprot_t PAGE_KERNEL __read_mostly;
+EXPORT_SYMBOL(PAGE_KERNEL);
+
+pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
+pgprot_t PAGE_COPY __read_mostly;
+
+pgprot_t PAGE_SHARED __read_mostly;
+EXPORT_SYMBOL(PAGE_SHARED);
+
+unsigned long pg_iobits __read_mostly;
+
+unsigned long _PAGE_IE __read_mostly;
+EXPORT_SYMBOL(_PAGE_IE);
+
+unsigned long _PAGE_E __read_mostly;
+EXPORT_SYMBOL(_PAGE_E);
+
+unsigned long _PAGE_CACHE __read_mostly;
+EXPORT_SYMBOL(_PAGE_CACHE);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+			       int node, struct vmem_altmap *altmap)
+{
+	unsigned long pte_base;
+
+	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+		    _PAGE_CP_4U | _PAGE_CV_4U |
+		    _PAGE_P_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+			    page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
+
+	pte_base |= _PAGE_PMD_HUGE;
+
+	vstart = vstart & PMD_MASK;
+	vend = ALIGN(vend, PMD_SIZE);
+	for (; vstart < vend; vstart += PMD_SIZE) {
+		pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
+		unsigned long pte;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		if (!pgd)
+			return -ENOMEM;
+
+		pud = vmemmap_pud_populate(pgd, vstart, node);
+		if (!pud)
+			return -ENOMEM;
+
+		pmd = pmd_offset(pud, vstart);
+		pte = pmd_val(*pmd);
+		if (!(pte & _PAGE_VALID)) {
+			void *block = vmemmap_alloc_block(PMD_SIZE, node);
+
+			if (!block)
+				return -ENOMEM;
+
+			pmd_val(*pmd) = pte_base | __pa(block);
+		}
+	}
+
+	return 0;
+}
+
+void vmemmap_free(unsigned long start, unsigned long end,
+		struct vmem_altmap *altmap)
+{
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+
+static void prot_init_common(unsigned long page_none,
+			     unsigned long page_shared,
+			     unsigned long page_copy,
+			     unsigned long page_readonly,
+			     unsigned long page_exec_bit)
+{
+	PAGE_COPY = __pgprot(page_copy);
+	PAGE_SHARED = __pgprot(page_shared);
+
+	protection_map[0x0] = __pgprot(page_none);
+	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x4] = __pgprot(page_readonly);
+	protection_map[0x5] = __pgprot(page_readonly);
+	protection_map[0x6] = __pgprot(page_copy);
+	protection_map[0x7] = __pgprot(page_copy);
+	protection_map[0x8] = __pgprot(page_none);
+	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xc] = __pgprot(page_readonly);
+	protection_map[0xd] = __pgprot(page_readonly);
+	protection_map[0xe] = __pgprot(page_shared);
+	protection_map[0xf] = __pgprot(page_shared);
+}
+
+static void __init sun4u_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+	int i;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				_PAGE_CACHE_4U | _PAGE_P_4U |
+				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				_PAGE_EXEC_4U);
+	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				       _PAGE_CACHE_4U | _PAGE_P_4U |
+				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				       _PAGE_EXEC_4U | _PAGE_L_4U);
+
+	_PAGE_IE = _PAGE_IE_4U;
+	_PAGE_E = _PAGE_E_4U;
+	_PAGE_CACHE = _PAGE_CACHE_4U;
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
+		     __ACCESS_BITS_4U | _PAGE_E_4U);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
+#else
+	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
+		PAGE_OFFSET;
+#endif
+	kern_linear_pte_xor[0] |= (_PAGE_CP_4U | _PAGE_CV_4U |
+				   _PAGE_P_4U | _PAGE_W_4U);
+
+	for (i = 1; i < 4; i++)
+		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
+
+	_PAGE_ALL_SZ_BITS =  (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
+			      _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
+			      _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
+
+
+	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
+	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+	page_readonly   = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+			   __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+
+	page_exec_bit = _PAGE_EXEC_4U;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+static void __init sun4v_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+	int i;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
+				page_cache4v_flag | _PAGE_P_4V |
+				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
+				_PAGE_EXEC_4V);
+	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
+
+	_PAGE_IE = _PAGE_IE_4V;
+	_PAGE_E = _PAGE_E_4V;
+	_PAGE_CACHE = page_cache4v_flag;
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
+#else
+	kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
+		PAGE_OFFSET;
+#endif
+	kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
+				   _PAGE_W_4V);
+
+	for (i = 1; i < 4; i++)
+		kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
+		     __ACCESS_BITS_4V | _PAGE_E_4V);
+
+	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
+			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
+			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
+			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
+
+	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
+		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
+	page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
+		       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
+			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+
+	page_exec_bit = _PAGE_EXEC_4V;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+unsigned long pte_sz_bits(unsigned long sz)
+{
+	if (tlb_type == hypervisor) {
+		switch (sz) {
+		case 8 * 1024:
+		default:
+			return _PAGE_SZ8K_4V;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4V;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4V;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4V;
+		}
+	} else {
+		switch (sz) {
+		case 8 * 1024:
+		default:
+			return _PAGE_SZ8K_4U;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4U;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4U;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4U;
+		}
+	}
+}
+
+pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
+{
+	pte_t pte;
+
+	pte_val(pte)  = page | pgprot_val(pgprot_noncached(prot));
+	pte_val(pte) |= (((unsigned long)space) << 32);
+	pte_val(pte) |= pte_sz_bits(page_size);
+
+	return pte;
+}
+
+static unsigned long kern_large_tte(unsigned long paddr)
+{
+	unsigned long val;
+
+	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
+	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+		       page_cache4v_flag | _PAGE_P_4V |
+		       _PAGE_EXEC_4V | _PAGE_W_4V);
+
+	return val | paddr;
+}
+
+/* If not locked, zap it. */
+void __flush_tlb_all(void)
+{
+	unsigned long pstate;
+	int i;
+
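+	/* Spill the register windows and clear PSTATE_IE (via xor) so
+	 * that no interrupts arrive while the TLBs are torn down; the
+	 * original pstate is restored at the end of this function.
+	 */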
+	__asm__ __volatile__("flushw\n\t"
+			     "rdpr	%%pstate, %0\n\t"
+			     "wrpr	%0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+	if (tlb_type == hypervisor) {
+		sun4v_mmu_demap_all();
+	} else if (tlb_type == spitfire) {
+		for (i = 0; i < 64; i++) {
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				spitfire_put_dtlb_data(i, 0x0UL);
+			}
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
+					     "flush	%%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					     "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+				spitfire_put_itlb_data(i, 0x0UL);
+			}
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		cheetah_flush_dtlb_all();
+		cheetah_flush_itlb_all();
+	}
+	__asm__ __volatile__("wrpr	%0, 0, %%pstate"
+			     : : "r" (pstate));
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+			    unsigned long address)
+{
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	pte_t *pte = NULL;
+
+	if (page)
+		pte = (pte_t *) page_address(page);
+
+	return pte;
+}
+
+pgtable_t pte_alloc_one(struct mm_struct *mm,
+			unsigned long address)
+{
+	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		return NULL;
+	if (!pgtable_page_ctor(page)) {
+		free_unref_page(page);
+		return NULL;
+	}
+	return (pte_t *) page_address(page);
+}
+
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static void __pte_free(pgtable_t pte)
+{
+	struct page *page = virt_to_page(pte);
+
+	pgtable_page_dtor(page);
+	__free_page(page);
+}
+
+void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+	__pte_free(pte);
+}
+
+void pgtable_free(void *table, bool is_page)
+{
+	if (is_page)
+		__pte_free(table);
+	else
+		kmem_cache_free(pgtable_cache, table);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+			  pmd_t *pmd)
+{
+	unsigned long pte, flags;
+	struct mm_struct *mm;
+	pmd_t entry = *pmd;
+
+	if (!pmd_large(entry) || !pmd_young(entry))
+		return;
+
+	pte = pmd_val(entry);
+
+	/* Don't insert a non-valid PMD into the TSB, we'll deadlock.  */
+	if (!(pte & _PAGE_VALID))
+		return;
+
+	/* We are fabricating 8MB pages using 4MB real hw pages.  */
+	pte |= (addr & (1UL << REAL_HPAGE_SHIFT));
+
+	mm = vma->vm_mm;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	if (mm->context.tsb_block[MM_TSB_HUGE].tsb != NULL)
+		__update_mmu_tsb_insert(mm, MM_TSB_HUGE, REAL_HPAGE_SHIFT,
+					addr, pte);
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void context_reload(void *__data)
+{
+	struct mm_struct *mm = __data;
+
+	if (mm == current->mm)
+		load_secondary_context(mm);
+}
+
+void hugetlb_setup(struct pt_regs *regs)
+{
+	struct mm_struct *mm = current->mm;
+	struct tsb_config *tp;
+
+	if (faulthandler_disabled() || !mm) {
+		const struct exception_table_entry *entry;
+
+		entry = search_exception_tables(regs->tpc);
+		if (entry) {
+			regs->tpc = entry->fixup;
+			regs->tnpc = regs->tpc + 4;
+			return;
+		}
+		pr_alert("Unexpected HugeTLB setup in atomic context.\n");
+		die_if_kernel("HugeTSB in atomic", regs);
+	}
+
+	tp = &mm->context.tsb_block[MM_TSB_HUGE];
+	if (likely(tp->tsb == NULL))
+		tsb_grow(mm, MM_TSB_HUGE, 0);
+
+	tsb_context_switch(mm);
+	smp_tsb_sync(mm);
+
+	/* On UltraSPARC-III+ and later, configure the second half of
+	 * the Data-TLB for huge pages.
+	 */
+	if (tlb_type == cheetah_plus) {
+		bool need_context_reload = false;
+		unsigned long ctx;
+
+		spin_lock_irq(&ctx_alloc_lock);
+		ctx = mm->context.sparc64_ctx_val;
+		ctx &= ~CTX_PGSZ_MASK;
+		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
+		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;
+
+		if (ctx != mm->context.sparc64_ctx_val) {
+			/* When changing the page size fields, we
+			 * must perform a context flush so that no
+			 * stale entries match.  This flush must
+			 * occur with the original context register
+			 * settings.
+			 */
+			do_flush_tlb_mm(mm);
+
+			/* Reload the context register of all processors
+			 * also executing in this address space.
+			 */
+			mm->context.sparc64_ctx_val = ctx;
+			need_context_reload = true;
+		}
+		spin_unlock_irq(&ctx_alloc_lock);
+
+		if (need_context_reload)
+			on_each_cpu(context_reload, mm, 0);
+	}
+}
+#endif
+
+static struct resource code_resource = {
+	.name	= "Kernel code",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource data_resource = {
+	.name	= "Kernel data",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource bss_resource = {
+	.name	= "Kernel bss",
+	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static inline resource_size_t compute_kern_paddr(void *addr)
+{
+	return (resource_size_t) (addr - KERNBASE + kern_base);
+}
+
+static void __init kernel_lds_init(void)
+{
+	code_resource.start = compute_kern_paddr(_text);
+	code_resource.end   = compute_kern_paddr(_etext - 1);
+	data_resource.start = compute_kern_paddr(_etext);
+	data_resource.end   = compute_kern_paddr(_edata - 1);
+	bss_resource.start  = compute_kern_paddr(__bss_start);
+	bss_resource.end    = compute_kern_paddr(_end - 1);
+}
+
+static int __init report_memory(void)
+{
+	int i;
+	struct resource *res;
+
+	kernel_lds_init();
+
+	for (i = 0; i < pavail_ents; i++) {
+		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+
+		if (!res) {
+			pr_warn("Failed to allocate resource.\n");
+			break;
+		}
+
+		res->name = "System RAM";
+		res->start = pavail[i].phys_addr;
+		res->end = pavail[i].phys_addr + pavail[i].reg_size - 1;
+		res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM;
+
+		if (insert_resource(&iomem_resource, res) < 0) {
+			pr_warn("Resource insertion failed.\n");
+			break;
+		}
+
+		insert_resource(res, &code_resource);
+		insert_resource(res, &data_resource);
+		insert_resource(res, &bss_resource);
+	}
+
+	return 0;
+}
+arch_initcall(report_memory);
+
+#ifdef CONFIG_SMP
+#define do_flush_tlb_kernel_range	smp_flush_tlb_kernel_range
+#else
+#define do_flush_tlb_kernel_range	__flush_tlb_kernel_range
+#endif
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (start < HI_OBP_ADDRESS && end > LOW_OBP_ADDRESS) {
+		if (start < LOW_OBP_ADDRESS) {
+			flush_tsb_kernel_range(start, LOW_OBP_ADDRESS);
+			do_flush_tlb_kernel_range(start, LOW_OBP_ADDRESS);
+		}
+		if (end > HI_OBP_ADDRESS) {
+			flush_tsb_kernel_range(HI_OBP_ADDRESS, end);
+			do_flush_tlb_kernel_range(HI_OBP_ADDRESS, end);
+		}
+	} else {
+		flush_tsb_kernel_range(start, end);
+		do_flush_tlb_kernel_range(start, end);
+	}
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr, struct vm_area_struct *vma)
+{
+	char *vfrom, *vto;
+
+	vfrom = kmap_atomic(from);
+	vto = kmap_atomic(to);
+	copy_user_page(vto, vfrom, vaddr, to);
+	kunmap_atomic(vto);
+	kunmap_atomic(vfrom);
+
+	/* If this page has ADI enabled, copy over any ADI tags
+	 * as well
+	 */
+	if (vma->vm_flags & VM_SPARC_ADI) {
+		unsigned long pfrom, pto, i, adi_tag;
+
+		pfrom = page_to_phys(from);
+		pto = page_to_phys(to);
+
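+		/* Walk the source page one ADI block at a time, reading
+		 * each version tag and storing it at the matching offset
+		 * in the destination page.
+		 */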
+		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
+			asm volatile("ldxa [%1] %2, %0\n\t"
+					: "=r" (adi_tag)
+					:  "r" (i), "i" (ASI_MCD_REAL));
+			asm volatile("stxa %0, [%1] %2\n\t"
+					:
+					: "r" (adi_tag), "r" (pto),
+					  "i" (ASI_MCD_REAL));
+			pto += adi_blksize();
+		}
+		asm volatile("membar #Sync\n\t");
+	}
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void copy_highpage(struct page *to, struct page *from)
+{
+	char *vfrom, *vto;
+
+	vfrom = kmap_atomic(from);
+	vto = kmap_atomic(to);
+	copy_page(vto, vfrom);
+	kunmap_atomic(vto);
+	kunmap_atomic(vfrom);
+
+	/* If this platform is ADI enabled, copy any ADI tags
+	 * as well
+	 */
+	if (adi_capable()) {
+		unsigned long pfrom, pto, i, adi_tag;
+
+		pfrom = page_to_phys(from);
+		pto = page_to_phys(to);
+
+		for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
+			asm volatile("ldxa [%1] %2, %0\n\t"
+					: "=r" (adi_tag)
+					:  "r" (i), "i" (ASI_MCD_REAL));
+			asm volatile("stxa %0, [%1] %2\n\t"
+					:
+					: "r" (adi_tag), "r" (pto),
+					  "i" (ASI_MCD_REAL));
+			pto += adi_blksize();
+		}
+		asm volatile("membar #Sync\n\t");
+	}
+}
+EXPORT_SYMBOL(copy_highpage);
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
new file mode 100644
index 0000000..d920a75
--- /dev/null
+++ b/arch/sparc/mm/init_64.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SPARC64_MM_INIT_H
+#define _SPARC64_MM_INIT_H
+
+#include <asm/page.h>
+
+/* Most of the symbols in this file are defined in init_64.c and
+ * marked non-static so that assembler code can get at them.
+ */
+
+#define MAX_PHYS_ADDRESS	(1UL << MAX_PHYS_ADDRESS_BITS)
+
+extern unsigned long kern_linear_pte_xor[4];
+extern unsigned int sparc64_highest_unlocked_tlb_ent;
+extern unsigned long sparc64_kern_pri_context;
+extern unsigned long sparc64_kern_pri_nuc_bits;
+extern unsigned long sparc64_kern_sec_context;
+void mmu_info(struct seq_file *m);
+
+struct linux_prom_translation {
+	unsigned long virt;
+	unsigned long size;
+	unsigned long data;
+};
+
+/* Exported for kernel TLB miss handling in ktlb.S */
+extern struct linux_prom_translation prom_trans[512];
+extern unsigned int prom_trans_ents;
+
+/* Exported for SMP bootup purposes. */
+extern unsigned long kern_locked_tte_data;
+
+void prom_world(int enter);
+
+#endif /* _SPARC64_MM_INIT_H */
diff --git a/arch/sparc/mm/io-unit.c b/arch/sparc/mm/io-unit.c
new file mode 100644
index 0000000..c8cb27d
--- /dev/null
+++ b/arch/sparc/mm/io-unit.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * io-unit.c:  IO-UNIT specific routines for memory management.
+ *
+ * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
+ */
+ 
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
+#include <linux/bitops.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/io-unit.h>
+#include <asm/mxcc.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/dma.h>
+#include <asm/oplib.h>
+
+#include "mm_32.h"
+
+/* #define IOUNIT_DEBUG */
+#ifdef IOUNIT_DEBUG
+#define IOD(x) printk(x)
+#else
+#define IOD(x) do { } while (0)
+#endif
+
+#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
+#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
+
+static void __init iounit_iommu_init(struct platform_device *op)
+{
+	struct iounit_struct *iounit;
+	iopte_t __iomem *xpt;
+	iopte_t __iomem *xptend;
+
+	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
+	if (!iounit) {
+		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
+		prom_halt();
+	}
+
+	iounit->limit[0] = IOUNIT_BMAP1_START;
+	iounit->limit[1] = IOUNIT_BMAP2_START;
+	iounit->limit[2] = IOUNIT_BMAPM_START;
+	iounit->limit[3] = IOUNIT_BMAPM_END;
+	iounit->rotor[1] = IOUNIT_BMAP2_START;
+	iounit->rotor[2] = IOUNIT_BMAPM_START;
+
+	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
+	if (!xpt) {
+		prom_printf("SUN4D: Cannot map External Page Table.");
+		prom_halt();
+	}
+	
+	op->dev.archdata.iommu = iounit;
+	iounit->page_table = xpt;
+	spin_lock_init(&iounit->lock);
+
+	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
+	for (; xpt < xptend; xpt++)
+		sbus_writel(0, xpt);
+}
+
+static int __init iounit_init(void)
+{
+	extern void sun4d_init_sbi_irq(void);
+	struct device_node *dp;
+
+	for_each_node_by_name(dp, "sbi") {
+		struct platform_device *op = of_find_device_by_node(dp);
+
+		iounit_iommu_init(op);
+		of_propagate_archdata(op);
+	}
+
+	sun4d_init_sbi_irq();
+
+	return 0;
+}
+
+subsys_initcall(iounit_init);
+
+/* One has to hold iounit->lock to call this */
+static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
+{
+	int i, j, k, npages;
+	unsigned long rotor, scan, limit;
+	iopte_t iopte;
+
+	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+
+	/* A tiny bit of magic: each nibble of 'i' selects which bitmap
+	 * area to try next, ordered by how well it suits this size.
+	 */
+	switch (npages) {
+	case 1: i = 0x0231; break;
+	case 2: i = 0x0132; break;
+	default: i = 0x0213; break;
+	}
+	
+	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
+	
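+	/* Search the chosen area from its rotor up to its limit for
+	 * npages consecutive free bits, wrapping around once before
+	 * giving up and moving on to the next area.
+	 */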
+next:	j = (i & 15);
+	rotor = iounit->rotor[j - 1];
+	limit = iounit->limit[j];
+	scan = rotor;
+nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
+	if (scan + npages > limit) {
+		if (limit != rotor) {
+			limit = rotor;
+			scan = iounit->limit[j - 1];
+			goto nexti;
+		}
+		i >>= 4;
+		if (!(i & 15))
+			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
+		goto next;
+	}
+	for (k = 1, scan++; k < npages; k++)
+		if (test_bit(scan++, iounit->bmap))
+			goto nexti;
+	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
+	scan -= npages;
+	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
+	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
+	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
+		set_bit(scan, iounit->bmap);
+		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
+	}
+	IOD(("%08lx\n", vaddr));
+	return vaddr;
+}
+
+static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
+{
+	struct iounit_struct *iounit = dev->archdata.iommu;
+	unsigned long ret, flags;
+	
+	spin_lock_irqsave(&iounit->lock, flags);
+	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
+	spin_unlock_irqrestore(&iounit->lock, flags);
+	return ret;
+}
+
+static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+{
+	struct iounit_struct *iounit = dev->archdata.iommu;
+	unsigned long flags;
+
+	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
+	spin_lock_irqsave(&iounit->lock, flags);
+	while (sz != 0) {
+		--sz;
+		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
+		sg->dma_length = sg->length;
+		sg = sg_next(sg);
+	}
+	spin_unlock_irqrestore(&iounit->lock, flags);
+}
+
+static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+{
+	struct iounit_struct *iounit = dev->archdata.iommu;
+	unsigned long flags;
+	
+	spin_lock_irqsave(&iounit->lock, flags);
+	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+	for (len += vaddr; vaddr < len; vaddr++)
+		clear_bit(vaddr, iounit->bmap);
+	spin_unlock_irqrestore(&iounit->lock, flags);
+}
+
+static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+{
+	struct iounit_struct *iounit = dev->archdata.iommu;
+	unsigned long flags;
+	unsigned long vaddr, len;
+
+	spin_lock_irqsave(&iounit->lock, flags);
+	while (sz != 0) {
+		--sz;
+		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
+		for (len += vaddr; vaddr < len; vaddr++)
+			clear_bit(vaddr, iounit->bmap);
+		sg = sg_next(sg);
+	}
+	spin_unlock_irqrestore(&iounit->lock, flags);
+}
+
+#ifdef CONFIG_SBUS
+static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
+{
+	struct iounit_struct *iounit = dev->archdata.iommu;
+	unsigned long page, end;
+	pgprot_t dvma_prot;
+	iopte_t __iomem *iopte;
+
+	*pba = addr;
+
+	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
+	end = PAGE_ALIGN((addr + len));
+	while(addr < end) {
+		page = va;
+		{
+			pgd_t *pgdp;
+			pmd_t *pmdp;
+			pte_t *ptep;
+			long i;
+
+			pgdp = pgd_offset(&init_mm, addr);
+			pmdp = pmd_offset(pgdp, addr);
+			ptep = pte_offset_map(pmdp, addr);
+
+			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
+			
+			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
+
+			iopte = iounit->page_table + i;
+			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
+		}
+		addr += PAGE_SIZE;
+		va += PAGE_SIZE;
+	}
+	flush_cache_all();
+	flush_tlb_all();
+
+	return 0;
+}
+
+static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
+{
+	/* XXX Somebody please fill this in */
+}
+#endif
+
+static const struct sparc32_dma_ops iounit_dma_ops = {
+	.get_scsi_one		= iounit_get_scsi_one,
+	.get_scsi_sgl		= iounit_get_scsi_sgl,
+	.release_scsi_one	= iounit_release_scsi_one,
+	.release_scsi_sgl	= iounit_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iounit_map_dma_area,
+	.unmap_dma_area		= iounit_unmap_dma_area,
+#endif
+};
+
+void __init ld_mmu_iounit(void)
+{
+	sparc32_dma_ops = &iounit_dma_ops;
+}
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
new file mode 100644
index 0000000..2c5f8a6
--- /dev/null
+++ b/arch/sparc/mm/iommu.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * iommu.c:  IOMMU specific routines for memory management.
+ *
+ * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
+ * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
+ */
+ 
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/io.h>
+#include <asm/mxcc.h>
+#include <asm/mbus.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/bitext.h>
+#include <asm/iommu.h>
+#include <asm/dma.h>
+
+#include "mm_32.h"
+
+/*
+ * This can be sized dynamically, but we will do this
+ * only when we have guidance about actual I/O pressures.
+ */
+#define IOMMU_RNGE	IOMMU_RNGE_256MB
+#define IOMMU_START	0xF0000000
+#define IOMMU_WINSIZE	(256*1024*1024U)
+#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
+#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
+
+static int viking_flush;
+/* viking.S */
+extern void viking_flush_page(unsigned long page);
+extern void viking_mxcc_flush_page(unsigned long page);
+
+/*
+ * Values precomputed according to CPU type.
+ */
+static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
+static pgprot_t dvma_prot;		/* Consistent mapping pte flags */
+
+#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
+#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)
+
+static void __init sbus_iommu_init(struct platform_device *op)
+{
+	struct iommu_struct *iommu;
+	unsigned int impl, vers;
+	unsigned long *bitmap;
+	unsigned long control;
+	unsigned long base;
+	unsigned long tmp;
+
+	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
+	if (!iommu) {
+		prom_printf("Unable to allocate iommu structure\n");
+		prom_halt();
+	}
+
+	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
+				 "iommu_regs");
+	if (!iommu->regs) {
+		prom_printf("Cannot map IOMMU registers\n");
+		prom_halt();
+	}
+
+	control = sbus_readl(&iommu->regs->control);
+	impl = (control & IOMMU_CTRL_IMPL) >> 28;
+	vers = (control & IOMMU_CTRL_VERS) >> 24;
+	control &= ~(IOMMU_CTRL_RNGE);
+	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
+	sbus_writel(control, &iommu->regs->control);
+
+	iommu_invalidate(iommu->regs);
+	iommu->start = IOMMU_START;
+	iommu->end = 0xffffffff;
+
+	/* Allocate IOMMU page table */
+	/* Stupid alignment constraints give me a headache.
+	 * We need a 256K, 512K, 1M or 2M area aligned to its size,
+	 * and the current gfp allocator will fortunately give it to us.
+	 */
+	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
+	if (!tmp) {
+		prom_printf("Unable to allocate iommu table [0x%lx]\n",
+			    IOMMU_NPTES * sizeof(iopte_t));
+		prom_halt();
+	}
+	iommu->page_table = (iopte_t *)tmp;
+
+	/* Initialize new table. */
+	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
+	flush_cache_all();
+	flush_tlb_all();
+
+	base = __pa((unsigned long)iommu->page_table) >> 4;
+	sbus_writel(base, &iommu->regs->base);
+	iommu_invalidate(iommu->regs);
+
+	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
+	if (!bitmap) {
+		prom_printf("Unable to allocate iommu bitmap [%d]\n",
+			    (int)(IOMMU_NPTES>>3));
+		prom_halt();
+	}
+	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
+	/* To be coherent on HyperSparc, the page color of DVMA
+	 * and physical addresses must match.
+	 */
+	if (srmmu_modtype == HyperSparc)
+		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
+	else
+		iommu->usemap.num_colors = 1;
+
+	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
+	       impl, vers, iommu->page_table,
+	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);
+
+	op->dev.archdata.iommu = iommu;
+}
+
+static int __init iommu_init(void)
+{
+	struct device_node *dp;
+
+	for_each_node_by_name(dp, "iommu") {
+		struct platform_device *op = of_find_device_by_node(dp);
+
+		sbus_iommu_init(op);
+		of_propagate_archdata(op);
+	}
+
+	return 0;
+}
+
+subsys_initcall(iommu_init);
+
+/* Flush the iotlb entries to ram. */
+/* This could be better if we didn't have to flush whole pages. */
+static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
+{
+	unsigned long start;
+	unsigned long end;
+
+	start = (unsigned long)iopte;
+	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
+	start &= PAGE_MASK;
+	if (viking_mxcc_present) {
+		while(start < end) {
+			viking_mxcc_flush_page(start);
+			start += PAGE_SIZE;
+		}
+	} else if (viking_flush) {
+		while(start < end) {
+			viking_flush_page(start);
+			start += PAGE_SIZE;
+		}
+	} else {
+		while(start < end) {
+			__flush_page_to_ram(start);
+			start += PAGE_SIZE;
+		}
+	}
+}
+
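+/* Allocate npages consecutive IOPTEs, point them at the given pages and
+ * return the DVMA bus address that maps the first page.
+ */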
+static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+{
+	struct iommu_struct *iommu = dev->archdata.iommu;
+	int ioptex;
+	iopte_t *iopte, *iopte0;
+	unsigned int busa, busa0;
+	int i;
+
+	/* page color = pfn of page */
+	ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+	if (ioptex < 0)
+		panic("iommu out");
+	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
+	iopte0 = &iommu->page_table[ioptex];
+
+	busa = busa0;
+	iopte = iopte0;
+	for (i = 0; i < npages; i++) {
+		iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+		iommu_invalidate_page(iommu->regs, busa);
+		busa += PAGE_SIZE;
+		iopte++;
+		page++;
+	}
+
+	iommu_flush_iotlb(iopte0, npages);
+
+	return busa0;
+}
+
+static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
+{
+	unsigned long off;
+	int npages;
+	struct page *page;
+	u32 busa;
+
+	off = (unsigned long)vaddr & ~PAGE_MASK;
+	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+	page = virt_to_page((unsigned long)vaddr & PAGE_MASK);
+	busa = iommu_get_one(dev, page, npages);
+	return busa + off;
+}
+
+static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
+{
+	flush_page_for_dma(0);
+	return iommu_get_scsi_one(dev, vaddr, len);
+}
+
+static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned long len)
+{
+	unsigned long page = ((unsigned long) vaddr) & PAGE_MASK;
+
+	while(page < ((unsigned long)(vaddr + len))) {
+		flush_page_for_dma(page);
+		page += PAGE_SIZE;
+	}
+	return iommu_get_scsi_one(dev, vaddr, len);
+}
+
+static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
+{
+	int n;
+
+	flush_page_for_dma(0);
+	while (sz != 0) {
+		--sz;
+		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+		sg->dma_length = sg->length;
+		sg = sg_next(sg);
+	}
+}
+
+static void iommu_get_scsi_sgl_pflush(struct device *dev, struct scatterlist *sg, int sz)
+{
+	unsigned long page, oldpage = 0;
+	int n, i;
+
+	while(sz != 0) {
+		--sz;
+
+		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+		/*
+		 * We expect unmapped highmem pages to be not in the cache.
+		 * XXX Is this a good assumption?
+		 * XXX What if someone else unmaps it here and races us?
+		 */
+		if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
+			for (i = 0; i < n; i++) {
+				if (page != oldpage) {	/* Already flushed? */
+					flush_page_for_dma(page);
+					oldpage = page;
+				}
+				page += PAGE_SIZE;
+			}
+		}
+
+		sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
+		sg->dma_length = sg->length;
+		sg = sg_next(sg);
+	}
+}
+
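+/* Undo iommu_get_one(): clear the IOPTEs backing @busa, invalidate the
+ * corresponding IOMMU TLB entries and return the range to the usemap.
+ */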
+static void iommu_release_one(struct device *dev, u32 busa, int npages)
+{
+	struct iommu_struct *iommu = dev->archdata.iommu;
+	int ioptex;
+	int i;
+
+	BUG_ON(busa < iommu->start);
+	ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+	for (i = 0; i < npages; i++) {
+		iopte_val(iommu->page_table[ioptex + i]) = 0;
+		iommu_invalidate_page(iommu->regs, busa);
+		busa += PAGE_SIZE;
+	}
+	bit_map_clear(&iommu->usemap, ioptex, npages);
+}
+
+static void iommu_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
+{
+	unsigned long off;
+	int npages;
+
+	off = vaddr & ~PAGE_MASK;
+	npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+	iommu_release_one(dev, vaddr & PAGE_MASK, npages);
+}
+
+static void iommu_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
+{
+	int n;
+
+	while(sz != 0) {
+		--sz;
+
+		n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
+		iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+		sg->dma_address = 0x21212121;
+		sg = sg_next(sg);
+	}
+}
+
+#ifdef CONFIG_SBUS
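+/* Map @len bytes of the kernel virtual range starting at @va for DVMA.
+ * The CPU mapping for the range is rewritten with dvma_prot, matching
+ * IOPTEs are installed (page color = physical address), and the bus
+ * address of the mapping is returned through @pba.
+ */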
+static int iommu_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va,
+			      unsigned long addr, int len)
+{
+	struct iommu_struct *iommu = dev->archdata.iommu;
+	unsigned long page, end;
+	iopte_t *iopte = iommu->page_table;
+	iopte_t *first;
+	int ioptex;
+
+	BUG_ON((va & ~PAGE_MASK) != 0);
+	BUG_ON((addr & ~PAGE_MASK) != 0);
+	BUG_ON((len & ~PAGE_MASK) != 0);
+
+	/* page color = physical address */
+	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
+		addr >> PAGE_SHIFT);
+	if (ioptex < 0)
+		panic("iommu out");
+
+	iopte += ioptex;
+	first = iopte;
+	end = addr + len;
+	while(addr < end) {
+		page = va;
+		{
+			pgd_t *pgdp;
+			pmd_t *pmdp;
+			pte_t *ptep;
+
+			if (viking_mxcc_present)
+				viking_mxcc_flush_page(page);
+			else if (viking_flush)
+				viking_flush_page(page);
+			else
+				__flush_page_to_ram(page);
+
+			pgdp = pgd_offset(&init_mm, addr);
+			pmdp = pmd_offset(pgdp, addr);
+			ptep = pte_offset_map(pmdp, addr);
+
+			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
+		}
+		iopte_val(*iopte++) =
+		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
+		addr += PAGE_SIZE;
+		va += PAGE_SIZE;
+	}
+	/* P3: why do we need this?
+	 *
+	 * DAVEM: Because there are several aspects, none of which
+	 *        are handled by a single interface.  Some cpus are
+	 *        completely not I/O DMA coherent, and some have
+	 *        virtually indexed caches.  The driver DMA flushing
+	 *        methods handle the former case, but here during
+	 *        IOMMU page table modifications, and usage of non-cacheable
+	 *        cpu mappings of pages potentially in the cpu caches, we have
+	 *        to handle the latter case as well.
+	 */
+	flush_cache_all();
+	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
+	flush_tlb_all();
+	iommu_invalidate(iommu->regs);
+
+	*pba = iommu->start + (ioptex << PAGE_SHIFT);
+	return 0;
+}
+
+static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len)
+{
+	struct iommu_struct *iommu = dev->archdata.iommu;
+	iopte_t *iopte = iommu->page_table;
+	unsigned long end;
+	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+
+	BUG_ON((busa & ~PAGE_MASK) != 0);
+	BUG_ON((len & ~PAGE_MASK) != 0);
+
+	iopte += ioptex;
+	end = busa + len;
+	while (busa < end) {
+		iopte_val(*iopte++) = 0;
+		busa += PAGE_SIZE;
+	}
+	flush_tlb_all();
+	iommu_invalidate(iommu->regs);
+	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);
+}
+#endif
+
+static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
+	.get_scsi_one		= iommu_get_scsi_one_gflush,
+	.get_scsi_sgl		= iommu_get_scsi_sgl_gflush,
+	.release_scsi_one	= iommu_release_scsi_one,
+	.release_scsi_sgl	= iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iommu_map_dma_area,
+	.unmap_dma_area		= iommu_unmap_dma_area,
+#endif
+};
+
+static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
+	.get_scsi_one		= iommu_get_scsi_one_pflush,
+	.get_scsi_sgl		= iommu_get_scsi_sgl_pflush,
+	.release_scsi_one	= iommu_release_scsi_one,
+	.release_scsi_sgl	= iommu_release_scsi_sgl,
+#ifdef CONFIG_SBUS
+	.map_dma_area		= iommu_map_dma_area,
+	.unmap_dma_area		= iommu_unmap_dma_area,
+#endif
+};
+
+void __init ld_mmu_iommu(void)
+{
+	if (flush_page_for_dma_global) {
+		/* flush_page_for_dma flushes everything, no matter which page it is given */
+		sparc32_dma_ops = &iommu_dma_gflush_ops;
+	} else {
+		sparc32_dma_ops = &iommu_dma_pflush_ops;
+	}
+
+	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
+		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
+		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
+	} else {
+		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
+		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
+	}
+}
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
new file mode 100644
index 0000000..ec61ff1
--- /dev/null
+++ b/arch/sparc/mm/leon_mm.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/sparc/mm/leon_mm.c
+ *
+ * Copyright (C) 2004 Konrad Eisele (eiselekd@web.de, konrad@gaisler.com) Gaisler Research
+ * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
+ * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
+ *
+ * do srmmu probe in software
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <asm/asi.h>
+#include <asm/leon.h>
+#include <asm/tlbflush.h>
+
+#include "mm_32.h"
+
+int leon_flush_during_switch = 1;
+static int srmmu_swprobe_trace;
+
+static inline unsigned long leon_get_ctable_ptr(void)
+{
+	unsigned int retval;
+
+	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+			     "=r" (retval) :
+			     "r" (SRMMU_CTXTBL_PTR),
+			     "i" (ASI_LEON_MMUREGS));
+	return (retval & SRMMU_CTX_PMASK) << 4;
+}
+
+
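+/* Software page-table walk for LEON: starting from the context table,
+ * follow the PGD/PMD/PTE levels with physical bypass loads.  Returns the
+ * PTE found (0 on failure) and, if @paddr is non-NULL, the physical
+ * address that @vaddr translates to.
+ */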
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
+{
+
+	unsigned int ctxtbl;
+	unsigned int pgd, pmd, ped;
+	unsigned int ptr;
+	unsigned int lvl, pte, paddrbase;
+	unsigned int ctx;
+	unsigned int paddr_calc;
+
+	paddrbase = 0;
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: trace on\n");
+
+	ctxtbl = leon_get_ctable_ptr();
+	if (!(ctxtbl)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
+		return 0;
+	}
+	if (!_pfn_valid(PFN(ctxtbl))) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO
+			       "swprobe: !_pfn_valid(%x)=>0\n",
+			       PFN(ctxtbl));
+		return 0;
+	}
+
+	ctx = srmmu_get_context();
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe:  --- ctx (%x) ---\n", ctx);
+
+	pgd = LEON_BYPASS_LOAD_PA(ctxtbl + (ctx * 4));
+
+	if (((pgd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pgd is entry level 3\n");
+		lvl = 3;
+		pte = pgd;
+		paddrbase = pgd & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (((pgd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pgd is invalid => 0\n");
+		return 0;
+	}
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe:  --- pgd (%x) ---\n", pgd);
+
+	ptr = (pgd & SRMMU_PTD_PMASK) << 4;
+	ptr += ((((vaddr) >> LEON_PGD_SH) & LEON_PGD_M) * 4);
+	if (!_pfn_valid(PFN(ptr)))
+		return 0;
+
+	pmd = LEON_BYPASS_LOAD_PA(ptr);
+	if (((pmd & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pmd is entry level 2\n");
+		lvl = 2;
+		pte = pmd;
+		paddrbase = pmd & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (((pmd & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: pmd is invalid => 0\n");
+		return 0;
+	}
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe:  --- pmd (%x) ---\n", pmd);
+
+	ptr = (pmd & SRMMU_PTD_PMASK) << 4;
+	ptr += (((vaddr >> LEON_PMD_SH) & LEON_PMD_M) * 4);
+	if (!_pfn_valid(PFN(ptr))) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: !_pfn_valid(%x)=>0\n",
+			       PFN(ptr));
+		return 0;
+	}
+
+	ped = LEON_BYPASS_LOAD_PA(ptr);
+
+	if (((ped & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: ped is entry level 1\n");
+		lvl = 1;
+		pte = ped;
+		paddrbase = ped & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (((ped & SRMMU_ET_MASK) != SRMMU_ET_PTD)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: ped is invalid => 0\n");
+		return 0;
+	}
+
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe:  --- ped (%x) ---\n", ped);
+
+	ptr = (ped & SRMMU_PTD_PMASK) << 4;
+	ptr += (((vaddr >> LEON_PTE_SH) & LEON_PTE_M) * 4);
+	if (!_pfn_valid(PFN(ptr)))
+		return 0;
+
+	ptr = LEON_BYPASS_LOAD_PA(ptr);
+	if (((ptr & SRMMU_ET_MASK) == SRMMU_ET_PTE)) {
+		if (srmmu_swprobe_trace)
+			printk(KERN_INFO "swprobe: ptr is entry level 0\n");
+		lvl = 0;
+		pte = ptr;
+		paddrbase = ptr & _SRMMU_PTE_PMASK_LEON;
+		goto ready;
+	}
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: ptr is invalid => 0\n");
+	return 0;
+
+ready:
+	switch (lvl) {
+	case 0:
+		paddr_calc =
+		    (vaddr & ~(-1 << LEON_PTE_SH)) | ((pte & ~0xff) << 4);
+		break;
+	case 1:
+		paddr_calc =
+		    (vaddr & ~(-1 << LEON_PMD_SH)) | ((pte & ~0xff) << 4);
+		break;
+	case 2:
+		paddr_calc =
+		    (vaddr & ~(-1 << LEON_PGD_SH)) | ((pte & ~0xff) << 4);
+		break;
+	default:
+	case 3:
+		paddr_calc = vaddr;
+		break;
+	}
+	if (srmmu_swprobe_trace)
+		printk(KERN_INFO "swprobe: paddr %x\n", paddr_calc);
+	if (paddr)
+		*paddr = paddr_calc;
+	return pte;
+}
+
+void leon_flush_icache_all(void)
+{
+	__asm__ __volatile__(" flush ");	/*iflush*/
+}
+
+void leon_flush_dcache_all(void)
+{
+	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
+			     "i"(ASI_LEON_DFLUSH) : "memory");
+}
+
+void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page)
+{
+	if (vma->vm_flags & VM_EXEC)
+		leon_flush_icache_all();
+	leon_flush_dcache_all();
+}
+
+void leon_flush_cache_all(void)
+{
+	__asm__ __volatile__(" flush ");	/*iflush*/
+	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
+			     "i"(ASI_LEON_DFLUSH) : "memory");
+}
+
+void leon_flush_tlb_all(void)
+{
+	leon_flush_cache_all();
+	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : : "r"(0x400),
+			     "i"(ASI_LEON_MMUFLUSH) : "memory");
+}
+
+/* get all cache regs */
+void leon3_getCacheRegs(struct leon3_cacheregs *regs)
+{
+	unsigned long ccr, iccr, dccr;
+
+	if (!regs)
+		return;
+	/* Get Cache regs from "Cache ASI" address 0x0, 0x8 and 0xC */
+	__asm__ __volatile__("lda [%%g0] %3, %0\n\t"
+			     "mov 0x08, %%g1\n\t"
+			     "lda [%%g1] %3, %1\n\t"
+			     "mov 0x0c, %%g1\n\t"
+			     "lda [%%g1] %3, %2\n\t"
+			     : "=r"(ccr), "=r"(iccr), "=r"(dccr)
+			       /* output */
+			     : "i"(ASI_LEON_CACHEREGS)	/* input */
+			     : "g1"	/* clobber list */
+	    );
+	regs->ccr = ccr;
+	regs->iccr = iccr;
+	regs->dccr = dccr;
+}
+
+/* Because of the virtual cache we need to check the cache configuration
+ * to see whether flushing can be skipped in some cases.
+ *
+ * LEON2 and LEON3 report their cache configuration in different ways.
+ */
+int __init leon_flush_needed(void)
+{
+	int flush_needed = -1;
+	unsigned int ssize, sets;
+	char *setStr[4] =
+	    { "direct mapped", "2-way associative", "3-way associative",
+		"4-way associative"
+	};
+	/* leon 3 */
+	struct leon3_cacheregs cregs;
+	leon3_getCacheRegs(&cregs);
+	sets = (cregs.dccr & LEON3_XCCR_SETS_MASK) >> 24;
+	/* (ssize=>realsize) 0=>1k, 1=>2k, 2=>4k, 3=>8k ... */
+	ssize = 1 << ((cregs.dccr & LEON3_XCCR_SSIZE_MASK) >> 20);
+
+	printk(KERN_INFO "CACHE: %s cache, set size %dk\n",
+	       sets > 3 ? "unknown" : setStr[sets], ssize);
+	if ((ssize <= (PAGE_SIZE / 1024)) && (sets == 0)) {
+		/* Set Size <= Page size  ==>
+		   flush on every context switch not needed. */
+		flush_needed = 0;
+		printk(KERN_INFO "CACHE: not flushing on every context switch\n");
+	}
+	return flush_needed;
+}
+
+void leon_switch_mm(void)
+{
+	flush_tlb_mm((void *)0);
+	if (leon_flush_during_switch)
+		leon_flush_cache_all();
+}
+
+static void leon_flush_cache_mm(struct mm_struct *mm)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	leon_flush_pcache_all(vma, page);
+}
+
+static void leon_flush_cache_range(struct vm_area_struct *vma,
+				   unsigned long start,
+				   unsigned long end)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_tlb_mm(struct mm_struct *mm)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_page(struct vm_area_struct *vma,
+				unsigned long page)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_range(struct vm_area_struct *vma,
+				 unsigned long start,
+				 unsigned long end)
+{
+	leon_flush_tlb_all();
+}
+
+static void leon_flush_page_to_ram(unsigned long page)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
+{
+	leon_flush_cache_all();
+}
+
+static void leon_flush_page_for_dma(unsigned long page)
+{
+	leon_flush_dcache_all();
+}
+
+void __init poke_leonsparc(void)
+{
+}
+
+static const struct sparc32_cachetlb_ops leon_ops = {
+	.cache_all	= leon_flush_cache_all,
+	.cache_mm	= leon_flush_cache_mm,
+	.cache_page	= leon_flush_cache_page,
+	.cache_range	= leon_flush_cache_range,
+	.tlb_all	= leon_flush_tlb_all,
+	.tlb_mm		= leon_flush_tlb_mm,
+	.tlb_page	= leon_flush_tlb_page,
+	.tlb_range	= leon_flush_tlb_range,
+	.page_to_ram	= leon_flush_page_to_ram,
+	.sig_insns	= leon_flush_sig_insns,
+	.page_for_dma	= leon_flush_page_for_dma,
+};
+
+void __init init_leon(void)
+{
+	srmmu_name = "LEON";
+	sparc32_cachetlb_ops = &leon_ops;
+	poke_srmmu = poke_leonsparc;
+
+	leon_flush_during_switch = leon_flush_needed();
+}
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
new file mode 100644
index 0000000..0d0b06e
--- /dev/null
+++ b/arch/sparc/mm/mm_32.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* fault_32.c - visible as they are called from assembler */
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
+                            unsigned long address);
+asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+                               unsigned long address);
+
+void window_overflow_fault(void);
+void window_underflow_fault(unsigned long sp);
+void window_ret_fault(struct pt_regs *regs);
+
+/* srmmu.c */
+extern char *srmmu_name;
+extern int viking_mxcc_present;
+extern int flush_page_for_dma_global;
+
+extern void (*poke_srmmu)(void);
+
+void __init srmmu_paging_init(void);
+
+/* iommu.c */
+void ld_mmu_iommu(void);
+
+/* io-unit.c */
+void ld_mmu_iounit(void);
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
new file mode 100644
index 0000000..be9cb00
--- /dev/null
+++ b/arch/sparc/mm/srmmu.c
@@ -0,0 +1,1843 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * srmmu.c:  SRMMU specific routines for memory management.
+ *
+ * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
+ * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
+ * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
+ * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+ * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
+ */
+
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/bootmem.h>
+#include <linux/pagemap.h>
+#include <linux/vmalloc.h>
+#include <linux/kdebug.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/io-unit.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/bitext.h>
+#include <asm/vaddrs.h>
+#include <asm/cache.h>
+#include <asm/traps.h>
+#include <asm/oplib.h>
+#include <asm/mbus.h>
+#include <asm/page.h>
+#include <asm/asi.h>
+#include <asm/smp.h>
+#include <asm/io.h>
+
+/* Now the cpu specific definitions. */
+#include <asm/turbosparc.h>
+#include <asm/tsunami.h>
+#include <asm/viking.h>
+#include <asm/swift.h>
+#include <asm/leon.h>
+#include <asm/mxcc.h>
+#include <asm/ross.h>
+
+#include "mm_32.h"
+
+enum mbus_module srmmu_modtype;
+static unsigned int hwbug_bitmask;
+int vac_cache_size;
+EXPORT_SYMBOL(vac_cache_size);
+int vac_line_size;
+
+extern struct resource sparc_iomap;
+
+extern unsigned long last_valid_pfn;
+
+static pgd_t *srmmu_swapper_pg_dir;
+
+const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+EXPORT_SYMBOL(sparc32_cachetlb_ops);
+
+#ifdef CONFIG_SMP
+const struct sparc32_cachetlb_ops *local_ops;
+
+#define FLUSH_BEGIN(mm)
+#define FLUSH_END
+#else
+#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
+#define FLUSH_END	}
+#endif
+
+int flush_page_for_dma_global = 1;
+
+char *srmmu_name;
+
+ctxd_t *srmmu_ctx_table_phys;
+static ctxd_t *srmmu_context_table;
+
+int viking_mxcc_present;
+static DEFINE_SPINLOCK(srmmu_context_spinlock);
+
+static int is_hypersparc;
+
+static int srmmu_cache_pagetables;
+
+/* these will be initialized in srmmu_nocache_calcsize() */
+static unsigned long srmmu_nocache_size;
+static unsigned long srmmu_nocache_end;
+
+/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
+#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
+
+/* The context table is a nocache user with the biggest alignment needs. */
+#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
+
+void *srmmu_nocache_pool;
+static struct bit_map srmmu_nocache_map;
+
+static inline int srmmu_pmd_none(pmd_t pmd)
+{ return !(pmd_val(pmd) & 0xFFFFFFF); }
+
+/* XXX should we hyper_flush_whole_icache here - Anton */
+static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
+{
+	pte_t pte;
+
+	pte = __pte((SRMMU_ET_PTD | (__nocache_pa(pgdp) >> 4)));
+	set_pte((pte_t *)ctxp, pte);
+}
+
+/*
+ * Locations of MSI Registers.
+ */
+#define MSI_MBUS_ARBEN	0xe0001008	/* MBus Arbiter Enable register */
+
+/*
+ * Useful bits in the MSI Registers.
+ */
+#define MSI_ASYNC_MODE  0x80000000	/* Operate the MSI asynchronously */
+
+static void msi_set_sync(void)
+{
+	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
+			      "andn %%g3, %2, %%g3\n\t"
+			      "sta %%g3, [%0] %1\n\t" : :
+			      "r" (MSI_MBUS_ARBEN),
+			      "i" (ASI_M_CTL), "r" (MSI_ASYNC_MODE) : "g3");
+}
+
+void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+	unsigned long ptp;	/* Physical address, shifted right by 4 */
+	int i;
+
+	ptp = __nocache_pa(ptep) >> 4;
+	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
+	}
+}
+
+void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
+{
+	unsigned long ptp;	/* Physical address, shifted right by 4 */
+	int i;
+
+	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
+	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
+		set_pte((pte_t *)&pmdp->pmdv[i], __pte(SRMMU_ET_PTD | ptp));
+		ptp += (SRMMU_REAL_PTRS_PER_PTE * sizeof(pte_t) >> 4);
+	}
+}
+
+/* Find an entry in the third-level page table.. */
+pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
+{
+	void *pte;
+
+	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
+	return (pte_t *) pte +
+	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
+}
+
+/*
+ * size: bytes to allocate in the nocache area.
+ * align: bytes, number to align at.
+ * Returns the virtual address of the allocated area.
+ */
+static void *__srmmu_get_nocache(int size, int align)
+{
+	int offset;
+	unsigned long addr;
+
+	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
+		       size);
+		size = SRMMU_NOCACHE_BITMAP_SHIFT;
+	}
+	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
+		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
+		       size);
+		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
+	}
+	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
+
+	offset = bit_map_string_get(&srmmu_nocache_map,
+				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
+				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
+	if (offset == -1) {
+		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
+		       size, (int) srmmu_nocache_size,
+		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
+		return NULL;
+	}
+
+	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
+	return (void *)addr;
+}
+
+void *srmmu_get_nocache(int size, int align)
+{
+	void *tmp;
+
+	tmp = __srmmu_get_nocache(size, align);
+
+	if (tmp)
+		memset(tmp, 0, size);
+
+	return tmp;
+}
+
+void srmmu_free_nocache(void *addr, int size)
+{
+	unsigned long vaddr;
+	int offset;
+
+	vaddr = (unsigned long)addr;
+	if (vaddr < SRMMU_NOCACHE_VADDR) {
+		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
+		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
+		BUG();
+	}
+	if (vaddr + size > srmmu_nocache_end) {
+		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
+		    vaddr, srmmu_nocache_end);
+		BUG();
+	}
+	if (!is_power_of_2(size)) {
+		printk("Size 0x%x is not a power of 2\n", size);
+		BUG();
+	}
+	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
+		printk("Size 0x%x is too small\n", size);
+		BUG();
+	}
+	if (vaddr & (size - 1)) {
+		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
+		BUG();
+	}
+
+	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
+	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+	bit_map_clear(&srmmu_nocache_map, offset, size);
+}
+
+static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
+						 unsigned long end);
+
+/* Return how much physical memory we have.  */
+static unsigned long __init probe_memory(void)
+{
+	unsigned long total = 0;
+	int i;
+
+	for (i = 0; sp_banks[i].num_bytes; i++)
+		total += sp_banks[i].num_bytes;
+
+	return total;
+}
+
+/*
+ * Reserve nocache dynamically proportionally to the amount of
+ * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
+ */
+static void __init srmmu_nocache_calcsize(void)
+{
+	unsigned long sysmemavail = probe_memory() / 1024;
+	int srmmu_nocache_npages;
+
+	srmmu_nocache_npages =
+		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
+
+	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
+	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
+	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
+		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
+
+	/* anything above 1280 blows up */
+	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
+		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
+
+	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
+	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
+}
+
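+/* Allocate the nocache pool and its allocation bitmap from bootmem,
+ * carve the kernel pgd out of the pool, and map the pool at
+ * SRMMU_NOCACHE_VADDR with privileged (and, if enabled, cacheable) PTEs.
+ */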
+static void __init srmmu_nocache_init(void)
+{
+	void *srmmu_nocache_bitmap;
+	unsigned int bitmap_bits;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long paddr, vaddr;
+	unsigned long pteval;
+
+	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
+
+	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
+		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
+	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
+
+	srmmu_nocache_bitmap =
+		__alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
+				SMP_CACHE_BYTES, 0UL);
+	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
+
+	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
+	init_mm.pgd = srmmu_swapper_pg_dir;
+
+	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
+
+	paddr = __pa((unsigned long)srmmu_nocache_pool);
+	vaddr = SRMMU_NOCACHE_VADDR;
+
+	while (vaddr < srmmu_nocache_end) {
+		pgd = pgd_offset_k(vaddr);
+		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
+		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
+
+		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
+
+		if (srmmu_cache_pagetables)
+			pteval |= SRMMU_CACHE;
+
+		set_pte(__nocache_fix(pte), __pte(pteval));
+
+		vaddr += PAGE_SIZE;
+		paddr += PAGE_SIZE;
+	}
+
+	flush_cache_all();
+	flush_tlb_all();
+}
+
+pgd_t *get_pgd_fast(void)
+{
+	pgd_t *pgd = NULL;
+
+	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
+	if (pgd) {
+		pgd_t *init = pgd_offset_k(0);
+		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+						(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+	}
+
+	return pgd;
+}
+
+/*
+ * Hardware needs alignment to 256 only, but we align to whole page size
+ * to reduce fragmentation problems due to the buddy principle.
+ * XXX Provide actual fragmentation statistics in /proc.
+ *
+ * Alignments up to the page size are the same for physical and virtual
+ * addresses of the nocache area.
+ */
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	unsigned long pte;
+	struct page *page;
+
+	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
+		return NULL;
+	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
+	return page;
+}
+
+void pte_free(struct mm_struct *mm, pgtable_t pte)
+{
+	unsigned long p;
+
+	pgtable_page_dtor(pte);
+	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
+	if (p == 0)
+		BUG();
+	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
+
+	/* free non cached virtual address*/
+	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
+}
+
+/* context handling - a dynamically sized pool is used */
+#define NO_CONTEXT	-1
+
+struct ctx_list {
+	struct ctx_list *next;
+	struct ctx_list *prev;
+	unsigned int ctx_number;
+	struct mm_struct *ctx_mm;
+};
+
+static struct ctx_list *ctx_list_pool;
+static struct ctx_list ctx_free;
+static struct ctx_list ctx_used;
+
+/* At boot time we determine the number of contexts */
+static int num_contexts;
+
+static inline void remove_from_ctx_list(struct ctx_list *entry)
+{
+	entry->next->prev = entry->prev;
+	entry->prev->next = entry->next;
+}
+
+static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
+{
+	entry->next = head;
+	(entry->prev = head->prev)->next = entry;
+	head->prev = entry;
+}
+#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
+#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
+
+
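+/* Grab a context from the free list; if none is left, reclaim one from
+ * the used list (skipping old_mm), flushing that mm's cache and TLB first.
+ */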
+static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
+{
+	struct ctx_list *ctxp;
+
+	ctxp = ctx_free.next;
+	if (ctxp != &ctx_free) {
+		remove_from_ctx_list(ctxp);
+		add_to_used_ctxlist(ctxp);
+		mm->context = ctxp->ctx_number;
+		ctxp->ctx_mm = mm;
+		return;
+	}
+	ctxp = ctx_used.next;
+	if (ctxp->ctx_mm == old_mm)
+		ctxp = ctxp->next;
+	if (ctxp == &ctx_used)
+		panic("out of mmu contexts");
+	flush_cache_mm(ctxp->ctx_mm);
+	flush_tlb_mm(ctxp->ctx_mm);
+	remove_from_ctx_list(ctxp);
+	add_to_used_ctxlist(ctxp);
+	ctxp->ctx_mm->context = NO_CONTEXT;
+	ctxp->ctx_mm = mm;
+	mm->context = ctxp->ctx_number;
+}
+
+static inline void free_context(int context)
+{
+	struct ctx_list *ctx_old;
+
+	ctx_old = ctx_list_pool + context;
+	remove_from_ctx_list(ctx_old);
+	add_to_free_ctxlist(ctx_old);
+}
+
+static void __init sparc_context_init(int numctx)
+{
+	int ctx;
+	unsigned long size;
+
+	size = numctx * sizeof(struct ctx_list);
+	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);
+
+	for (ctx = 0; ctx < numctx; ctx++) {
+		struct ctx_list *clist;
+
+		clist = (ctx_list_pool + ctx);
+		clist->ctx_number = ctx;
+		clist->ctx_mm = NULL;
+	}
+	ctx_free.next = ctx_free.prev = &ctx_free;
+	ctx_used.next = ctx_used.prev = &ctx_used;
+	for (ctx = 0; ctx < numctx; ctx++)
+		add_to_free_ctxlist(ctx_list_pool + ctx);
+}
+
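+/* Activate @mm: allocate a hardware context on first use and point its
+ * context table entry at mm->pgd, then load the context register, with
+ * the extra flushing the LEON and HyperSparc variants require.
+ */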
+void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+	       struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	if (mm->context == NO_CONTEXT) {
+		spin_lock_irqsave(&srmmu_context_spinlock, flags);
+		alloc_context(old_mm, mm);
+		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+	}
+
+	if (sparc_cpu_model == sparc_leon)
+		leon_switch_mm();
+
+	if (is_hypersparc)
+		hyper_flush_whole_icache();
+
+	srmmu_set_context(mm->context);
+}
+
+/* Low level IO area allocation on the SRMMU. */
+static inline void srmmu_mapioaddr(unsigned long physaddr,
+				   unsigned long virt_addr, int bus_type)
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long tmp;
+
+	physaddr &= PAGE_MASK;
+	pgdp = pgd_offset_k(virt_addr);
+	pmdp = pmd_offset(pgdp, virt_addr);
+	ptep = pte_offset_kernel(pmdp, virt_addr);
+	tmp = (physaddr >> 4) | SRMMU_ET_PTE;
+
+	/* I need to test whether this is consistent over all
+	 * sun4m's.  The bus_type represents the upper 4 bits of
+	 * 36-bit physical address on the I/O space lines...
+	 */
+	tmp |= (bus_type << 28);
+	tmp |= SRMMU_PRIV;
+	__flush_page_to_ram(virt_addr);
+	set_pte(ptep, __pte(tmp));
+}
+
+void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
+		      unsigned long xva, unsigned int len)
+{
+	while (len != 0) {
+		len -= PAGE_SIZE;
+		srmmu_mapioaddr(xpa, xva, bus);
+		xva += PAGE_SIZE;
+		xpa += PAGE_SIZE;
+	}
+	flush_tlb_all();
+}
+
+static inline void srmmu_unmapioaddr(unsigned long virt_addr)
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	pgdp = pgd_offset_k(virt_addr);
+	pmdp = pmd_offset(pgdp, virt_addr);
+	ptep = pte_offset_kernel(pmdp, virt_addr);
+
+	/* No need to flush uncacheable page. */
+	__pte_clear(ptep);
+}
+
+void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
+{
+	while (len != 0) {
+		len -= PAGE_SIZE;
+		srmmu_unmapioaddr(virt_addr);
+		virt_addr += PAGE_SIZE;
+	}
+	flush_tlb_all();
+}
+
+/* tsunami.S */
+extern void tsunami_flush_cache_all(void);
+extern void tsunami_flush_cache_mm(struct mm_struct *mm);
+extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void tsunami_flush_page_to_ram(unsigned long page);
+extern void tsunami_flush_page_for_dma(unsigned long page);
+extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void tsunami_flush_tlb_all(void);
+extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
+extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void tsunami_setup_blockops(void);
+
+/* swift.S */
+extern void swift_flush_cache_all(void);
+extern void swift_flush_cache_mm(struct mm_struct *mm);
+extern void swift_flush_cache_range(struct vm_area_struct *vma,
+				    unsigned long start, unsigned long end);
+extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void swift_flush_page_to_ram(unsigned long page);
+extern void swift_flush_page_for_dma(unsigned long page);
+extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void swift_flush_tlb_all(void);
+extern void swift_flush_tlb_mm(struct mm_struct *mm);
+extern void swift_flush_tlb_range(struct vm_area_struct *vma,
+				  unsigned long start, unsigned long end);
+extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+
+#if 0  /* P3: deadwood to debug precise flushes on Swift. */
+void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	int cctx, ctx1;
+
+	page &= PAGE_MASK;
+	if ((ctx1 = vma->vm_mm->context) != -1) {
+		cctx = srmmu_get_context();
+/* Is context # ever different from current context? P3 */
+		if (cctx != ctx1) {
+			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
+			srmmu_set_context(ctx1);
+			swift_flush_page(page);
+			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+					"r" (page), "i" (ASI_M_FLUSH_PROBE));
+			srmmu_set_context(cctx);
+		} else {
+			/* Remove protection bits from the virtual cache. */
+			/* swift_flush_cache_all(); */
+			/* swift_flush_cache_page(vma, page); */
+			swift_flush_page(page);
+
+			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
+				"r" (page), "i" (ASI_M_FLUSH_PROBE));
+			/* same as above: srmmu_flush_tlb_page() */
+		}
+	}
+}
+#endif
+
+/*
+ * The following are all MBUS based SRMMU modules, and therefore could
+ * be found in a multiprocessor configuration.  On the whole, these
+ * chips seems to be much more touchy about DVMA and page tables
+ * with respect to cache coherency.
+ */
+
+/* viking.S */
+extern void viking_flush_cache_all(void);
+extern void viking_flush_cache_mm(struct mm_struct *mm);
+extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+				     unsigned long end);
+extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void viking_flush_page_to_ram(unsigned long page);
+extern void viking_flush_page_for_dma(unsigned long page);
+extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
+extern void viking_flush_page(unsigned long page);
+extern void viking_mxcc_flush_page(unsigned long page);
+extern void viking_flush_tlb_all(void);
+extern void viking_flush_tlb_mm(struct mm_struct *mm);
+extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				   unsigned long end);
+extern void viking_flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long page);
+extern void sun4dsmp_flush_tlb_all(void);
+extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
+extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+				   unsigned long end);
+extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
+				  unsigned long page);
+
+/* hypersparc.S */
+extern void hypersparc_flush_cache_all(void);
+extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
+extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
+extern void hypersparc_flush_page_to_ram(unsigned long page);
+extern void hypersparc_flush_page_for_dma(unsigned long page);
+extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
+extern void hypersparc_flush_tlb_all(void);
+extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
+extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
+extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
+extern void hypersparc_setup_blockops(void);
+
+/*
+ * NOTE: All of this startup code assumes the low 16mb (approx.) of
+ *       kernel mappings are done with one single contiguous chunk of
+ *       ram.  On small ram machines (classics mainly) we only get
+ *       around 8mb mapped for us.
+ */
+
+static void __init early_pgtable_allocfail(char *type)
+{
+	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
+	prom_halt();
+}
+
+static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
+							unsigned long end)
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	while (start < end) {
+		pgdp = pgd_offset_k(start);
+		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+			pmdp = __srmmu_get_nocache(
+			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+			if (pmdp == NULL)
+				early_pgtable_allocfail("pmd");
+			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
+			pgd_set(__nocache_fix(pgdp), pmdp);
+		}
+		pmdp = pmd_offset(__nocache_fix(pgdp), start);
+		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+			if (ptep == NULL)
+				early_pgtable_allocfail("pte");
+			memset(__nocache_fix(ptep), 0, PTE_SIZE);
+			pmd_set(__nocache_fix(pmdp), ptep);
+		}
+		if (start > (0xffffffffUL - PMD_SIZE))
+			break;
+		start = (start + PMD_SIZE) & PMD_MASK;
+	}
+}
+
+static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
+						  unsigned long end)
+{
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+
+	while (start < end) {
+		pgdp = pgd_offset_k(start);
+		if (pgd_none(*pgdp)) {
+			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
+			if (pmdp == NULL)
+				early_pgtable_allocfail("pmd");
+			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
+			pgd_set(pgdp, pmdp);
+		}
+		pmdp = pmd_offset(pgdp, start);
+		if (srmmu_pmd_none(*pmdp)) {
+			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+			if (ptep == NULL)
+				early_pgtable_allocfail("pte");
+			memset(ptep, 0, PTE_SIZE);
+			pmd_set(pmdp, ptep);
+		}
+		if (start > (0xffffffffUL - PMD_SIZE))
+			break;
+		start = (start + PMD_SIZE) & PMD_MASK;
+	}
+}
+
+/* These flush types are not available on all chips... */
+static inline unsigned long srmmu_probe(unsigned long vaddr)
+{
+	unsigned long retval;
+
+	if (sparc_cpu_model != sparc_leon) {
+
+		vaddr &= PAGE_MASK;
+		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
+				     "=r" (retval) :
+				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+	} else {
+		retval = leon_swprobe(vaddr, NULL);
+	}
+	return retval;
+}
+
+/*
+ * This is much cleaner than poking around physical address space
+ * looking at the prom's page table directly which is what most
+ * other OS's do.  Yuck... this is much better.
+ */
+static void __init srmmu_inherit_prom_mappings(unsigned long start,
+					       unsigned long end)
+{
+	unsigned long probed;
+	unsigned long addr;
+	pgd_t *pgdp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	int what; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
+
+	while (start <= end) {
+		if (start == 0)
+			break; /* probably wrap around */
+		if (start == 0xfef00000)
+			start = KADB_DEBUGGER_BEGVM;
+		probed = srmmu_probe(start);
+		if (!probed) {
+			/* continue probing until we find an entry */
+			start += PAGE_SIZE;
+			continue;
+		}
+
+		/* A red snapper, see what it really is. */
+		what = 0;
+		addr = start - PAGE_SIZE;
+
+		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
+			if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
+				what = 1;
+		}
+
+		if (!(start & ~(SRMMU_PGDIR_MASK))) {
+			if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
+				what = 2;
+		}
+
+		pgdp = pgd_offset_k(start);
+		if (what == 2) {
+			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
+			start += SRMMU_PGDIR_SIZE;
+			continue;
+		}
+		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
+			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
+						   SRMMU_PMD_TABLE_SIZE);
+			if (pmdp == NULL)
+				early_pgtable_allocfail("pmd");
+			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
+			pgd_set(__nocache_fix(pgdp), pmdp);
+		}
+		pmdp = pmd_offset(__nocache_fix(pgdp), start);
+		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+			if (ptep == NULL)
+				early_pgtable_allocfail("pte");
+			memset(__nocache_fix(ptep), 0, PTE_SIZE);
+			pmd_set(__nocache_fix(pmdp), ptep);
+		}
+		if (what == 1) {
+			/* We bend the rule where all 16 PTPs in a pmd_t point
+			 * inside the same PTE page, and we leak a perfectly
+			 * good hardware PTE piece. Alternatives seem worse.
+			 */
+			unsigned int x;	/* Index of HW PMD in soft cluster */
+			unsigned long *val;
+			x = (start >> PMD_SHIFT) & 15;
+			val = &pmdp->pmdv[x];
+			*(unsigned long *)__nocache_fix(val) = probed;
+			start += SRMMU_REAL_PMD_SIZE;
+			continue;
+		}
+		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
+		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
+		start += PAGE_SIZE;
+	}
+}
+
+#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
+
+/* Create a third-level SRMMU 16MB page mapping. */
+static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
+{
+	pgd_t *pgdp = pgd_offset_k(vaddr);
+	unsigned long big_pte;
+
+	big_pte = KERNEL_PTE(phys_base >> 4);
+	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
+}
+
+/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
+static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
+{
+	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
+	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
+	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
+	/* Map "low" memory only */
+	const unsigned long min_vaddr = PAGE_OFFSET;
+	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
+
+	if (vstart < min_vaddr || vstart >= max_vaddr)
+		return vstart;
+
+	if (vend > max_vaddr || vend < min_vaddr)
+		vend = max_vaddr;
+
+	while (vstart < vend) {
+		do_large_mapping(vstart, pstart);
+		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
+	}
+	return vstart;
+}
+
+static void __init map_kernel(void)
+{
+	int i;
+
+	if (phys_base > 0) {
+		do_large_mapping(PAGE_OFFSET, phys_base);
+	}
+
+	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
+	}
+}
+
+void (*poke_srmmu)(void) = NULL;
+
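+/* Bring up the SRMMU: size and map the nocache pool, inherit the PROM's
+ * mappings, map the kernel, install and activate the context table, build
+ * the iomap/DVMA/fixmap/pkmap page-table skeletons and hand the memory
+ * ranges over to the zone allocator.
+ */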
+void __init srmmu_paging_init(void)
+{
+	int i;
+	phandle cpunode;
+	char node_str[128];
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long pages_avail;
+
+	init_mm.context = (unsigned long) NO_CONTEXT;
+	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */
+
+	if (sparc_cpu_model == sun4d)
+		num_contexts = 65536; /* We know it is Viking */
+	else {
+		/* Find the number of contexts on the srmmu. */
+		cpunode = prom_getchild(prom_root_node);
+		num_contexts = 0;
+		while (cpunode != 0) {
+			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
+			if (!strcmp(node_str, "cpu")) {
+				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
+				break;
+			}
+			cpunode = prom_getsibling(cpunode);
+		}
+	}
+
+	if (!num_contexts) {
+		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
+		prom_halt();
+	}
+
+	pages_avail = 0;
+	last_valid_pfn = bootmem_init(&pages_avail);
+
+	srmmu_nocache_calcsize();
+	srmmu_nocache_init();
+	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
+	map_kernel();
+
+	/* ctx table has to be physically aligned to its size */
+	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
+	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
+
+	for (i = 0; i < num_contexts; i++)
+		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
+
+	flush_cache_all();
+	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
+#ifdef CONFIG_SMP
+	/* Stop from hanging here... */
+	local_ops->tlb_all();
+#else
+	flush_tlb_all();
+#endif
+	poke_srmmu();
+
+	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
+	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
+
+	srmmu_allocate_ptable_skeleton(
+		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
+	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
+
+	pgd = pgd_offset_k(PKMAP_BASE);
+	pmd = pmd_offset(pgd, PKMAP_BASE);
+	pte = pte_offset_kernel(pmd, PKMAP_BASE);
+	pkmap_page_table = pte;
+
+	flush_cache_all();
+	flush_tlb_all();
+
+	sparc_context_init(num_contexts);
+
+	kmap_init();
+
+	{
+		unsigned long zones_size[MAX_NR_ZONES];
+		unsigned long zholes_size[MAX_NR_ZONES];
+		unsigned long npages;
+		int znum;
+
+		for (znum = 0; znum < MAX_NR_ZONES; znum++)
+			zones_size[znum] = zholes_size[znum] = 0;
+
+		npages = max_low_pfn - pfn_base;
+
+		zones_size[ZONE_DMA] = npages;
+		zholes_size[ZONE_DMA] = npages - pages_avail;
+
+		npages = highend_pfn - max_low_pfn;
+		zones_size[ZONE_HIGHMEM] = npages;
+		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
+
+		free_area_init_node(0, zones_size, pfn_base, zholes_size);
+	}
+}
+
+void mmu_info(struct seq_file *m)
+{
+	seq_printf(m,
+		   "MMU type\t: %s\n"
+		   "contexts\t: %d\n"
+		   "nocache total\t: %ld\n"
+		   "nocache used\t: %d\n",
+		   srmmu_name,
+		   num_contexts,
+		   srmmu_nocache_size,
+		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
+}
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	mm->context = NO_CONTEXT;
+	return 0;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags;
+
+	if (mm->context != NO_CONTEXT) {
+		flush_cache_mm(mm);
+		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
+		flush_tlb_mm(mm);
+		spin_lock_irqsave(&srmmu_context_spinlock, flags);
+		free_context(mm->context);
+		spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+		mm->context = NO_CONTEXT;
+	}
+}
+
+/* Init various srmmu chip types. */
+static void __init srmmu_is_bad(void)
+{
+	prom_printf("Could not determine SRMMU chip type.\n");
+	prom_halt();
+}
+
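+/* Read cache-line-size and cache-nlines from the PROM cpu nodes to work
+ * out the virtually-addressed cache geometry; on SMP keep the largest
+ * cache size and the smallest line size seen across cpus.
+ */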
+static void __init init_vac_layout(void)
+{
+	phandle nd;
+	int cache_lines;
+	char node_str[128];
+#ifdef CONFIG_SMP
+	int cpu = 0;
+	unsigned long max_size = 0;
+	unsigned long min_line_size = 0x10000000;
+#endif
+
+	nd = prom_getchild(prom_root_node);
+	while ((nd = prom_getsibling(nd)) != 0) {
+		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
+		if (!strcmp(node_str, "cpu")) {
+			vac_line_size = prom_getint(nd, "cache-line-size");
+			if (vac_line_size == -1) {
+				prom_printf("can't determine cache-line-size, halting.\n");
+				prom_halt();
+			}
+			cache_lines = prom_getint(nd, "cache-nlines");
+			if (cache_lines == -1) {
+				prom_printf("can't determine cache-nlines, halting.\n");
+				prom_halt();
+			}
+
+			vac_cache_size = cache_lines * vac_line_size;
+#ifdef CONFIG_SMP
+			if (vac_cache_size > max_size)
+				max_size = vac_cache_size;
+			if (vac_line_size < min_line_size)
+				min_line_size = vac_line_size;
+			//FIXME: cpus not contiguous!!
+			cpu++;
+			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
+				break;
+#else
+			break;
+#endif
+		}
+	}
+	if (nd == 0) {
+		prom_printf("No CPU nodes found, halting.\n");
+		prom_halt();
+	}
+#ifdef CONFIG_SMP
+	vac_cache_size = max_size;
+	vac_line_size = min_line_size;
+#endif
+	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
+	       (int)vac_cache_size, (int)vac_line_size);
+}
+
+static void poke_hypersparc(void)
+{
+	volatile unsigned long clear;
+	unsigned long mreg = srmmu_get_mmureg();
+
+	hyper_flush_unconditional_combined();
+
+	mreg &= ~(HYPERSPARC_CWENABLE);
+	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
+	mreg |= (HYPERSPARC_CMODE);
+
+	srmmu_set_mmureg(mreg);
+
+#if 0 /* XXX I think this is bad news... -DaveM */
+	hyper_clear_all_tags();
+#endif
+
+	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
+	hyper_flush_whole_icache();
+	clear = srmmu_get_faddr();
+	clear = srmmu_get_fstatus();
+}
+
+static const struct sparc32_cachetlb_ops hypersparc_ops = {
+	.cache_all	= hypersparc_flush_cache_all,
+	.cache_mm	= hypersparc_flush_cache_mm,
+	.cache_page	= hypersparc_flush_cache_page,
+	.cache_range	= hypersparc_flush_cache_range,
+	.tlb_all	= hypersparc_flush_tlb_all,
+	.tlb_mm		= hypersparc_flush_tlb_mm,
+	.tlb_page	= hypersparc_flush_tlb_page,
+	.tlb_range	= hypersparc_flush_tlb_range,
+	.page_to_ram	= hypersparc_flush_page_to_ram,
+	.sig_insns	= hypersparc_flush_sig_insns,
+	.page_for_dma	= hypersparc_flush_page_for_dma,
+};
+
+static void __init init_hypersparc(void)
+{
+	srmmu_name = "ROSS HyperSparc";
+	srmmu_modtype = HyperSparc;
+
+	init_vac_layout();
+
+	is_hypersparc = 1;
+	sparc32_cachetlb_ops = &hypersparc_ops;
+
+	poke_srmmu = poke_hypersparc;
+
+	hypersparc_setup_blockops();
+}
+
+static void poke_swift(void)
+{
+	unsigned long mreg;
+
+	/* Clear any crap from the cache or else... */
+	swift_flush_cache_all();
+
+	/* Enable I & D caches */
+	mreg = srmmu_get_mmureg();
+	mreg |= (SWIFT_IE | SWIFT_DE);
+	/*
+	 * The Swift branch folding logic is completely broken.  At
+	 * trap time, if things are just right, it can mistakenly
+	 * think that a trap is coming from kernel mode when in fact
+	 * it is coming from user mode (it mis-executes the branch in
+	 * the trap code).  So you see things like crashme completely
+	 * hosing your machine which is completely unacceptable.  Turn
+	 * this shit off... nice job Fujitsu.
+	 */
+	mreg &= ~(SWIFT_BF);
+	srmmu_set_mmureg(mreg);
+}
+
+static const struct sparc32_cachetlb_ops swift_ops = {
+	.cache_all	= swift_flush_cache_all,
+	.cache_mm	= swift_flush_cache_mm,
+	.cache_page	= swift_flush_cache_page,
+	.cache_range	= swift_flush_cache_range,
+	.tlb_all	= swift_flush_tlb_all,
+	.tlb_mm		= swift_flush_tlb_mm,
+	.tlb_page	= swift_flush_tlb_page,
+	.tlb_range	= swift_flush_tlb_range,
+	.page_to_ram	= swift_flush_page_to_ram,
+	.sig_insns	= swift_flush_sig_insns,
+	.page_for_dma	= swift_flush_page_for_dma,
+};
+
+#define SWIFT_MASKID_ADDR  0x10003018
+static void __init init_swift(void)
+{
+	unsigned long swift_rev;
+
+	__asm__ __volatile__("lda [%1] %2, %0\n\t"
+			     "srl %0, 0x18, %0\n\t" :
+			     "=r" (swift_rev) :
+			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
+	srmmu_name = "Fujitsu Swift";
+	switch (swift_rev) {
+	case 0x11:
+	case 0x20:
+	case 0x23:
+	case 0x30:
+		srmmu_modtype = Swift_lots_o_bugs;
+		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
+		/*
+		 * Gee george, I wonder why Sun is so hush hush about
+		 * this hardware bug... really braindamage stuff going
+		 * on here.  However I think we can find a way to avoid
+		 * all of the workaround overhead under Linux.  Basically,
+		 * any page fault can cause kernel pages to become user
+		 * accessible (the mmu gets confused and clears some of
+		 * the ACC bits in kernel ptes).  Aha, sounds pretty
+		 * horrible eh?  But wait, after extensive testing it appears
+		 * that if you use pgd_t level large kernel pte's (like the
+		 * 4MB pages on the Pentium) the bug does not get tripped
+		 * at all.  This avoids almost all of the major overhead.
+		 * Welcome to a world where your vendor tells you to,
+		 * "apply this kernel patch" instead of "sorry for the
+		 * broken hardware, send it back and we'll give you
+		 * properly functioning parts"
+		 */
+		break;
+	case 0x25:
+	case 0x31:
+		srmmu_modtype = Swift_bad_c;
+		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
+		/*
+		 * You see Sun allude to this hardware bug but never
+		 * admit things directly, they'll say things like,
+		 * "the Swift chip cache problems" or similar.
+		 */
+		break;
+	default:
+		srmmu_modtype = Swift_ok;
+		break;
+	}
+
+	sparc32_cachetlb_ops = &swift_ops;
+	flush_page_for_dma_global = 0;
+
+	/*
+	 * Are you now convinced that the Swift is one of the
+	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
+	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
+	 * you examined the microcode of the Swift you'd find
+	 * XXX's all over the place.
+	 */
+	poke_srmmu = poke_swift;
+}
+
+static void turbosparc_flush_cache_all(void)
+{
+	flush_user_windows();
+	turbosparc_idflash_clear();
+}
+
+static void turbosparc_flush_cache_mm(struct mm_struct *mm)
+{
+	FLUSH_BEGIN(mm)
+	flush_user_windows();
+	turbosparc_idflash_clear();
+	FLUSH_END
+}
+
+static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	FLUSH_BEGIN(vma->vm_mm)
+	flush_user_windows();
+	turbosparc_idflash_clear();
+	FLUSH_END
+}
+
+static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	FLUSH_BEGIN(vma->vm_mm)
+	flush_user_windows();
+	if (vma->vm_flags & VM_EXEC)
+		turbosparc_flush_icache();
+	turbosparc_flush_dcache();
+	FLUSH_END
+}
+
+/* TurboSparc is copy-back, if we turn it on, but this does not work. */
+static void turbosparc_flush_page_to_ram(unsigned long page)
+{
+#ifdef TURBOSPARC_WRITEBACK
+	volatile unsigned long clear;
+
+	if (srmmu_probe(page))
+		turbosparc_flush_page_cache(page);
+	clear = srmmu_get_fstatus();
+#endif
+}
+
+static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+}
+
+static void turbosparc_flush_page_for_dma(unsigned long page)
+{
+	turbosparc_flush_dcache();
+}
+
+static void turbosparc_flush_tlb_all(void)
+{
+	srmmu_flush_whole_tlb();
+}
+
+static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
+{
+	FLUSH_BEGIN(mm)
+	srmmu_flush_whole_tlb();
+	FLUSH_END
+}
+
+static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+	FLUSH_BEGIN(vma->vm_mm)
+	srmmu_flush_whole_tlb();
+	FLUSH_END
+}
+
+static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	FLUSH_BEGIN(vma->vm_mm)
+	srmmu_flush_whole_tlb();
+	FLUSH_END
+}
+
+
+static void poke_turbosparc(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+	unsigned long ccreg;
+
+	/* Clear any crap from the cache or else... */
+	turbosparc_flush_cache_all();
+	/* Temporarily disable I & D caches */
+	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
+	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
+	srmmu_set_mmureg(mreg);
+
+	ccreg = turbosparc_get_ccreg();
+
+#ifdef TURBOSPARC_WRITEBACK
+	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
+	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
+			/* Write-back D-cache, emulate VLSI
+			 * abortion number three, not number one */
+#else
+	/* For now let's play safe, optimize later */
+	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
+			/* Do DVMA snooping in Dcache, Write-thru D-cache */
+	ccreg &= ~(TURBOSPARC_uS2);
+			/* Emulate VLSI abortion number three, not number one */
+#endif
+
+	switch (ccreg & 7) {
+	case 0: /* No SE cache */
+	case 7: /* Test mode */
+		break;
+	default:
+		ccreg |= (TURBOSPARC_SCENABLE);
+	}
+	turbosparc_set_ccreg(ccreg);
+
+	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
+	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
+	srmmu_set_mmureg(mreg);
+}
+
+static const struct sparc32_cachetlb_ops turbosparc_ops = {
+	.cache_all	= turbosparc_flush_cache_all,
+	.cache_mm	= turbosparc_flush_cache_mm,
+	.cache_page	= turbosparc_flush_cache_page,
+	.cache_range	= turbosparc_flush_cache_range,
+	.tlb_all	= turbosparc_flush_tlb_all,
+	.tlb_mm		= turbosparc_flush_tlb_mm,
+	.tlb_page	= turbosparc_flush_tlb_page,
+	.tlb_range	= turbosparc_flush_tlb_range,
+	.page_to_ram	= turbosparc_flush_page_to_ram,
+	.sig_insns	= turbosparc_flush_sig_insns,
+	.page_for_dma	= turbosparc_flush_page_for_dma,
+};
+
+static void __init init_turbosparc(void)
+{
+	srmmu_name = "Fujitsu TurboSparc";
+	srmmu_modtype = TurboSparc;
+	sparc32_cachetlb_ops = &turbosparc_ops;
+	poke_srmmu = poke_turbosparc;
+}
+
+static void poke_tsunami(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+
+	tsunami_flush_icache();
+	tsunami_flush_dcache();
+	mreg &= ~TSUNAMI_ITD;
+	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
+	srmmu_set_mmureg(mreg);
+}
+
+static const struct sparc32_cachetlb_ops tsunami_ops = {
+	.cache_all	= tsunami_flush_cache_all,
+	.cache_mm	= tsunami_flush_cache_mm,
+	.cache_page	= tsunami_flush_cache_page,
+	.cache_range	= tsunami_flush_cache_range,
+	.tlb_all	= tsunami_flush_tlb_all,
+	.tlb_mm		= tsunami_flush_tlb_mm,
+	.tlb_page	= tsunami_flush_tlb_page,
+	.tlb_range	= tsunami_flush_tlb_range,
+	.page_to_ram	= tsunami_flush_page_to_ram,
+	.sig_insns	= tsunami_flush_sig_insns,
+	.page_for_dma	= tsunami_flush_page_for_dma,
+};
+
+static void __init init_tsunami(void)
+{
+	/*
+	 * Tsunami's pretty sane, Sun and TI actually got it
+	 * somewhat right this time.  Fujitsu should have
+	 * taken some lessons from them.
+	 */
+
+	srmmu_name = "TI Tsunami";
+	srmmu_modtype = Tsunami;
+	sparc32_cachetlb_ops = &tsunami_ops;
+	poke_srmmu = poke_tsunami;
+
+	tsunami_setup_blockops();
+}
+
+static void poke_viking(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+	static int smp_catch;
+
+	if (viking_mxcc_present) {
+		unsigned long mxcc_control = mxcc_get_creg();
+
+		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
+		mxcc_control &= ~(MXCC_CTL_RRC);
+		mxcc_set_creg(mxcc_control);
+
+		/*
+		 * We don't need memory parity checks.
+		 * XXX This is a mess, have to dig out later. ecd.
+		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
+		 */
+
+		/* We do cache ptables on MXCC. */
+		mreg |= VIKING_TCENABLE;
+	} else {
+		unsigned long bpreg;
+
+		mreg &= ~(VIKING_TCENABLE);
+		if (smp_catch++) {
+			/* Must disable mixed-cmd mode here for other cpus. */
+			bpreg = viking_get_bpreg();
+			bpreg &= ~(VIKING_ACTION_MIX);
+			viking_set_bpreg(bpreg);
+
+			/* Just in case PROM does something funny. */
+			msi_set_sync();
+		}
+	}
+
+	mreg |= VIKING_SPENABLE;
+	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
+	mreg |= VIKING_SBENABLE;
+	mreg &= ~(VIKING_ACENABLE);
+	srmmu_set_mmureg(mreg);
+}
+
+static struct sparc32_cachetlb_ops viking_ops __ro_after_init = {
+	.cache_all	= viking_flush_cache_all,
+	.cache_mm	= viking_flush_cache_mm,
+	.cache_page	= viking_flush_cache_page,
+	.cache_range	= viking_flush_cache_range,
+	.tlb_all	= viking_flush_tlb_all,
+	.tlb_mm		= viking_flush_tlb_mm,
+	.tlb_page	= viking_flush_tlb_page,
+	.tlb_range	= viking_flush_tlb_range,
+	.page_to_ram	= viking_flush_page_to_ram,
+	.sig_insns	= viking_flush_sig_insns,
+	.page_for_dma	= viking_flush_page_for_dma,
+};
+
+#ifdef CONFIG_SMP
+/* On sun4d the cpu broadcasts local TLB flushes, so we can just
+ * perform the local TLB flush and all the other cpus will see it.
+ * But, unfortunately, there is a bug in the sun4d XBUS backplane
+ * that requires that we add some synchronization to these flushes.
+ *
+ * The bug is that the fifo which keeps track of all the pending TLB
+ * broadcasts in the system is an entry or two too small, so if we
+ * have too many going at once we'll overflow that fifo and lose a TLB
+ * flush resulting in corruption.
+ *
+ * Our workaround is to take a global spinlock around the TLB flushes,
+ * which guarantees we won't ever have too many pending.  It's a big
+ * hammer, but a semaphore-like system to make sure we only have N TLB
+ * flushes going at once would require SMP locking anyway, so there's
+ * no real value in trying any harder than this.
+ */
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops __ro_after_init = {
+	.cache_all	= viking_flush_cache_all,
+	.cache_mm	= viking_flush_cache_mm,
+	.cache_page	= viking_flush_cache_page,
+	.cache_range	= viking_flush_cache_range,
+	.tlb_all	= sun4dsmp_flush_tlb_all,
+	.tlb_mm		= sun4dsmp_flush_tlb_mm,
+	.tlb_page	= sun4dsmp_flush_tlb_page,
+	.tlb_range	= sun4dsmp_flush_tlb_range,
+	.page_to_ram	= viking_flush_page_to_ram,
+	.sig_insns	= viking_flush_sig_insns,
+	.page_for_dma	= viking_flush_page_for_dma,
+};
+#endif
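+
+/* Illustrative sketch, not from the original file: the shape of the
+ * serialized flush described in the sun4d XBUS workaround comment above.
+ * The lock and function names below are assumptions made purely for
+ * illustration; the real sun4dsmp_flush_tlb_*() helpers referenced by the
+ * ops table above are not shown in this excerpt.
+ */
+static DEFINE_SPINLOCK(sun4d_tlb_broadcast_lock);	/* assumed name */
+
+static void sun4dsmp_flush_tlb_all_sketch(void)
+{
+	unsigned long flags;
+
+	/* Only one broadcast flush in flight, so the XBUS fifo cannot overflow. */
+	spin_lock_irqsave(&sun4d_tlb_broadcast_lock, flags);
+	srmmu_flush_whole_tlb();	/* broadcast to the other cpus by the hardware */
+	spin_unlock_irqrestore(&sun4d_tlb_broadcast_lock, flags);
+}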
+
+static void __init init_viking(void)
+{
+	unsigned long mreg = srmmu_get_mmureg();
+
+	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
+	if (mreg & VIKING_MMODE) {
+		srmmu_name = "TI Viking";
+		viking_mxcc_present = 0;
+		msi_set_sync();
+
+		/*
+		 * We need this to make sure the old viking takes no hits
+		 * on its cache for DMA snoops, to work around the
+		 * "load from non-cacheable memory" interrupt bug.
+		 * This is only necessary because of the new way in
+		 * which we use the IOMMU.
+		 */
+		viking_ops.page_for_dma = viking_flush_page;
+#ifdef CONFIG_SMP
+		viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
+#endif
+		flush_page_for_dma_global = 0;
+	} else {
+		srmmu_name = "TI Viking/MXCC";
+		viking_mxcc_present = 1;
+		srmmu_cache_pagetables = 1;
+	}
+
+	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+		&viking_ops;
+#ifdef CONFIG_SMP
+	if (sparc_cpu_model == sun4d)
+		sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+			&viking_sun4d_smp_ops;
+#endif
+
+	poke_srmmu = poke_viking;
+}
+
+/* Probe for the srmmu chip version. */
+static void __init get_srmmu_type(void)
+{
+	unsigned long mreg, psr;
+	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
+
+	srmmu_modtype = SRMMU_INVAL_MOD;
+	hwbug_bitmask = 0;
+
+	mreg = srmmu_get_mmureg(); psr = get_psr();
+	mod_typ = (mreg & 0xf0000000) >> 28;
+	mod_rev = (mreg & 0x0f000000) >> 24;
+	psr_typ = (psr >> 28) & 0xf;
+	psr_vers = (psr >> 24) & 0xf;
+
+	/* First, check for sparc-leon. */
+	if (sparc_cpu_model == sparc_leon) {
+		init_leon();
+		return;
+	}
+
+	/* Second, check for HyperSparc or Cypress. */
+	if (mod_typ == 1) {
+		switch (mod_rev) {
+		case 7:
+			/* UP or MP Hypersparc */
+			init_hypersparc();
+			break;
+		case 0:
+		case 2:
+		case 10:
+		case 11:
+		case 12:
+		case 13:
+		case 14:
+		case 15:
+		default:
+			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
+			prom_halt();
+			break;
+		}
+		return;
+	}
+
+	/* Now Fujitsu TurboSparc. It might happen that it is
+	 * in Swift emulation mode, so we will check later...
+	 */
+	if (psr_typ == 0 && psr_vers == 5) {
+		init_turbosparc();
+		return;
+	}
+
+	/* Next check for Fujitsu Swift. */
+	if (psr_typ == 0 && psr_vers == 4) {
+		phandle cpunode;
+		char node_str[128];
+
+		/* Check whether it is really a TurboSparc emulating Swift... */
+		cpunode = prom_getchild(prom_root_node);
+		while ((cpunode = prom_getsibling(cpunode)) != 0) {
+			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
+			if (!strcmp(node_str, "cpu")) {
+				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
+				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
+					init_turbosparc();
+					return;
+				}
+				break;
+			}
+		}
+
+		init_swift();
+		return;
+	}
+
+	/* Now the Viking family of srmmu. */
+	if (psr_typ == 4 &&
+	   ((psr_vers == 0) ||
+	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
+		init_viking();
+		return;
+	}
+
+	/* Finally the Tsunami. */
+	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
+		init_tsunami();
+		return;
+	}
+
+	/* Oh well */
+	srmmu_is_bad();
+}
+
+#ifdef CONFIG_SMP
+/* Local cross-calls. */
+static void smp_flush_page_for_dma(unsigned long page)
+{
+	xc1((smpfunc_t) local_ops->page_for_dma, page);
+	local_ops->page_for_dma(page);
+}
+
+static void smp_flush_cache_all(void)
+{
+	xc0((smpfunc_t) local_ops->cache_all);
+	local_ops->cache_all();
+}
+
+static void smp_flush_tlb_all(void)
+{
+	xc0((smpfunc_t) local_ops->tlb_all);
+	local_ops->tlb_all();
+}
+
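+/* Each of the mm/range/page helpers below follows the same pattern: copy
+ * the mm's cpumask, drop the current cpu from it, cross-call the local op
+ * on whatever cpus remain (xc1/xc2/xc3 carry one, two or three arguments),
+ * and then run the local op on this cpu directly.
+ */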
+static void smp_flush_cache_mm(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+		local_ops->cache_mm(mm);
+	}
+}
+
+static void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask)) {
+			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+				cpumask_copy(mm_cpumask(mm),
+					     cpumask_of(smp_processor_id()));
+		}
+		local_ops->tlb_mm(mm);
+	}
+}
+
+static void smp_flush_cache_range(struct vm_area_struct *vma,
+				  unsigned long start,
+				  unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc3((smpfunc_t) local_ops->cache_range,
+			    (unsigned long) vma, start, end);
+		local_ops->cache_range(vma, start, end);
+	}
+}
+
+static void smp_flush_tlb_range(struct vm_area_struct *vma,
+				unsigned long start,
+				unsigned long end)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc3((smpfunc_t) local_ops->tlb_range,
+			    (unsigned long) vma, start, end);
+		local_ops->tlb_range(vma, start, end);
+	}
+}
+
+static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc2((smpfunc_t) local_ops->cache_page,
+			    (unsigned long) vma, page);
+		local_ops->cache_page(vma, page);
+	}
+}
+
+static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+	struct mm_struct *mm = vma->vm_mm;
+
+	if (mm->context != NO_CONTEXT) {
+		cpumask_t cpu_mask;
+		cpumask_copy(&cpu_mask, mm_cpumask(mm));
+		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+		if (!cpumask_empty(&cpu_mask))
+			xc2((smpfunc_t) local_ops->tlb_page,
+			    (unsigned long) vma, page);
+		local_ops->tlb_page(vma, page);
+	}
+}
+
+static void smp_flush_page_to_ram(unsigned long page)
+{
+	/* The current theory is that those who call this are the ones
+	 * who have just dirtied their cache with the page's contents
+	 * in kernel space, so we only run this on the local cpu.
+	 *
+	 * XXX This experiment failed, research further... -DaveM
+	 */
+#if 1
+	xc1((smpfunc_t) local_ops->page_to_ram, page);
+#endif
+	local_ops->page_to_ram(page);
+}
+
+static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+	cpumask_t cpu_mask;
+	cpumask_copy(&cpu_mask, mm_cpumask(mm));
+	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+	if (!cpumask_empty(&cpu_mask))
+		xc2((smpfunc_t) local_ops->sig_insns,
+		    (unsigned long) mm, insn_addr);
+	local_ops->sig_insns(mm, insn_addr);
+}
+
+static struct sparc32_cachetlb_ops smp_cachetlb_ops __ro_after_init = {
+	.cache_all	= smp_flush_cache_all,
+	.cache_mm	= smp_flush_cache_mm,
+	.cache_page	= smp_flush_cache_page,
+	.cache_range	= smp_flush_cache_range,
+	.tlb_all	= smp_flush_tlb_all,
+	.tlb_mm		= smp_flush_tlb_mm,
+	.tlb_page	= smp_flush_tlb_page,
+	.tlb_range	= smp_flush_tlb_range,
+	.page_to_ram	= smp_flush_page_to_ram,
+	.sig_insns	= smp_flush_sig_insns,
+	.page_for_dma	= smp_flush_page_for_dma,
+};
+#endif
+
+/* Load up routines and constants for sun4m and sun4d mmu */
+void __init load_mmu(void)
+{
+	/* Functions */
+	get_srmmu_type();
+
+#ifdef CONFIG_SMP
+	/* El switcheroo... */
+	local_ops = sparc32_cachetlb_ops;
+
+	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
+		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
+		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
+		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
+		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
+	}
+
+	if (poke_srmmu == poke_viking) {
+		/* Avoid unnecessary cross calls. */
+		smp_cachetlb_ops.cache_all = local_ops->cache_all;
+		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
+		smp_cachetlb_ops.cache_range = local_ops->cache_range;
+		smp_cachetlb_ops.cache_page = local_ops->cache_page;
+
+		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
+		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
+		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
+	}
+
+	/* It really is const after this point. */
+	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+		&smp_cachetlb_ops;
+#endif
+
+	if (sparc_cpu_model == sun4d)
+		ld_mmu_iounit();
+	else
+		ld_mmu_iommu();
+#ifdef CONFIG_SMP
+	if (sparc_cpu_model == sun4d)
+		sun4d_init_smp();
+	else if (sparc_cpu_model == sparc_leon)
+		leon_init_smp();
+	else
+		sun4m_init_smp();
+#endif
+}
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644
index 0000000..d8d2e64
--- /dev/null
+++ b/arch/sparc/mm/srmmu_access.S
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Assembler variants of srmmu access functions.
+ * Implemented in assembler to allow run-time patching.
+ * LEON uses a different ASI for MMUREGS than SUN.
+ *
+ * The leon_1insn_patch infrastructure is used
+ * for the run-time patching.
+ */
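+
+/* Illustrative model (names assumed, not part of the original source): each
+ * LEON_PI()/SUN_PI_() pair below emits one variant of the load/store inline
+ * and records enough information for boot code to rewrite that single
+ * instruction with the other variant once the CPU type is known.  Applying
+ * such a table could look roughly like:
+ *
+ *	struct one_insn_patch {			(assumed layout)
+ *		unsigned int addr;		(address of the instruction)
+ *		unsigned int insn;		(replacement instruction word)
+ *	};
+ *
+ *	for (p = start; p < end; p++)
+ *		*(unsigned int *)(unsigned long)p->addr = p->insn;
+ *
+ * The real leon_1insn_patch table format and the code that walks it live
+ * elsewhere in the sparc32 kernel, not in this file.
+ */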
+
+#include <linux/linkage.h>
+
+#include <asm/asmmacro.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asi.h>
+
+/* unsigned int srmmu_get_mmureg(void) */
+ENTRY(srmmu_get_mmureg)
+LEON_PI(lda	[%g0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%g0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_mmureg)
+
+/* void srmmu_set_mmureg(unsigned long regval) */
+ENTRY(srmmu_set_mmureg)
+LEON_PI(sta	%o0, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%o0, [%g0] ASI_M_MMUREGS)
+	retl
+	 nop
+ENDPROC(srmmu_set_mmureg)
+
+/* void srmmu_set_ctable_ptr(unsigned long paddr) */
+ENTRY(srmmu_set_ctable_ptr)
+	/* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
+	srl	%o0, 4, %g1
+	and	%g1, SRMMU_CTX_PMASK, %g1
+
+	mov	SRMMU_CTXTBL_PTR, %g2
+LEON_PI(sta	%g1, [%g2] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%g1, [%g2] ASI_M_MMUREGS)
+	retl
+	 nop
+ENDPROC(srmmu_set_ctable_ptr)
+
+
+/* void srmmu_set_context(int context) */
+ENTRY(srmmu_set_context)
+	mov	SRMMU_CTX_REG, %g1
+LEON_PI(sta	%o0, [%g1] ASI_LEON_MMUREGS)
+SUN_PI_(sta	%o0, [%g1] ASI_M_MMUREGS)
+	retl
+	 nop
+ENDPROC(srmmu_set_context)
+
+
+/* int srmmu_get_context(void) */
+ENTRY(srmmu_get_context)
+	mov	SRMMU_CTX_REG, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_context)
+
+
+/* unsigned int srmmu_get_fstatus(void) */
+ENTRY(srmmu_get_fstatus)
+	mov	SRMMU_FAULT_STATUS, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_fstatus)
+
+
+/* unsigned int srmmu_get_faddr(void) */
+ENTRY(srmmu_get_faddr)
+	mov	SRMMU_FAULT_ADDR, %o0
+LEON_PI(lda     [%o0] ASI_LEON_MMUREGS, %o0)
+SUN_PI_(lda	[%o0] ASI_M_MMUREGS, %o0)
+	retl
+	 nop
+ENDPROC(srmmu_get_faddr)
diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S
new file mode 100644
index 0000000..f414bfd
--- /dev/null
+++ b/arch/sparc/mm/swift.S
@@ -0,0 +1,256 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * swift.S: MicroSparc-II mmu/cache operations.
+ *
+ * Copyright (C) 1999 David S. Miller (davem@redhat.com)
+ */
+
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.align	4
+
+#if 1	/* XXX screw this, I can't get the VAC flushes working
+	 * XXX reliably... -DaveM
+	 */
+	.globl	swift_flush_cache_all, swift_flush_cache_mm
+	.globl	swift_flush_cache_range, swift_flush_cache_page
+	.globl	swift_flush_page_for_dma
+	.globl	swift_flush_page_to_ram
+
+swift_flush_cache_all:
+swift_flush_cache_mm:
+swift_flush_cache_range:
+swift_flush_cache_page:
+swift_flush_page_for_dma:
+swift_flush_page_to_ram:
+	sethi	%hi(0x2000), %o0
+1:	subcc	%o0, 0x10, %o0
+	add	%o0, %o0, %o1
+	sta	%g0, [%o0] ASI_M_DATAC_TAG
+	bne	1b
+	 sta	%g0, [%o1] ASI_M_TXTC_TAG
+	retl
+	 nop
+#else
+
+	.globl	swift_flush_cache_all
+swift_flush_cache_all:
+	WINDOW_FLUSH(%g4, %g5)
+
+	/* Just clear out all the tags. */
+	sethi	%hi(16 * 1024), %o0
+1:	subcc	%o0, 16, %o0
+	sta	%g0, [%o0] ASI_M_TXTC_TAG
+	bne	1b
+	 sta	%g0, [%o0] ASI_M_DATAC_TAG
+	retl
+	 nop
+
+	.globl	swift_flush_cache_mm
+swift_flush_cache_mm:
+	ld	[%o0 + AOFF_mm_context], %g2
+	cmp	%g2, -1
+	be	swift_flush_cache_mm_out
+	WINDOW_FLUSH(%g4, %g5)
+	rd	%psr, %g1
+	andn	%g1, PSR_ET, %g3
+	wr	%g3, 0x0, %psr
+	nop
+	nop
+	mov	SRMMU_CTX_REG, %g7
+	lda	[%g7] ASI_M_MMUREGS, %g5
+	sta	%g2, [%g7] ASI_M_MMUREGS
+
+#if 1
+	sethi	%hi(0x2000), %o0
+1:	subcc	%o0, 0x10, %o0
+	sta	%g0, [%o0] ASI_M_FLUSH_CTX
+	bne	1b
+	 nop
+#else
+	clr	%o0
+	or	%g0, 2048, %g7
+	or	%g0, 2048, %o1
+	add	%o1, 2048, %o2
+	add	%o2, 2048, %o3
+	mov	16, %o4
+	add	%o4, 2048, %o5
+	add	%o5, 2048, %g2
+	add	%g2, 2048, %g3
+1:	sta	%g0, [%o0      ] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %o1] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %o2] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %o3] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %o4] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %o5] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %g2] ASI_M_FLUSH_CTX
+	sta	%g0, [%o0 + %g3] ASI_M_FLUSH_CTX
+	subcc	%g7, 32, %g7
+	bne	1b
+	 add	%o0, 32, %o0
+#endif
+
+	mov	SRMMU_CTX_REG, %g7
+	sta	%g5, [%g7] ASI_M_MMUREGS
+	wr	%g1, 0x0, %psr
+	nop
+	nop
+swift_flush_cache_mm_out:
+	retl
+	 nop
+
+	.globl	swift_flush_cache_range
+swift_flush_cache_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+	sub	%o2, %o1, %o2
+	sethi	%hi(4096), %o3
+	cmp	%o2, %o3
+	bgu	swift_flush_cache_mm
+	 nop
+	b	70f
+	 nop
+
+	.globl	swift_flush_cache_page
+swift_flush_cache_page:
+	ld	[%o0 + VMA_VM_MM], %o0
+70:
+	ld	[%o0 + AOFF_mm_context], %g2
+	cmp	%g2, -1
+	be	swift_flush_cache_page_out
+	WINDOW_FLUSH(%g4, %g5)
+	rd	%psr, %g1
+	andn	%g1, PSR_ET, %g3
+	wr	%g3, 0x0, %psr
+	nop
+	nop
+	mov	SRMMU_CTX_REG, %g7
+	lda	[%g7] ASI_M_MMUREGS, %g5
+	sta	%g2, [%g7] ASI_M_MMUREGS
+
+	andn	%o1, (PAGE_SIZE - 1), %o1
+#if 1
+	sethi	%hi(0x1000), %o0
+1:	subcc	%o0, 0x10, %o0
+	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+	bne	1b
+	 nop
+#else
+	or	%g0, 512, %g7
+	or	%g0, 512, %o0
+	add	%o0, 512, %o2
+	add	%o2, 512, %o3
+	add	%o3, 512, %o4
+	add	%o4, 512, %o5
+	add	%o5, 512, %g3
+	add	%g3, 512, %g4
+1:	sta	%g0, [%o1      ] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+	subcc	%g7, 16, %g7
+	bne	1b
+	 add	%o1, 16, %o1
+#endif
+
+	mov	SRMMU_CTX_REG, %g7
+	sta	%g5, [%g7] ASI_M_MMUREGS
+	wr	%g1, 0x0, %psr
+	nop
+	nop
+swift_flush_cache_page_out:
+	retl
+	 nop
+
+	/* Swift is write-through; however, it is neither
+	 * I/O nor TLB-walk coherent.  It also has
+	 * caches which are virtually indexed and tagged.
+	 */
+	.globl	swift_flush_page_for_dma
+	.globl	swift_flush_page_to_ram
+swift_flush_page_for_dma:
+swift_flush_page_to_ram:
+	andn	%o0, (PAGE_SIZE - 1), %o1
+#if 1
+	sethi	%hi(0x1000), %o0
+1:	subcc	%o0, 0x10, %o0
+	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+	bne	1b
+	 nop
+#else
+	or	%g0, 512, %g7
+	or	%g0, 512, %o0
+	add	%o0, 512, %o2
+	add	%o2, 512, %o3
+	add	%o3, 512, %o4
+	add	%o4, 512, %o5
+	add	%o5, 512, %g3
+	add	%g3, 512, %g4
+1:	sta	%g0, [%o1      ] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o0] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o2] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o3] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o4] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %o5] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g3] ASI_M_FLUSH_PAGE
+	sta	%g0, [%o1 + %g4] ASI_M_FLUSH_PAGE
+	subcc	%g7, 16, %g7
+	bne	1b
+	 add	%o1, 16, %o1
+#endif
+	retl
+	 nop
+#endif
+
+	.globl	swift_flush_sig_insns
+swift_flush_sig_insns:
+	flush	%o1
+	retl
+	 flush	%o1 + 4
+
+	.globl	swift_flush_tlb_mm
+	.globl	swift_flush_tlb_range
+	.globl	swift_flush_tlb_all
+swift_flush_tlb_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+swift_flush_tlb_mm:
+	ld	[%o0 + AOFF_mm_context], %g2
+	cmp	%g2, -1
+	be	swift_flush_tlb_all_out
+swift_flush_tlb_all:
+	mov	0x400, %o1
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+swift_flush_tlb_all_out:
+	retl
+	 nop
+
+	.globl	swift_flush_tlb_page
+swift_flush_tlb_page:
+	ld	[%o0 + VMA_VM_MM], %o0
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o3
+	andn	%o1, (PAGE_SIZE - 1), %o1
+	cmp	%o3, -1
+	be	swift_flush_tlb_page_out
+	 nop
+#if 1
+	mov	0x400, %o1
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE	
+#else
+	lda	[%g1] ASI_M_MMUREGS, %g5
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%o1] ASI_M_FLUSH_PAGE	/* rem. virt. cache. prot. */
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	sta	%g5, [%g1] ASI_M_MMUREGS
+#endif
+swift_flush_tlb_page_out:
+	retl
+	 nop
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
new file mode 100644
index 0000000..3d72d2d
--- /dev/null
+++ b/arch/sparc/mm/tlb.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0
+/* arch/sparc64/mm/tlb.c
+ *
+ * Copyright (C) 2004 David S. Miller <davem@redhat.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/preempt.h>
+
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+#include <asm/tlb.h>
+
+/* Heavily inspired by the ppc64 code.  */
+
+static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
+
+void flush_tlb_pending(void)
+{
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+	struct mm_struct *mm = tb->mm;
+
+	if (!tb->tlb_nr)
+		goto out;
+
+	flush_tsb_user(tb);
+
+	if (CTX_VALID(mm->context)) {
+		if (tb->tlb_nr == 1) {
+			global_flush_tlb_page(mm, tb->vaddrs[0]);
+		} else {
+#ifdef CONFIG_SMP
+			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
+					      &tb->vaddrs[0]);
+#else
+			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
+					    tb->tlb_nr, &tb->vaddrs[0]);
+#endif
+		}
+	}
+
+	tb->tlb_nr = 0;
+
+out:
+	put_cpu_var(tlb_batch);
+}
+
+void arch_enter_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+
+	tb->active = 1;
+}
+
+void arch_leave_lazy_mmu_mode(void)
+{
+	struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
+
+	if (tb->tlb_nr)
+		flush_tlb_pending();
+	tb->active = 0;
+}
+
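+/* Typical lifecycle: generic mm code brackets a run of PTE updates with
+ * arch_enter_lazy_mmu_mode()/arch_leave_lazy_mmu_mode().  While tb->active
+ * is set, tlb_batch_add_one() below only queues addresses and the flush is
+ * deferred until the batch fills up (TLB_BATCH_NR) or the lazy region ends;
+ * outside a lazy region the flush happens immediately.
+ */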
+static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
+			      bool exec, unsigned int hugepage_shift)
+{
+	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
+	unsigned long nr;
+
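+	/* Bit 0 of the queued address flags an executable mapping; the
+	 * low-level demap code (__flush_tlb_page/__flush_tlb_pending in
+	 * ultra.S) tests it to decide whether the I-MMU must be flushed
+	 * in addition to the D-MMU.
+	 */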
+	vaddr &= PAGE_MASK;
+	if (exec)
+		vaddr |= 0x1UL;
+
+	nr = tb->tlb_nr;
+
+	if (unlikely(nr != 0 && mm != tb->mm)) {
+		flush_tlb_pending();
+		nr = 0;
+	}
+
+	if (!tb->active) {
+		flush_tsb_user_page(mm, vaddr, hugepage_shift);
+		global_flush_tlb_page(mm, vaddr);
+		goto out;
+	}
+
+	if (nr == 0) {
+		tb->mm = mm;
+		tb->hugepage_shift = hugepage_shift;
+	}
+
+	if (tb->hugepage_shift != hugepage_shift) {
+		flush_tlb_pending();
+		tb->hugepage_shift = hugepage_shift;
+		nr = 0;
+	}
+
+	tb->vaddrs[nr] = vaddr;
+	tb->tlb_nr = ++nr;
+	if (nr >= TLB_BATCH_NR)
+		flush_tlb_pending();
+
+out:
+	put_cpu_var(tlb_batch);
+}
+
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+		   pte_t *ptep, pte_t orig, int fullmm,
+		   unsigned int hugepage_shift)
+{
+	if (tlb_type != hypervisor &&
+	    pte_dirty(orig)) {
+		unsigned long paddr, pfn = pte_pfn(orig);
+		struct address_space *mapping;
+		struct page *page;
+
+		if (!pfn_valid(pfn))
+			goto no_cache_flush;
+
+		page = pfn_to_page(pfn);
+		if (PageReserved(page))
+			goto no_cache_flush;
+
+		/* A real file page? */
+		mapping = page_mapping_file(page);
+		if (!mapping)
+			goto no_cache_flush;
+
+		paddr = (unsigned long) page_address(page);
+		if ((paddr ^ vaddr) & (1 << 13))
+			flush_dcache_page_all(mm, page);
+	}
+
+no_cache_flush:
+	if (!fullmm)
+		tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
+			       pmd_t pmd)
+{
+	unsigned long end;
+	pte_t *pte;
+
+	pte = pte_offset_map(&pmd, vaddr);
+	end = vaddr + HPAGE_SIZE;
+	while (vaddr < end) {
+		if (pte_val(*pte) & _PAGE_VALID) {
+			bool exec = pte_exec(*pte);
+
+			tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
+		}
+		pte++;
+		vaddr += PAGE_SIZE;
+	}
+	pte_unmap(pte);
+}
+
+
+static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
+			   pmd_t orig, pmd_t pmd)
+{
+	if (mm == &init_mm)
+		return;
+
+	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
+		/*
+		 * Note that this routine only sets pmds for THP pages.
+		 * Hugetlb pages are handled elsewhere.  We need to check
+		 * for the huge zero page.  Huge zero pages are like hugetlb
+		 * pages in that there is no RSS, but there is the need
+		 * for TSB entries.  So, huge zero page counts go into
+		 * hugetlb_pte_count.
+		 */
+		if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
+			if (is_huge_zero_page(pmd_page(pmd)))
+				mm->context.hugetlb_pte_count++;
+			else
+				mm->context.thp_pte_count++;
+		} else {
+			if (is_huge_zero_page(pmd_page(orig)))
+				mm->context.hugetlb_pte_count--;
+			else
+				mm->context.thp_pte_count--;
+		}
+
+		/* Do not try to allocate the TSB hash table if we
+		 * don't have one already.  We have various locks held
+		 * and thus we'll end up doing a GFP_KERNEL allocation
+		 * in an atomic context.
+		 *
+		 * Instead, we let the first TLB miss on a hugepage
+		 * take care of this.
+		 */
+	}
+
+	if (!pmd_none(orig)) {
+		addr &= HPAGE_MASK;
+		if (pmd_trans_huge(orig)) {
+			pte_t orig_pte = __pte(pmd_val(orig));
+			bool exec = pte_exec(orig_pte);
+
+			tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
+			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
+					  REAL_HPAGE_SHIFT);
+		} else {
+			tlb_batch_pmd_scan(mm, addr, orig);
+		}
+	}
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+		pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t orig = *pmdp;
+
+	*pmdp = pmd;
+	__set_pmd_acct(mm, addr, orig, pmd);
+}
+
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+	pmd_t old;
+
+	do {
+		old = *pmdp;
+	} while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
+	__set_pmd_acct(vma->vm_mm, address, old, pmd);
+
+	return old;
+}
+
+/*
+ * This routine is only called when splitting a THP
+ */
+pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
+		     pmd_t *pmdp)
+{
+	pmd_t old, entry;
+
+	entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
+	old = pmdp_establish(vma, address, pmdp, entry);
+	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+	/*
+	 * set_pmd_at() will not be called in a way to decrement
+	 * thp_pte_count when splitting a THP, so do it now.
+	 * Sanity check pmd before doing the actual decrement.
+	 */
+	if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
+	    !is_huge_zero_page(pmd_page(entry)))
+		(vma->vm_mm)->context.thp_pte_count--;
+
+	return old;
+}
+
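+/* Deposited page tables are threaded through a struct list_head overlaid
+ * on the pgtable page itself; pmd_huge_pte(mm, pmdp) tracks the head of
+ * that per-pmd chain, and both operations below run under
+ * mm->page_table_lock (hence the assert_spin_locked() calls).
+ */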
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+				pgtable_t pgtable)
+{
+	struct list_head *lh = (struct list_head *) pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	if (!pmd_huge_pte(mm, pmdp))
+		INIT_LIST_HEAD(lh);
+	else
+		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
+	pmd_huge_pte(mm, pmdp) = pgtable;
+}
+
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
+{
+	struct list_head *lh;
+	pgtable_t pgtable;
+
+	assert_spin_locked(&mm->page_table_lock);
+
+	/* FIFO */
+	pgtable = pmd_huge_pte(mm, pmdp);
+	lh = (struct list_head *) pgtable;
+	if (list_empty(lh))
+		pmd_huge_pte(mm, pmdp) = NULL;
+	else {
+		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
+		list_del(lh);
+	}
+	pte_val(pgtable[0]) = 0;
+	pte_val(pgtable[1]) = 0;
+
+	return pgtable;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
new file mode 100644
index 0000000..f5edc28
--- /dev/null
+++ b/arch/sparc/mm/tsb.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0
+/* arch/sparc64/mm/tsb.c
+ *
+ * Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/preempt.h>
+#include <linux/slab.h>
+#include <linux/mm_types.h>
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/mmu_context.h>
+#include <asm/setup.h>
+#include <asm/tsb.h>
+#include <asm/tlb.h>
+#include <asm/oplib.h>
+
+extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
+
+static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
+{
+	vaddr >>= hash_shift;
+	return vaddr & (nentries - 1);
+}
+
+static inline int tag_compare(unsigned long tag, unsigned long vaddr)
+{
+	return (tag == (vaddr >> 22));
+}
+
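+/* For the kernel TSB with 8K base pages (PAGE_SHIFT == 13) this means the
+ * hash index of a virtual address v is (v >> 13) & (KERNEL_TSB_NENTRIES - 1)
+ * and the stored tag is v >> 22; flush_tsb_kernel_range_scan() below
+ * rebuilds a candidate address from exactly those two pieces (idx << 13 and
+ * tag << 22) when it walks the whole table.
+ */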
+static void flush_tsb_kernel_range_scan(unsigned long start, unsigned long end)
+{
+	unsigned long idx;
+
+	for (idx = 0; idx < KERNEL_TSB_NENTRIES; idx++) {
+		struct tsb *ent = &swapper_tsb[idx];
+		unsigned long match = idx << 13;
+
+		match |= (ent->tag << 22);
+		if (match >= start && match < end)
+			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+	}
+}
+
+/* TSB flushes need only occur on the processor initiating the address
+ * space modification, not on each cpu the address space has run on.
+ * Only the TLB flush needs that treatment.
+ */
+
+void flush_tsb_kernel_range(unsigned long start, unsigned long end)
+{
+	unsigned long v;
+
+	if ((end - start) >> PAGE_SHIFT >= 2 * KERNEL_TSB_NENTRIES)
+		return flush_tsb_kernel_range_scan(start, end);
+
+	for (v = start; v < end; v += PAGE_SIZE) {
+		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
+					      KERNEL_TSB_NENTRIES);
+		struct tsb *ent = &swapper_tsb[hash];
+
+		if (tag_compare(ent->tag, v))
+			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
+	}
+}
+
+static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
+				  unsigned long hash_shift,
+				  unsigned long nentries)
+{
+	unsigned long tag, ent, hash;
+
+	v &= ~0x1UL;
+	hash = tsb_hash(v, hash_shift, nentries);
+	ent = tsb + (hash * sizeof(struct tsb));
+	tag = (v >> 22UL);
+
+	tsb_flush(ent, tag);
+}
+
+static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+			    unsigned long tsb, unsigned long nentries)
+{
+	unsigned long i;
+
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
+}
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static void __flush_huge_tsb_one_entry(unsigned long tsb, unsigned long v,
+				       unsigned long hash_shift,
+				       unsigned long nentries,
+				       unsigned int hugepage_shift)
+{
+	unsigned int hpage_entries;
+	unsigned int i;
+
+	hpage_entries = 1 << (hugepage_shift - hash_shift);
+	for (i = 0; i < hpage_entries; i++)
+		__flush_tsb_one_entry(tsb, v + (i << hash_shift), hash_shift,
+				      nentries);
+}
+
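+/* For example, __flush_huge_tsb_one_entry() above with hugepage_shift == 16
+ * (a 64K page) and the base TSB's hash_shift == PAGE_SHIFT == 13 computes
+ * hpage_entries = 1 << (16 - 13) == 8, so eight consecutive hash buckets
+ * are flushed, one for each 8K piece of the mapping.
+ */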
+static void __flush_huge_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
+				 unsigned long tsb, unsigned long nentries,
+				 unsigned int hugepage_shift)
+{
+	unsigned long i;
+
+	for (i = 0; i < tb->tlb_nr; i++)
+		__flush_huge_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift,
+					   nentries, hugepage_shift);
+}
+#endif
+
+void flush_tsb_user(struct tlb_batch *tb)
+{
+	struct mm_struct *mm = tb->mm;
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	if (tb->hugepage_shift < REAL_HPAGE_SHIFT) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		if (tb->hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one(tb, PAGE_SHIFT, base, nentries,
+					     tb->hugepage_shift);
+#endif
+	}
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_huge_tsb_one(tb, REAL_HPAGE_SHIFT, base, nentries,
+				     tb->hugepage_shift);
+	}
+#endif
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
+			 unsigned int hugepage_shift)
+{
+	unsigned long nentries, base, flags;
+
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	if (hugepage_shift < REAL_HPAGE_SHIFT) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		if (hugepage_shift == PAGE_SHIFT)
+			__flush_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+					      nentries);
+#if defined(CONFIG_HUGETLB_PAGE)
+		else
+			__flush_huge_tsb_one_entry(base, vaddr, PAGE_SHIFT,
+						   nentries, hugepage_shift);
+#endif
+	}
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
+		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
+		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
+			base = __pa(base);
+		__flush_huge_tsb_one_entry(base, vaddr, REAL_HPAGE_SHIFT,
+					   nentries, hugepage_shift);
+	}
+#endif
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+}
+
+#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
+#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
+#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
+#endif
+
+static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
+{
+	unsigned long tsb_reg, base, tsb_paddr;
+	unsigned long page_sz, tte;
+
+	mm->context.tsb_block[tsb_idx].tsb_nentries =
+		tsb_bytes / sizeof(struct tsb);
+
+	switch (tsb_idx) {
+	case MM_TSB_BASE:
+		base = TSBMAP_8K_BASE;
+		break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	case MM_TSB_HUGE:
+		base = TSBMAP_4M_BASE;
+		break;
+#endif
+	default:
+		BUG();
+	}
+
+	tte = pgprot_val(PAGE_KERNEL_LOCKED);
+	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
+	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));
+
+	/* Use the smallest page size that can map the whole TSB
+	 * in one TLB entry.
+	 */
+	switch (tsb_bytes) {
+	case 8192 << 0:
+		tsb_reg = 0x0UL;
+#ifdef DCACHE_ALIASING_POSSIBLE
+		base += (tsb_paddr & 8192);
+#endif
+		page_sz = 8192;
+		break;
+
+	case 8192 << 1:
+		tsb_reg = 0x1UL;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 2:
+		tsb_reg = 0x2UL;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 3:
+		tsb_reg = 0x3UL;
+		page_sz = 64 * 1024;
+		break;
+
+	case 8192 << 4:
+		tsb_reg = 0x4UL;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 5:
+		tsb_reg = 0x5UL;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 6:
+		tsb_reg = 0x6UL;
+		page_sz = 512 * 1024;
+		break;
+
+	case 8192 << 7:
+		tsb_reg = 0x7UL;
+		page_sz = 4 * 1024 * 1024;
+		break;
+
+	default:
+		printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
+		       current->comm, current->pid, tsb_bytes);
+		do_exit(SIGSEGV);
+	}
+	tte |= pte_sz_bits(page_sz);
+
+	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
+		/* Physical mapping, no locked TLB entry for TSB.  */
+		tsb_reg |= tsb_paddr;
+
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
+	} else {
+		tsb_reg |= base;
+		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
+		tte |= (tsb_paddr & ~(page_sz - 1UL));
+
+		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
+		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
+		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
+	}
+
+	/* Setup the Hypervisor TSB descriptor.  */
+	if (tlb_type == hypervisor) {
+		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];
+
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
+			break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+		case MM_TSB_HUGE:
+			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
+			break;
+#endif
+		default:
+			BUG();
+		}
+		hp->assoc = 1;
+		hp->num_ttes = tsb_bytes / 16;
+		hp->ctx_idx = 0;
+		switch (tsb_idx) {
+		case MM_TSB_BASE:
+			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
+			break;
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+		case MM_TSB_HUGE:
+			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
+			break;
+#endif
+		default:
+			BUG();
+		}
+		hp->tsb_base = tsb_paddr;
+		hp->resv = 0;
+	}
+}
+
+struct kmem_cache *pgtable_cache __read_mostly;
+
+static struct kmem_cache *tsb_caches[8] __read_mostly;
+
+static const char *tsb_cache_names[8] = {
+	"tsb_8KB",
+	"tsb_16KB",
+	"tsb_32KB",
+	"tsb_64KB",
+	"tsb_128KB",
+	"tsb_256KB",
+	"tsb_512KB",
+	"tsb_1MB",
+};
+
+void __init pgtable_cache_init(void)
+{
+	unsigned long i;
+
+	pgtable_cache = kmem_cache_create("pgtable_cache",
+					  PAGE_SIZE, PAGE_SIZE,
+					  0,
+					  _clear_page);
+	if (!pgtable_cache) {
+		prom_printf("pgtable_cache_init(): Could not create!\n");
+		prom_halt();
+	}
+
+	for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
+		unsigned long size = 8192 << i;
+		const char *name = tsb_cache_names[i];
+
+		tsb_caches[i] = kmem_cache_create(name,
+						  size, size,
+						  0, NULL);
+		if (!tsb_caches[i]) {
+			prom_printf("Could not create %s cache\n", name);
+			prom_halt();
+		}
+	}
+}
+
+int sysctl_tsb_ratio = -2;
+
+static unsigned long tsb_size_to_rss_limit(unsigned long new_size)
+{
+	unsigned long num_ents = (new_size / sizeof(struct tsb));
+
+	if (sysctl_tsb_ratio < 0)
+		return num_ents - (num_ents >> -sysctl_tsb_ratio);
+	else
+		return num_ents + (num_ents >> sysctl_tsb_ratio);
+}
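+
+/* Worked example: with the default sysctl_tsb_ratio of -2 and the smallest
+ * 8K TSB (512 entries of 16 bytes each), the limit is 512 - (512 >> 2) = 384
+ * entries, i.e. the RSS may reach 3/4 of the TSB's capacity before the grow
+ * logic below moves up to the next size.
+ */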
+
+/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
+ * do_sparc64_fault() invokes this routine to try and grow it.
+ *
+ * When we reach the maximum TSB size supported, we stick ~0UL into
+ * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
+ * will not trigger any longer.
+ *
+ * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
+ * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
+ * must be 512K aligned.  It also must be physically contiguous, so we
+ * cannot use vmalloc().
+ *
+ * The idea here is to grow the TSB when the RSS of the process approaches
+ * the number of entries that the current TSB can hold at once.  Currently,
+ * we trigger when the RSS hits 3/4 of the TSB capacity.
+ */
+void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
+{
+	unsigned long max_tsb_size = 1 * 1024 * 1024;
+	unsigned long new_size, old_size, flags;
+	struct tsb *old_tsb, *new_tsb;
+	unsigned long new_cache_index, old_cache_index;
+	unsigned long new_rss_limit;
+	gfp_t gfp_flags;
+
+	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
+		max_tsb_size = (PAGE_SIZE << MAX_ORDER);
+
+	new_cache_index = 0;
+	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
+		new_rss_limit = tsb_size_to_rss_limit(new_size);
+		if (new_rss_limit > rss)
+			break;
+		new_cache_index++;
+	}
+
+	if (new_size == max_tsb_size)
+		new_rss_limit = ~0UL;
+
+retry_tsb_alloc:
+	gfp_flags = GFP_KERNEL;
+	if (new_size > (PAGE_SIZE * 2))
+		gfp_flags |= __GFP_NOWARN | __GFP_NORETRY;
+
+	new_tsb = kmem_cache_alloc_node(tsb_caches[new_cache_index],
+					gfp_flags, numa_node_id());
+	if (unlikely(!new_tsb)) {
+		/* Not being able to fork due to a high-order TSB
+		 * allocation failure is very bad behavior.  Just back
+		 * down to a 0-order allocation and force no TSB
+		 * growing for this address space.
+		 */
+		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
+		    new_cache_index > 0) {
+			new_cache_index = 0;
+			new_size = 8192;
+			new_rss_limit = ~0UL;
+			goto retry_tsb_alloc;
+		}
+
+		/* If we failed on a TSB grow, we are under serious
+		 * memory pressure so don't try to grow any more.
+		 */
+		if (mm->context.tsb_block[tsb_index].tsb != NULL)
+			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
+		return;
+	}
+
+	/* Mark all tags as invalid.  */
+	tsb_init(new_tsb, new_size);
+
+	/* Ok, we are about to commit the changes.  If we are
+	 * growing an existing TSB the locking is very tricky,
+	 * so WATCH OUT!
+	 *
+	 * We have to hold mm->context.lock while committing to the
+	 * new TSB, this synchronizes us with processors in
+	 * flush_tsb_user() and switch_mm() for this address space.
+	 *
+	 * But even with that lock held, processors run asynchronously
+	 * accessing the old TSB via TLB miss handling.  This is OK
+	 * because those actions are just propagating state from the
+	 * Linux page tables into the TSB, page table mappings are not
+	 * being changed.  If a real fault occurs, the processor will
+	 * synchronize with us when it hits flush_tsb_user(), this is
+	 * also true for the case where vmscan is modifying the page
+	 * tables.  The only thing we need to be careful with is to
+	 * skip any locked TSB entries during copy_tsb().
+	 *
+	 * When we finish committing to the new TSB, we have to drop
+	 * the lock and ask all other cpus running this address space
+	 * to run tsb_context_switch() to see the new TSB table.
+	 */
+	spin_lock_irqsave(&mm->context.lock, flags);
+
+	old_tsb = mm->context.tsb_block[tsb_index].tsb;
+	old_cache_index =
+		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
+	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
+		    sizeof(struct tsb));
+
+
+	/* Handle multiple threads trying to grow the TSB at the same time.
+	 * One will get in here first, and bump the size and the RSS limit.
+	 * The others will get in here next and hit this check.
+	 */
+	if (unlikely(old_tsb &&
+		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
+		spin_unlock_irqrestore(&mm->context.lock, flags);
+
+		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
+		return;
+	}
+
+	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;
+
+	if (old_tsb) {
+		extern void copy_tsb(unsigned long old_tsb_base,
+				     unsigned long old_tsb_size,
+				     unsigned long new_tsb_base,
+				     unsigned long new_tsb_size,
+				     unsigned long page_size_shift);
+		unsigned long old_tsb_base = (unsigned long) old_tsb;
+		unsigned long new_tsb_base = (unsigned long) new_tsb;
+
+		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
+			old_tsb_base = __pa(old_tsb_base);
+			new_tsb_base = __pa(new_tsb_base);
+		}
+		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+			tsb_index == MM_TSB_BASE ?
+			PAGE_SHIFT : REAL_HPAGE_SHIFT);
+	}
+
+	mm->context.tsb_block[tsb_index].tsb = new_tsb;
+	setup_tsb_params(mm, tsb_index, new_size);
+
+	spin_unlock_irqrestore(&mm->context.lock, flags);
+
+	/* If old_tsb is NULL, we're being invoked for the first time
+	 * from init_new_context().
+	 */
+	if (old_tsb) {
+		/* Reload it on the local cpu.  */
+		tsb_context_switch(mm);
+
+		/* Now force other processors to do the same.  */
+		preempt_disable();
+		smp_tsb_sync(mm);
+		preempt_enable();
+
+		/* Now it is safe to free the old tsb.  */
+		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
+	}
+}
+
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	unsigned long mm_rss = get_mm_rss(mm);
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	unsigned long saved_hugetlb_pte_count;
+	unsigned long saved_thp_pte_count;
+#endif
+	unsigned int i;
+
+	spin_lock_init(&mm->context.lock);
+
+	mm->context.sparc64_ctx_val = 0UL;
+
+	mm->context.tag_store = NULL;
+	spin_lock_init(&mm->context.tag_lock);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	/* We reset them to zero because the fork() page copying
+	 * will re-increment the counters as the parent PTEs are
+	 * copied into the child address space.
+	 */
+	saved_hugetlb_pte_count = mm->context.hugetlb_pte_count;
+	saved_thp_pte_count = mm->context.thp_pte_count;
+	mm->context.hugetlb_pte_count = 0;
+	mm->context.thp_pte_count = 0;
+
+	mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE);
+#endif
+
+	/* copy_mm() copies over the parent's mm_struct before calling
+	 * us, so we need to zero out the TSB pointer or else tsb_grow()
+	 * will be confused and think there is an older TSB to free up.
+	 */
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		mm->context.tsb_block[i].tsb = NULL;
+
+	/* If this is fork, inherit the parent's TSB size.  We would
+	 * grow it to that size on the first page fault anyway.
+	 */
+	tsb_grow(mm, MM_TSB_BASE, mm_rss);
+
+#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
+	if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count))
+		tsb_grow(mm, MM_TSB_HUGE,
+			 (saved_hugetlb_pte_count + saved_thp_pte_count) *
+			 REAL_HPAGE_PER_HPAGE);
+#endif
+
+	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void tsb_destroy_one(struct tsb_config *tp)
+{
+	unsigned long cache_index;
+
+	if (!tp->tsb)
+		return;
+	cache_index = tp->tsb_reg_val & 0x7UL;
+	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
+	tp->tsb = NULL;
+	tp->tsb_reg_val = 0UL;
+}
+
+void destroy_context(struct mm_struct *mm)
+{
+	unsigned long flags, i;
+
+	for (i = 0; i < MM_NUM_TSBS; i++)
+		tsb_destroy_one(&mm->context.tsb_block[i]);
+
+	spin_lock_irqsave(&ctx_alloc_lock, flags);
+
+	if (CTX_VALID(mm->context)) {
+		unsigned long nr = CTX_NRBITS(mm->context);
+		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
+	}
+
+	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+
+	/* If ADI tag storage was allocated for this task, free it */
+	if (mm->context.tag_store) {
+		tag_storage_desc_t *tag_desc;
+		unsigned long max_desc;
+		unsigned char *tags;
+
+		tag_desc = mm->context.tag_store;
+		max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+		for (i = 0; i < max_desc; i++) {
+			tags = tag_desc->tags;
+			tag_desc->tags = NULL;
+			kfree(tags);
+			tag_desc++;
+		}
+		kfree(mm->context.tag_store);
+		mm->context.tag_store = NULL;
+	}
+}
diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S
new file mode 100644
index 0000000..62b742d
--- /dev/null
+++ b/arch/sparc/mm/tsunami.S
@@ -0,0 +1,132 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tsunami.S: High speed MicroSparc-I mmu/cache operations.
+ *
+ * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/psr.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+
+	.text
+	.align	4
+
+	.globl	tsunami_flush_cache_all, tsunami_flush_cache_mm
+	.globl	tsunami_flush_cache_range, tsunami_flush_cache_page
+	.globl	tsunami_flush_page_to_ram, tsunami_flush_page_for_dma
+	.globl	tsunami_flush_sig_insns
+	.globl	tsunami_flush_tlb_all, tsunami_flush_tlb_mm
+	.globl	tsunami_flush_tlb_range, tsunami_flush_tlb_page
+
+	/* Sliiick... */
+tsunami_flush_cache_page:
+tsunami_flush_cache_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+tsunami_flush_cache_mm:
+	ld	[%o0 + AOFF_mm_context], %g2
+	cmp	%g2, -1
+	be	tsunami_flush_cache_out
+tsunami_flush_cache_all:
+	WINDOW_FLUSH(%g4, %g5)
+tsunami_flush_page_for_dma:
+	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
+	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
+tsunami_flush_cache_out:
+tsunami_flush_page_to_ram:
+	retl
+	 nop
+
+tsunami_flush_sig_insns:
+	flush	%o1
+	retl
+	 flush	%o1 + 4
+
+	/* More slick stuff... */
+tsunami_flush_tlb_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+tsunami_flush_tlb_mm:
+	ld	[%o0 + AOFF_mm_context], %g2
+	cmp	%g2, -1
+	be	tsunami_flush_tlb_out
+tsunami_flush_tlb_all:
+	 mov	0x400, %o1
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	nop
+	nop
+	nop
+	nop
+	nop
+tsunami_flush_tlb_out:
+	retl
+	 nop
+
+	/* This one can be done in a fine grained manner... */
+tsunami_flush_tlb_page:
+	ld	[%o0 + VMA_VM_MM], %o0
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o3
+	andn	%o1, (PAGE_SIZE - 1), %o1
+	cmp	%o3, -1
+	be	tsunami_flush_tlb_page_out
+	 lda	[%g1] ASI_M_MMUREGS, %g5
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	nop
+	nop
+	nop
+	nop
+	nop
+tsunami_flush_tlb_page_out:
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+
+#define MIRROR_BLOCK(dst, src, offset, t0, t1, t2, t3) \
+	ldd	[src + offset + 0x18], t0; \
+	std	t0, [dst + offset + 0x18]; \
+	ldd	[src + offset + 0x10], t2; \
+	std	t2, [dst + offset + 0x10]; \
+	ldd	[src + offset + 0x08], t0; \
+	std	t0, [dst + offset + 0x08]; \
+	ldd	[src + offset + 0x00], t2; \
+	std	t2, [dst + offset + 0x00];
+
+tsunami_copy_1page:
+/* NOTE: This routine has to be shorter than 70 insns --jj */
+	or	%g0, (PAGE_SIZE >> 8), %g1
+1:
+	MIRROR_BLOCK(%o0, %o1, 0x00, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0x20, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0x40, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0x60, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0x80, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0xa0, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0xc0, %o2, %o3, %o4, %o5)
+	MIRROR_BLOCK(%o0, %o1, 0xe0, %o2, %o3, %o4, %o5)
+	subcc	%g1, 1, %g1
+	add	%o0, 0x100, %o0
+	bne	1b
+	 add	%o1, 0x100, %o1
+
+	.globl	tsunami_setup_blockops
+tsunami_setup_blockops:
+	sethi	%hi(__copy_1page), %o0
+	or	%o0, %lo(__copy_1page), %o0
+	sethi	%hi(tsunami_copy_1page), %o1
+	or	%o1, %lo(tsunami_copy_1page), %o1
+	sethi	%hi(tsunami_setup_blockops), %o2
+	or	%o2, %lo(tsunami_setup_blockops), %o2
+	ld	[%o1], %o4
+1:	add	%o1, 4, %o1
+	st	%o4, [%o0]
+	add	%o0, 4, %o0
+	cmp	%o1, %o2
+	bne	1b
+	ld	[%o1], %o4
+	sta	%g0, [%g0] ASI_M_IC_FLCLEAR
+	sta	%g0, [%g0] ASI_M_DC_FLCLEAR
+	retl
+	 nop
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
new file mode 100644
index 0000000..d245f89
--- /dev/null
+++ b/arch/sparc/mm/ultra.S
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ultra.S: Don't expand these all over the place...
+ *
+ * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
+ */
+
+#include <asm/asi.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/spitfire.h>
+#include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pil.h>
+#include <asm/head.h>
+#include <asm/thread_info.h>
+#include <asm/cacheflush.h>
+#include <asm/hypervisor.h>
+#include <asm/cpudata.h>
+
+	/* Basically, most of the Spitfire vs. Cheetah madness
+	 * has to do with the fact that Cheetah does not support
+	 * IMMU flushes out of the secondary context.  Someone needs
+	 * to throw a south lake birthday party for the folks
+	 * in Microelectronics who refused to fix this shit.
+	 */
+
+	/* This file is meant to be read efficiently by the CPU, not humans.
+	 * Try not to screw this up for anybody...
+	 */
+	.text
+	.align		32
+	.globl		__flush_tlb_mm
+__flush_tlb_mm:		/* 19 insns */
+	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
+	ldxa		[%o1] ASI_DMMU, %g2
+	cmp		%g2, %o0
+	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
+	 mov		0x50, %g3
+	stxa		%g0, [%g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g3] ASI_IMMU_DEMAP
+	sethi		%hi(KERNBASE), %g3
+	flush		%g3
+	retl
+	 nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	.align		32
+	.globl		__flush_tlb_page
+__flush_tlb_page:	/* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, %pstate
+	mov		SECONDARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	stxa		%o0, [%o4] ASI_DMMU
+	andcc		%o1, 1, %g0
+	andn		%o1, 1, %o3
+	be,pn		%icc, 1f
+	 or		%o3, 0x10, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	retl
+	 wrpr		%g7, 0x0, %pstate
+	nop
+	nop
+	nop
+	nop
+
+	.align		32
+	.globl		__flush_tlb_pending
+__flush_tlb_pending:	/* 27 insns */
+	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+	rdpr		%pstate, %g7
+	sllx		%o1, 3, %o1
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, %pstate
+	mov		SECONDARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	stxa		%o0, [%o4] ASI_DMMU
+1:	sub		%o1, (1 << 3), %o1
+	ldx		[%o2 + %o1], %o3
+	andcc		%o3, 1, %g0
+	andn		%o3, 1, %o3
+	be,pn		%icc, 2f
+	 or		%o3, 0x10, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%o1, 1b
+	 nop
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	retl
+	 wrpr		%g7, 0x0, %pstate
+	nop
+	nop
+	nop
+	nop
+
+	.align		32
+	.globl		__flush_tlb_kernel_range
+__flush_tlb_kernel_range:	/* 31 insns */
+	/* %o0=start, %o1=end */
+	cmp		%o0, %o1
+	be,pn		%xcc, 2f
+	 sub		%o1, %o0, %o3
+	srlx		%o3, 18, %o4
+	brnz,pn		%o4, __spitfire_flush_tlb_kernel_range_slow
+	 sethi		%hi(PAGE_SIZE), %o4
+	sub		%o3, %o4, %o3
+	or		%o0, 0x20, %o0		! Nucleus
+1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
+	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%o3, 1b
+	 sub		%o3, %o4, %o3
+2:	sethi		%hi(KERNBASE), %o3
+	flush		%o3
+	retl
+	 nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+__spitfire_flush_tlb_kernel_range_slow:
+	mov		63 * 8, %o4
+1:	ldxa		[%o4] ASI_ITLB_DATA_ACCESS, %o3
+	andcc		%o3, 0x40, %g0			/* _PAGE_L_4U */
+	bne,pn		%xcc, 2f
+	 mov		TLB_TAG_ACCESS, %o3
+	stxa		%g0, [%o3] ASI_IMMU
+	stxa		%g0, [%o4] ASI_ITLB_DATA_ACCESS
+	membar		#Sync
+2:	ldxa		[%o4] ASI_DTLB_DATA_ACCESS, %o3
+	andcc		%o3, 0x40, %g0
+	bne,pn		%xcc, 2f
+	 mov		TLB_TAG_ACCESS, %o3
+	stxa		%g0, [%o3] ASI_DMMU
+	stxa		%g0, [%o4] ASI_DTLB_DATA_ACCESS
+	membar		#Sync
+2:	sub		%o4, 8, %o4
+	brgez,pt	%o4, 1b
+	 nop
+	retl
+	 nop
+
+__spitfire_flush_tlb_mm_slow:
+	rdpr		%pstate, %g1
+	wrpr		%g1, PSTATE_IE, %pstate
+	stxa		%o0, [%o1] ASI_DMMU
+	stxa		%g0, [%g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g3] ASI_IMMU_DEMAP
+	flush		%g6
+	stxa		%g2, [%o1] ASI_DMMU
+	sethi		%hi(KERNBASE), %o1
+	flush		%o1
+	retl
+	 wrpr		%g1, 0, %pstate
+
+/*
+ * The following code flushes one page_size worth.
+ */
+	.section .kprobes.text, "ax"
+	.align		32
+	.globl		__flush_icache_page
+__flush_icache_page:	/* %o0 = phys_page */
+	srlx		%o0, PAGE_SHIFT, %o0
+	sethi		%hi(PAGE_OFFSET), %g1
+	sllx		%o0, PAGE_SHIFT, %o0
+	sethi		%hi(PAGE_SIZE), %g2
+	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
+	add		%o0, %g1, %o0
+1:	subcc		%g2, 32, %g2
+	bne,pt		%icc, 1b
+	 flush		%o0 + %g2
+	retl
+	 nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+
+#if (PAGE_SHIFT != 13)
+#error only page shift of 13 is supported by dcache flush
+#endif
+
+#define DTAG_MASK 0x3
+
+	/* This routine is Spitfire specific so the hardcoded
+	 * D-cache size and line-size are OK.
+	 */
+	.align		64
+	.globl		__flush_dcache_page
+__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
+	sethi		%hi(PAGE_OFFSET), %g1
+	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
+	sub		%o0, %g1, %o0			! physical address
+	srlx		%o0, 11, %o0			! make D-cache TAG
+	sethi		%hi(1 << 14), %o2		! D-cache size
+	sub		%o2, (1 << 5), %o2		! D-cache line size
+1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
+	andcc		%o3, DTAG_MASK, %g0		! Valid?
+	be,pn		%xcc, 2f			! Nope, branch
+	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
+	cmp		%o3, %o0			! TAG match?
+	bne,pt		%xcc, 2f			! Nope, branch
+	 nop
+	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
+	membar		#Sync
+2:	brnz,pt		%o2, 1b
+	 sub		%o2, (1 << 5), %o2		! D-cache line size
+
+	/* The I-cache does not snoop local stores so we
+	 * better flush that too when necessary.
+	 */
+	brnz,pt		%o1, __flush_icache_page
+	 sllx		%o0, 11, %o0
+	retl
+	 nop
+
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	.previous
+
+	/* Cheetah specific versions, patched at boot time. */
+__cheetah_flush_tlb_mm: /* 19 insns */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o2
+	mov		0x40, %g3
+	ldxa		[%o2] ASI_DMMU, %g2
+	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
+	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
+	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
+	stxa		%o0, [%o2] ASI_DMMU
+	stxa		%g0, [%g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g3] ASI_IMMU_DEMAP
+	stxa		%g2, [%o2] ASI_DMMU
+	sethi		%hi(KERNBASE), %o2
+	flush		%o2
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
+__cheetah_flush_tlb_page:	/* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	rdpr		%pstate, %g7
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
+	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
+	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
+	stxa		%o0, [%o4] ASI_DMMU
+	andcc		%o1, 1, %g0
+	be,pn		%icc, 1f
+	 andn		%o1, 1, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
+	membar		#Sync
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
+__cheetah_flush_tlb_pending:	/* 27 insns */
+	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+	rdpr		%pstate, %g7
+	sllx		%o1, 3, %o1
+	andn		%g7, PSTATE_IE, %g2
+	wrpr		%g2, 0x0, %pstate
+	wrpr		%g0, 1, %tl
+	mov		PRIMARY_CONTEXT, %o4
+	ldxa		[%o4] ASI_DMMU, %g2
+	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
+	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
+	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
+	stxa		%o0, [%o4] ASI_DMMU
+1:	sub		%o1, (1 << 3), %o1
+	ldx		[%o2 + %o1], %o3
+	andcc		%o3, 1, %g0
+	be,pn		%icc, 2f
+	 andn		%o3, 1, %o3
+	stxa		%g0, [%o3] ASI_IMMU_DEMAP
+2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP	
+	membar		#Sync
+	brnz,pt		%o1, 1b
+	 nop
+	stxa		%g2, [%o4] ASI_DMMU
+	sethi		%hi(KERNBASE), %o4
+	flush		%o4
+	wrpr		%g0, 0, %tl
+	retl
+	 wrpr		%g7, 0x0, %pstate
+
+__cheetah_flush_tlb_kernel_range:	/* 31 insns */
+	/* %o0=start, %o1=end */
+	cmp		%o0, %o1
+	be,pn		%xcc, 2f
+	 sub		%o1, %o0, %o3
+	srlx		%o3, 18, %o4
+	brnz,pn		%o4, 3f
+	 sethi		%hi(PAGE_SIZE), %o4
+	sub		%o3, %o4, %o3
+	or		%o0, 0x20, %o0		! Nucleus
+1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
+	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%o3, 1b
+	 sub		%o3, %o4, %o3
+2:	sethi		%hi(KERNBASE), %o3
+	flush		%o3
+	retl
+	 nop
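+	/* Range covers 256K or more: issue a single "demap all"
+	 * operation (type 0x80) against both MMUs instead of iterating
+	 * page by page.
+	 */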
+3:	mov		0x80, %o4
+	stxa		%g0, [%o4] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g0, [%o4] ASI_IMMU_DEMAP
+	membar		#Sync
+	retl
+	 nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+__cheetah_flush_dcache_page: /* 11 insns */
+	sethi		%hi(PAGE_OFFSET), %g1
+	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
+	sub		%o0, %g1, %o0
+	sethi		%hi(PAGE_SIZE), %o4
+1:	subcc		%o4, (1 << 5), %o4
+	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
+	membar		#Sync
+	bne,pt		%icc, 1b
+	 nop
+	retl		/* I-cache flush never needed on Cheetah, see callers. */
+	 nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	/* Hypervisor specific versions, patched at boot time.  */
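+	/* Common fatal-error path for the hypervisor TLB ops below.
+	 * Callers arrive with the hypervisor error status in %o0 and
+	 * the number of the failed call in %o1 (%i0/%i1 after save).
+	 */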
+__hypervisor_tlb_tl0_error:
+	save		%sp, -192, %sp
+	mov		%i0, %o0
+	call		hypervisor_tlbop_error
+	 mov		%i1, %o1
+	ret
+	 restore
+
+__hypervisor_flush_tlb_mm: /* 19 insns */
+	mov		%o0, %o2	/* ARG2: mmu context */
+	mov		0, %o0		/* ARG0: CPU lists unimplemented */
+	mov		0, %o1		/* ARG1: CPU lists unimplemented */
+	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
+	mov		HV_FAST_MMU_DEMAP_CTX, %o5
+	ta		HV_FAST_TRAP
+	brnz,pn		%o0, 1f
+	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
+	retl
+	 nop
+1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o5
+	jmpl		%o5 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+__hypervisor_flush_tlb_page: /* 22 insns */
+	/* %o0 = context, %o1 = vaddr */
+	mov		%o0, %g2
+	mov		%o1, %o0              /* ARG0: vaddr + IMMU-bit */
+	mov		%g2, %o1	      /* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
+	srlx		%o0, PAGE_SHIFT, %o0
+	sllx		%o0, PAGE_SHIFT, %o0
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pn		%o0, 1f
+	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+	retl
+	 nop
+1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
+	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+__hypervisor_flush_tlb_pending: /* 27 insns */
+	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
+	sllx		%o1, 3, %g1
+	mov		%o2, %g2
+	mov		%o0, %g3
+1:	sub		%g1, (1 << 3), %g1
+	ldx		[%g2 + %g1], %o0      /* ARG0: vaddr + IMMU-bit */
+	mov		%g3, %o1	      /* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2	      /* ARG2: flags */
+	srlx		%o0, PAGE_SHIFT, %o0
+	sllx		%o0, PAGE_SHIFT, %o0
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pn		%o0, 1f
+	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+	brnz,pt		%g1, 1b
+	 nop
+	retl
+	 nop
+1:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
+	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+__hypervisor_flush_tlb_kernel_range: /* 31 insns */
+	/* %o0=start, %o1=end */
+	cmp		%o0, %o1
+	be,pn		%xcc, 2f
+	 sub		%o1, %o0, %g2
+	srlx		%g2, 18, %g3
+	brnz,pn		%g3, 4f
+	 mov		%o0, %g1
+	sethi		%hi(PAGE_SIZE), %g3
+	sub		%g2, %g3, %g2
+1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
+	mov		0, %o1		/* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	brnz,pn		%o0, 3f
+	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
+	brnz,pt		%g2, 1b
+	 sub		%g2, %g3, %g2
+2:	retl
+	 nop
+3:	sethi		%hi(__hypervisor_tlb_tl0_error), %o2
+	jmpl		%o2 + %lo(__hypervisor_tlb_tl0_error), %g0
+	 nop
+4:	mov		0, %o0		/* ARG0: CPU lists unimplemented */
+	mov		0, %o1		/* ARG1: CPU lists unimplemented */
+	mov		0, %o2		/* ARG2: mmu context == nucleus */
+	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
+	mov		HV_FAST_MMU_DEMAP_CTX, %o5
+	ta		HV_FAST_TRAP
+	brnz,pn		%o0, 3b
+	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
+	retl
+	 nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	/* XXX Niagara and friends have an 8K cache, so no aliasing is
+	 * XXX possible, but nothing explicit in the Hypervisor API
+	 * XXX guarantees this.
+	 */
+__hypervisor_flush_dcache_page:	/* 2 insns */
+	retl
+	 nop
+#endif
+
+tlb_patch_one:
+1:	lduw		[%o1], %g1
+	stw		%g1, [%o0]
+	flush		%o0
+	subcc		%o2, 1, %o2
+	add		%o1, 4, %o1
+	bne,pt		%icc, 1b
+	 add		%o0, 4, %o0
+	retl
+	 nop
+
+#ifdef CONFIG_SMP
+	/* These are all called by the slaves of a cross call, at
+	 * trap level 1, with interrupts fully disabled.
+	 *
+	 * Register usage:
+	 *   %g5	mm->context	(all tlb flushes)
+	 *   %g1	address arg 1	(tlb page and range flushes)
+	 *   %g7	address arg 2	(tlb range flush only)
+	 *
+	 *   %g6	scratch 1
+	 *   %g2	scratch 2
+	 *   %g3	scratch 3
+	 *   %g4	scratch 4
+	 */
+	.align		32
+	.globl		xcall_flush_tlb_mm
+xcall_flush_tlb_mm:	/* 24 insns */
+	mov		PRIMARY_CONTEXT, %g2
+	ldxa		[%g2] ASI_DMMU, %g3
+	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
+	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
+	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
+	stxa		%g5, [%g2] ASI_DMMU
+	mov		0x40, %g4
+	stxa		%g0, [%g4] ASI_DMMU_DEMAP
+	stxa		%g0, [%g4] ASI_IMMU_DEMAP
+	stxa		%g3, [%g2] ASI_DMMU
+	retry
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	.globl		xcall_flush_tlb_page
+xcall_flush_tlb_page:	/* 20 insns */
+	/* %g5=context, %g1=vaddr */
+	mov		PRIMARY_CONTEXT, %g4
+	ldxa		[%g4] ASI_DMMU, %g2
+	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
+	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
+	or		%g5, %g4, %g5
+	mov		PRIMARY_CONTEXT, %g4
+	stxa		%g5, [%g4] ASI_DMMU
+	andcc		%g1, 0x1, %g0
+	be,pn		%icc, 2f
+	 andn		%g1, 0x1, %g5
+	stxa		%g0, [%g5] ASI_IMMU_DEMAP
+2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g2, [%g4] ASI_DMMU
+	retry
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	.globl		xcall_flush_tlb_kernel_range
+xcall_flush_tlb_kernel_range:	/* 44 insns */
+	sethi		%hi(PAGE_SIZE - 1), %g2
+	or		%g2, %lo(PAGE_SIZE - 1), %g2
+	andn		%g1, %g2, %g1
+	andn		%g7, %g2, %g7
+	sub		%g7, %g1, %g3
+	srlx		%g3, 18, %g2
+	brnz,pn		%g2, 2f
+	 sethi		%hi(PAGE_SIZE), %g2
+	sub		%g3, %g2, %g3
+	or		%g1, 0x20, %g1		! Nucleus
+1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%g3, 1b
+	 sub		%g3, %g2, %g3
+	retry
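+	/* Range covers 256K or more: walk all 64 ITLB/DTLB entries
+	 * directly and invalidate every entry that is not locked
+	 * (_PAGE_L_4U).
+	 */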
+2:	mov		63 * 8, %g1
+1:	ldxa		[%g1] ASI_ITLB_DATA_ACCESS, %g2
+	andcc		%g2, 0x40, %g0			/* _PAGE_L_4U */
+	bne,pn		%xcc, 2f
+	 mov		TLB_TAG_ACCESS, %g2
+	stxa		%g0, [%g2] ASI_IMMU
+	stxa		%g0, [%g1] ASI_ITLB_DATA_ACCESS
+	membar		#Sync
+2:	ldxa		[%g1] ASI_DTLB_DATA_ACCESS, %g2
+	andcc		%g2, 0x40, %g0
+	bne,pn		%xcc, 2f
+	 mov		TLB_TAG_ACCESS, %g2
+	stxa		%g0, [%g2] ASI_DMMU
+	stxa		%g0, [%g1] ASI_DTLB_DATA_ACCESS
+	membar		#Sync
+2:	sub		%g1, 8, %g1
+	brgez,pt	%g1, 1b
+	 nop
+	retry
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+	/* This runs in a very controlled environment, so we do
+	 * not need to worry about BH races etc.
+	 */
+	.globl		xcall_sync_tick
+xcall_sync_tick:
+
+661:	rdpr		%pstate, %g2
+	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
+	.section	.sun4v_2insn_patch, "ax"
+	.word		661b
+	nop
+	nop
+	.previous
+
+	rdpr		%pil, %g2
+	wrpr		%g0, PIL_NORMAL_MAX, %pil
+	sethi		%hi(109f), %g7
+	b,pt		%xcc, etrap_irq
+109:	 or		%g7, %lo(109b), %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+	call		trace_hardirqs_off
+	 nop
+#endif
+	call		smp_synchronize_tick_client
+	 nop
+	b		rtrap_xcall
+	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+
+	.globl		xcall_fetch_glob_regs
+xcall_fetch_glob_regs:
+	sethi		%hi(global_cpu_snapshot), %g1
+	or		%g1, %lo(global_cpu_snapshot), %g1
+	__GET_CPUID(%g2)
+	sllx		%g2, 6, %g3
+	add		%g1, %g3, %g1
+	rdpr		%tstate, %g7
+	stx		%g7, [%g1 + GR_SNAP_TSTATE]
+	rdpr		%tpc, %g7
+	stx		%g7, [%g1 + GR_SNAP_TPC]
+	rdpr		%tnpc, %g7
+	stx		%g7, [%g1 + GR_SNAP_TNPC]
+	stx		%o7, [%g1 + GR_SNAP_O7]
+	stx		%i7, [%g1 + GR_SNAP_I7]
+	/* Don't try this at home kids... */
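+	/* Rotate %cwp back one window to read the previous window's
+	 * %i7, record it as GR_SNAP_RPC, then restore %cwp.
+	 */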
+	rdpr		%cwp, %g3
+	sub		%g3, 1, %g7
+	wrpr		%g7, %cwp
+	mov		%i7, %g7
+	wrpr		%g3, %cwp
+	stx		%g7, [%g1 + GR_SNAP_RPC]
+	sethi		%hi(trap_block), %g7
+	or		%g7, %lo(trap_block), %g7
+	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
+	add		%g7, %g2, %g7
+	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
+	stx		%g3, [%g1 + GR_SNAP_THREAD]
+	retry
+
+	.globl		xcall_fetch_glob_pmu
+xcall_fetch_glob_pmu:
+	sethi		%hi(global_cpu_snapshot), %g1
+	or		%g1, %lo(global_cpu_snapshot), %g1
+	__GET_CPUID(%g2)
+	sllx		%g2, 6, %g3
+	add		%g1, %g3, %g1
+	rd		%pic, %g7
+	stx		%g7, [%g1 + (4 * 8)]
+	rd		%pcr, %g7
+	stx		%g7, [%g1 + (0 * 8)]
+	retry
+
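+	/* Niagara-4 variant: the PIC counters are read through ASI_PIC
+	 * and the PCR values via the HV_FAST_VT_GET_PERFREG hypervisor
+	 * call, preserving %o0/%o1/%o5 around the traps.
+	 */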
+	.globl		xcall_fetch_glob_pmu_n4
+xcall_fetch_glob_pmu_n4:
+	sethi		%hi(global_cpu_snapshot), %g1
+	or		%g1, %lo(global_cpu_snapshot), %g1
+	__GET_CPUID(%g2)
+	sllx		%g2, 6, %g3
+	add		%g1, %g3, %g1
+
+	ldxa		[%g0] ASI_PIC, %g7
+	stx		%g7, [%g1 + (4 * 8)]
+	mov		0x08, %g3
+	ldxa		[%g3] ASI_PIC, %g7
+	stx		%g7, [%g1 + (5 * 8)]
+	mov		0x10, %g3
+	ldxa		[%g3] ASI_PIC, %g7
+	stx		%g7, [%g1 + (6 * 8)]
+	mov		0x18, %g3
+	ldxa		[%g3] ASI_PIC, %g7
+	stx		%g7, [%g1 + (7 * 8)]
+
+	mov		%o0, %g2
+	mov		%o1, %g3
+	mov		%o5, %g7
+
+	mov		HV_FAST_VT_GET_PERFREG, %o5
+	mov		3, %o0
+	ta		HV_FAST_TRAP
+	stx		%o1, [%g1 + (3 * 8)]
+	mov		HV_FAST_VT_GET_PERFREG, %o5
+	mov		2, %o0
+	ta		HV_FAST_TRAP
+	stx		%o1, [%g1 + (2 * 8)]
+	mov		HV_FAST_VT_GET_PERFREG, %o5
+	mov		1, %o0
+	ta		HV_FAST_TRAP
+	stx		%o1, [%g1 + (1 * 8)]
+	mov		HV_FAST_VT_GET_PERFREG, %o5
+	mov		0, %o0
+	ta		HV_FAST_TRAP
+	stx		%o1, [%g1 + (0 * 8)]
+
+	mov		%g2, %o0
+	mov		%g3, %o1
+	mov		%g7, %o5
+
+	retry
+
+__cheetah_xcall_flush_tlb_kernel_range:	/* 44 insns */
+	sethi		%hi(PAGE_SIZE - 1), %g2
+	or		%g2, %lo(PAGE_SIZE - 1), %g2
+	andn		%g1, %g2, %g1
+	andn		%g7, %g2, %g7
+	sub		%g7, %g1, %g3
+	srlx		%g3, 18, %g2
+	brnz,pn		%g2, 2f
+	 sethi		%hi(PAGE_SIZE), %g2
+	sub		%g3, %g2, %g3
+	or		%g1, 0x20, %g1		! Nucleus
+1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
+	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
+	membar		#Sync
+	brnz,pt		%g3, 1b
+	 sub		%g3, %g2, %g3
+	retry
+2:	mov		0x80, %g2
+	stxa		%g0, [%g2] ASI_DMMU_DEMAP
+	membar		#Sync
+	stxa		%g0, [%g2] ASI_IMMU_DEMAP
+	membar		#Sync
+	retry
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	.align		32
+	.globl		xcall_flush_dcache_page_cheetah
+xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
+	sethi		%hi(PAGE_SIZE), %g3
+1:	subcc		%g3, (1 << 5), %g3
+	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
+	membar		#Sync
+	bne,pt		%icc, 1b
+	 nop
+	retry
+	nop
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+	.globl		xcall_flush_dcache_page_spitfire
+xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
+				     %g7 == kernel page virtual address
+				     %g5 == (page->mapping != NULL)  */
+#ifdef DCACHE_ALIASING_POSSIBLE
+	srlx		%g1, (13 - 2), %g1	! Form tag comparator
+	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
+	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
+1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
+	andcc		%g2, 0x3, %g0
+	be,pn		%xcc, 2f
+	 andn		%g2, 0x3, %g2
+	cmp		%g2, %g1
+
+	bne,pt		%xcc, 2f
+	 nop
+	stxa		%g0, [%g3] ASI_DCACHE_TAG
+	membar		#Sync
+2:	cmp		%g3, 0
+	bne,pt		%xcc, 1b
+	 sub		%g3, (1 << 5), %g3
+
+	brz,pn		%g5, 2f
+#endif /* DCACHE_ALIASING_POSSIBLE */
+	 sethi		%hi(PAGE_SIZE), %g3
+
+1:	flush		%g7
+	subcc		%g3, (1 << 5), %g3
+	bne,pt		%icc, 1b
+	 add		%g7, (1 << 5), %g7
+
+2:	retry
+	nop
+	nop
+
+	/* %g5:	error
+	 * %g6:	tlb op
+	 */
+__hypervisor_tlb_xcall_error:
+	mov	%g5, %g4
+	mov	%g6, %g5
+	ba,pt	%xcc, etrap
+	 rd	%pc, %g7
+	mov	%l4, %o0
+	call	hypervisor_tlbop_error_xcall
+	 mov	%l5, %o1
+	ba,a,pt	%xcc, rtrap
+
+	.globl		__hypervisor_xcall_flush_tlb_mm
+__hypervisor_xcall_flush_tlb_mm: /* 24 insns */
+	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
+	mov		%o0, %g2
+	mov		%o1, %g3
+	mov		%o2, %g4
+	mov		%o3, %g1
+	mov		%o5, %g7
+	clr		%o0		/* ARG0: CPU lists unimplemented */
+	clr		%o1		/* ARG1: CPU lists unimplemented */
+	mov		%g5, %o2	/* ARG2: mmu context */
+	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
+	mov		HV_FAST_MMU_DEMAP_CTX, %o5
+	ta		HV_FAST_TRAP
+	mov		HV_FAST_MMU_DEMAP_CTX, %g6
+	brnz,pn		%o0, 1f
+	 mov		%o0, %g5
+	mov		%g2, %o0
+	mov		%g3, %o1
+	mov		%g4, %o2
+	mov		%g1, %o3
+	mov		%g7, %o5
+	membar		#Sync
+	retry
+1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
+	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+	 nop
+
+	.globl		__hypervisor_xcall_flush_tlb_page
+__hypervisor_xcall_flush_tlb_page: /* 20 insns */
+	/* %g5=ctx, %g1=vaddr */
+	mov		%o0, %g2
+	mov		%o1, %g3
+	mov		%o2, %g4
+	mov		%g1, %o0	        /* ARG0: virtual address */
+	mov		%g5, %o1		/* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
+	srlx		%o0, PAGE_SHIFT, %o0
+	sllx		%o0, PAGE_SHIFT, %o0
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
+	brnz,a,pn	%o0, 1f
+	 mov		%o0, %g5
+	mov		%g2, %o0
+	mov		%g3, %o1
+	mov		%g4, %o2
+	membar		#Sync
+	retry
+1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
+	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+	 nop
+
+	.globl		__hypervisor_xcall_flush_tlb_kernel_range
+__hypervisor_xcall_flush_tlb_kernel_range: /* 44 insns */
+	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
+	sethi		%hi(PAGE_SIZE - 1), %g2
+	or		%g2, %lo(PAGE_SIZE - 1), %g2
+	andn		%g1, %g2, %g1
+	andn		%g7, %g2, %g7
+	sub		%g7, %g1, %g3
+	srlx		%g3, 18, %g7
+	add		%g2, 1, %g2
+	sub		%g3, %g2, %g3
+	mov		%o0, %g2
+	mov		%o1, %g4
+	brnz,pn		%g7, 2f
+	 mov		%o2, %g7
+1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
+	mov		0, %o1		/* ARG1: mmu context */
+	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
+	ta		HV_MMU_UNMAP_ADDR_TRAP
+	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
+	brnz,pn		%o0, 1f
+	 mov		%o0, %g5
+	sethi		%hi(PAGE_SIZE), %o2
+	brnz,pt		%g3, 1b
+	 sub		%g3, %o2, %g3
+5:	mov		%g2, %o0
+	mov		%g4, %o1
+	mov		%g7, %o2
+	membar		#Sync
+	retry
+1:	sethi		%hi(__hypervisor_tlb_xcall_error), %g4
+	jmpl		%g4 + %lo(__hypervisor_tlb_xcall_error), %g0
+	 nop
+2:	mov		%o3, %g1
+	mov		%o5, %g3
+	mov		0, %o0		/* ARG0: CPU lists unimplemented */
+	mov		0, %o1		/* ARG1: CPU lists unimplemented */
+	mov		0, %o2		/* ARG2: mmu context == nucleus */
+	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
+	mov		HV_FAST_MMU_DEMAP_CTX, %o5
+	ta		HV_FAST_TRAP
+	mov		%g1, %o3
+	brz,pt		%o0, 5b
+	 mov		%g3, %o5
+	mov		HV_FAST_MMU_DEMAP_CTX, %g6
+	ba,pt		%xcc, 1b
+	 clr		%g5
+
+	/* These just get rescheduled to PIL vectors. */
+	.globl		xcall_call_function
+xcall_call_function:
+	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
+	retry
+
+	.globl		xcall_call_function_single
+xcall_call_function_single:
+	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
+	retry
+
+	.globl		xcall_receive_signal
+xcall_receive_signal:
+	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
+	retry
+
+	.globl		xcall_capture
+xcall_capture:
+	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
+	retry
+
+#ifdef CONFIG_KGDB
+	.globl		xcall_kgdb_capture
+xcall_kgdb_capture:
+	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
+	retry
+#endif
+
+#endif /* CONFIG_SMP */
+
+	.globl		cheetah_patch_cachetlbops
+cheetah_patch_cachetlbops:
+	save		%sp, -128, %sp
+
+	sethi		%hi(__flush_tlb_mm), %o0
+	or		%o0, %lo(__flush_tlb_mm), %o0
+	sethi		%hi(__cheetah_flush_tlb_mm), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
+	call		tlb_patch_one
+	 mov		19, %o2
+
+	sethi		%hi(__flush_tlb_page), %o0
+	or		%o0, %lo(__flush_tlb_page), %o0
+	sethi		%hi(__cheetah_flush_tlb_page), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		22, %o2
+
+	sethi		%hi(__flush_tlb_pending), %o0
+	or		%o0, %lo(__flush_tlb_pending), %o0
+	sethi		%hi(__cheetah_flush_tlb_pending), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
+	call		tlb_patch_one
+	 mov		27, %o2
+
+	sethi		%hi(__flush_tlb_kernel_range), %o0
+	or		%o0, %lo(__flush_tlb_kernel_range), %o0
+	sethi		%hi(__cheetah_flush_tlb_kernel_range), %o1
+	or		%o1, %lo(__cheetah_flush_tlb_kernel_range), %o1
+	call		tlb_patch_one
+	 mov		31, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	sethi		%hi(__flush_dcache_page), %o0
+	or		%o0, %lo(__flush_dcache_page), %o0
+	sethi		%hi(__cheetah_flush_dcache_page), %o1
+	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
+	call		tlb_patch_one
+	 mov		11, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
+	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
+	sethi		%hi(__cheetah_xcall_flush_tlb_kernel_range), %o1
+	or		%o1, %lo(__cheetah_xcall_flush_tlb_kernel_range), %o1
+	call		tlb_patch_one
+	 mov		44, %o2
+#endif /* CONFIG_SMP */
+
+	ret
+	 restore
+
+	.globl		hypervisor_patch_cachetlbops
+hypervisor_patch_cachetlbops:
+	save		%sp, -128, %sp
+
+	sethi		%hi(__flush_tlb_mm), %o0
+	or		%o0, %lo(__flush_tlb_mm), %o0
+	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
+	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
+	call		tlb_patch_one
+	 mov		19, %o2
+
+	sethi		%hi(__flush_tlb_page), %o0
+	or		%o0, %lo(__flush_tlb_page), %o0
+	sethi		%hi(__hypervisor_flush_tlb_page), %o1
+	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		22, %o2
+
+	sethi		%hi(__flush_tlb_pending), %o0
+	or		%o0, %lo(__flush_tlb_pending), %o0
+	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
+	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
+	call		tlb_patch_one
+	 mov		27, %o2
+
+	sethi		%hi(__flush_tlb_kernel_range), %o0
+	or		%o0, %lo(__flush_tlb_kernel_range), %o0
+	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
+	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
+	call		tlb_patch_one
+	 mov		31, %o2
+
+#ifdef DCACHE_ALIASING_POSSIBLE
+	sethi		%hi(__flush_dcache_page), %o0
+	or		%o0, %lo(__flush_dcache_page), %o0
+	sethi		%hi(__hypervisor_flush_dcache_page), %o1
+	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
+	call		tlb_patch_one
+	 mov		2, %o2
+#endif /* DCACHE_ALIASING_POSSIBLE */
+
+#ifdef CONFIG_SMP
+	sethi		%hi(xcall_flush_tlb_mm), %o0
+	or		%o0, %lo(xcall_flush_tlb_mm), %o0
+	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
+	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
+	call		tlb_patch_one
+	 mov		24, %o2
+
+	sethi		%hi(xcall_flush_tlb_page), %o0
+	or		%o0, %lo(xcall_flush_tlb_page), %o0
+	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
+	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
+	call		tlb_patch_one
+	 mov		20, %o2
+
+	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
+	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
+	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
+	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
+	call		tlb_patch_one
+	 mov		44, %o2
+#endif /* CONFIG_SMP */
+
+	ret
+	 restore
diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S
new file mode 100644
index 0000000..adaef6e
--- /dev/null
+++ b/arch/sparc/mm/viking.S
@@ -0,0 +1,283 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * viking.S: High speed Viking cache/mmu operations
+ *
+ * Copyright (C) 1997  Eddie C. Dost  (ecd@skynet.be)
+ * Copyright (C) 1997,1998,1999  Jakub Jelinek  (jj@ultra.linux.cz)
+ * Copyright (C) 1999  Pavel Semerad  (semerad@ss1000.ms.mff.cuni.cz)
+ */
+
+#include <asm/ptrace.h>
+#include <asm/psr.h>
+#include <asm/asm-offsets.h>
+#include <asm/asi.h>
+#include <asm/mxcc.h>
+#include <asm/page.h>
+#include <asm/pgtsrmmu.h>
+#include <asm/viking.h>
+
+#ifdef CONFIG_SMP
+	.data
+	.align	4
+sun4dsmp_flush_tlb_spin:
+	.word	0
+#endif
+
+	.text
+	.align	4
+
+	.globl	viking_flush_cache_all, viking_flush_cache_mm
+	.globl	viking_flush_cache_range, viking_flush_cache_page
+	.globl	viking_flush_page, viking_mxcc_flush_page
+	.globl	viking_flush_page_for_dma, viking_flush_page_to_ram
+	.globl	viking_flush_sig_insns
+	.globl	viking_flush_tlb_all, viking_flush_tlb_mm
+	.globl	viking_flush_tlb_range, viking_flush_tlb_page
+
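+	/* Scan all 128 sets and 4 blocks of the on-chip data cache tags;
+	 * for every valid line that tags this page, displace it by
+	 * loading kernel addresses that index the same cache set.
+	 */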
+viking_flush_page:
+	sethi	%hi(PAGE_OFFSET), %g2
+	sub	%o0, %g2, %g3
+	srl	%g3, 12, %g1		! ppage >> 12
+
+	clr	%o1			! set counter, 0 - 127
+	sethi	%hi(PAGE_OFFSET + PAGE_SIZE - 0x80000000), %o3
+	sethi	%hi(0x80000000), %o4
+	sethi	%hi(VIKING_PTAG_VALID), %o5
+	sethi	%hi(2*PAGE_SIZE), %o0
+	sethi	%hi(PAGE_SIZE), %g7
+	clr	%o2			! block counter, 0 - 3
+5:
+	sll	%o1, 5, %g4
+	or	%g4, %o4, %g4		! 0x80000000 | (set << 5)
+
+	sll	%o2, 26, %g5		! block << 26
+6:
+	or	%g5, %g4, %g5
+	ldda	[%g5] ASI_M_DATAC_TAG, %g2
+	cmp	%g3, %g1		! ptag == ppage?
+	bne	7f
+	 inc	%o2
+
+	andcc	%g2, %o5, %g0		! ptag VALID?
+	be	7f
+	 add	%g4, %o3, %g2		! (PAGE_OFFSET + PAGE_SIZE) | (set << 5)
+	ld	[%g2], %g3
+	ld	[%g2 + %g7], %g3
+	add	%g2, %o0, %g2
+	ld	[%g2], %g3
+	ld	[%g2 + %g7], %g3
+	add	%g2, %o0, %g2
+	ld	[%g2], %g3
+	ld	[%g2 + %g7], %g3
+	add	%g2, %o0, %g2
+	ld	[%g2], %g3
+	b	8f
+	 ld	[%g2 + %g7], %g3
+
+7:
+	cmp	%o2, 3
+	ble	6b
+	 sll	%o2, 26, %g5			! block << 26
+
+8:	inc	%o1
+	cmp	%o1, 0x7f
+	ble	5b
+	 clr	%o2
+
+9:	retl
+	 nop
+
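+	/* Flush the page from the MXCC external cache by streaming each
+	 * MXCC_STREAM_SIZE-sized block of it through the MXCC source and
+	 * destination stream registers, with the cacheable bit set in
+	 * the high word of the stream descriptor.
+	 */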
+viking_mxcc_flush_page:
+	sethi	%hi(PAGE_OFFSET), %g2
+	sub	%o0, %g2, %g3
+	sub	%g3, -PAGE_SIZE, %g3		! ppage + PAGE_SIZE
+	sethi	%hi(MXCC_SRCSTREAM), %o3	! assume %hi(MXCC_SRCSTREAM) == %hi(MXCC_DESSTREAM)
+	mov	0x10, %g2			! set cacheable bit
+	or	%o3, %lo(MXCC_SRCSTREAM), %o2
+	or	%o3, %lo(MXCC_DESSTREAM), %o3
+	sub	%g3, MXCC_STREAM_SIZE, %g3
+6:
+	stda	%g2, [%o2] ASI_M_MXCC
+	stda	%g2, [%o3] ASI_M_MXCC
+	andncc	%g3, PAGE_MASK, %g0
+	bne	6b
+	 sub	%g3, MXCC_STREAM_SIZE, %g3
+
+9:	retl
+	 nop
+
+viking_flush_cache_page:
+viking_flush_cache_range:
+#ifndef CONFIG_SMP
+	ld	[%o0 + VMA_VM_MM], %o0
+#endif
+viking_flush_cache_mm:
+#ifndef CONFIG_SMP
+	ld	[%o0 + AOFF_mm_context], %g1
+	cmp	%g1, -1
+	bne	viking_flush_cache_all
+	 nop
+	b,a	viking_flush_cache_out
+#endif
+viking_flush_cache_all:
+	WINDOW_FLUSH(%g4, %g5)
+viking_flush_cache_out:
+	retl
+	 nop
+
+viking_flush_tlb_all:
+	mov	0x400, %g1
+	retl
+	 sta	%g0, [%g1] ASI_M_FLUSH_PROBE
+
+viking_flush_tlb_mm:
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o1
+	lda	[%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+	cmp	%o1, -1
+	be	1f
+#endif
+	mov	0x300, %g2
+	sta	%o1, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+#ifndef CONFIG_SMP
+1:	retl
+	 nop
+#endif
+
+viking_flush_tlb_range:
+	ld	[%o0 + VMA_VM_MM], %o0
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o3
+	lda	[%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+	cmp	%o3, -1
+	be	2f
+#endif
+	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	and	%o1, %o4, %o1
+	add	%o1, 0x200, %o1
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+1:	sub	%o1, %o4, %o1
+	cmp	%o1, %o2
+	blu,a	1b
+	 sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+#ifndef CONFIG_SMP
+2:	retl
+	 nop
+#endif
+
+viking_flush_tlb_page:
+	ld	[%o0 + VMA_VM_MM], %o0
+	mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o3
+	lda	[%g1] ASI_M_MMUREGS, %g5
+#ifndef CONFIG_SMP
+	cmp	%o3, -1
+	be	1f
+#endif
+	and	%o1, PAGE_MASK, %o1
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	retl
+	 sta	%g5, [%g1] ASI_M_MMUREGS
+#ifndef CONFIG_SMP
+1:	retl
+	 nop
+#endif
+
+viking_flush_page_to_ram:
+viking_flush_page_for_dma:
+viking_flush_sig_insns:
+	retl
+	 nop
+
+#ifdef CONFIG_SMP
+	.globl	sun4dsmp_flush_tlb_all, sun4dsmp_flush_tlb_mm
+	.globl	sun4dsmp_flush_tlb_range, sun4dsmp_flush_tlb_page
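+	/* The sun4d SMP TLB flushes must be serialized: each routine
+	 * grabs the sun4dsmp_flush_tlb_spin byte lock with ldstub and
+	 * spins on plain loads until it is released.
+	 */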
+sun4dsmp_flush_tlb_all:
+	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
+1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	tst	%g5
+	bne	2f
+	 mov	0x400, %g1
+	sta	%g0, [%g1] ASI_M_FLUSH_PROBE
+	retl
+	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2:	tst	%g5
+	bne,a	2b
+	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	b,a	1b
+
+sun4dsmp_flush_tlb_mm:
+	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
+1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	tst	%g5
+	bne	2f
+	 mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + AOFF_mm_context], %o1
+	lda	[%g1] ASI_M_MMUREGS, %g5
+	mov	0x300, %g2
+	sta	%o1, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%g2] ASI_M_FLUSH_PROBE
+	sta	%g5, [%g1] ASI_M_MMUREGS
+	retl
+	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2:	tst	%g5
+	bne,a	2b
+	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	b,a	1b
+
+sun4dsmp_flush_tlb_range:
+	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
+1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	tst	%g5
+	bne	3f
+	 mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + VMA_VM_MM], %o0
+	ld	[%o0 + AOFF_mm_context], %o3
+	lda	[%g1] ASI_M_MMUREGS, %g5
+	sethi	%hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	and	%o1, %o4, %o1
+	add	%o1, 0x200, %o1
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+2:	sub	%o1, %o4, %o1
+	cmp	%o1, %o2
+	blu,a	2b
+	 sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	sta	%g5, [%g1] ASI_M_MMUREGS
+	retl
+	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+3:	tst	%g5
+	bne,a	3b
+	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	b,a	1b
+
+sun4dsmp_flush_tlb_page:
+	sethi	%hi(sun4dsmp_flush_tlb_spin), %g3
+1:	ldstub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	tst	%g5
+	bne	2f
+	 mov	SRMMU_CTX_REG, %g1
+	ld	[%o0 + VMA_VM_MM], %o0
+	ld	[%o0 + AOFF_mm_context], %o3
+	lda	[%g1] ASI_M_MMUREGS, %g5
+	and	%o1, PAGE_MASK, %o1
+	sta	%o3, [%g1] ASI_M_MMUREGS
+	sta	%g0, [%o1] ASI_M_FLUSH_PROBE
+	sta	%g5, [%g1] ASI_M_MMUREGS
+	retl
+	 stb	%g0, [%g3 + %lo(sun4dsmp_flush_tlb_spin)]
+2:	tst	%g5
+	bne,a	2b
+	 ldub	[%g3 + %lo(sun4dsmp_flush_tlb_spin)], %g5
+	b,a	1b
+	 nop
+#endif