Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 5c8a2eb..703d306 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -15,8 +15,7 @@
 
 config PAGE_OFFSET
 	hex
-	default "0x80000000" if MMU && SUPERH32
-	default "0x20000000" if MMU && SUPERH64
+	default "0x80000000" if MMU
 	default "0x00000000"
 
 config FORCE_MAX_ZONEORDER
@@ -45,7 +44,7 @@
 config MEMORY_START
 	hex "Physical memory start address"
 	default "0x08000000"
-	---help---
+	help
 	  Computers built with Hitachi SuperH processors always
 	  map the ROM starting at address zero.  But the processor
 	  does not specify the range that RAM takes.
@@ -72,12 +71,11 @@
 
 config 29BIT
 	def_bool !32BIT
-	depends on SUPERH32
 	select UNCACHED_MAPPING
 
 config 32BIT
 	bool
-	default y if CPU_SH5 || !MMU
+	default !MMU
 
 config PMB
 	bool "Support 32-bit physical addressing through PMB"
@@ -152,7 +150,7 @@
 
 config IOREMAP_FIXED
        def_bool y
-       depends on X2TLB || SUPERH64
+       depends on X2TLB
 
 config UNCACHED_MAPPING
 	bool
@@ -184,7 +182,7 @@
 
 config PAGE_SIZE_64KB
 	bool "64kB"
-	depends on !MMU || CPU_SH4 || CPU_SH5
+	depends on !MMU || CPU_SH4
 	help
 	  This enables support for 64kB pages, possible on all SH-4
 	  CPUs and later.
@@ -216,10 +214,6 @@
 	bool "64MB"
 	depends on X2TLB
 
-config HUGETLB_PAGE_SIZE_512MB
-	bool "512MB"
-	depends on CPU_SH5
-
 endchoice
 
 config SCHED_MC
@@ -242,7 +236,7 @@
 
 choice
 	prompt "Cache mode"
-	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
+	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
 	default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
 
 config CACHE_WRITEBACK
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 5051b38..f69ddc7 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,15 +10,14 @@
 cacheops-$(CONFIG_CPU_SH2A)		:= cache-sh2a.o
 cacheops-$(CONFIG_CPU_SH3)		:= cache-sh3.o
 cacheops-$(CONFIG_CPU_SH4)		:= cache-sh4.o flush-sh4.o
-cacheops-$(CONFIG_CPU_SH5)		:= cache-sh5.o flush-sh4.o
 cacheops-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
 cacheops-$(CONFIG_CPU_SHX3)		+= cache-shx3.o
 
 obj-y			+= $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
-mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault.o ioremap.o kmap.o \
-			   pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
+mmu-$(CONFIG_MMU)	:= extable_32.o fault.o ioremap.o kmap.o \
+			   pgtable.o tlbex_32.o tlbflush_32.o
 
 obj-y			+= $(mmu-y)
 
@@ -31,7 +30,6 @@
 debugfs-$(CONFIG_CPU_SH4)	+= tlb-debugfs.o
 tlb-$(CONFIG_CPU_SH3)		:= tlb-sh3.o
 tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o tlb-urb.o
-tlb-$(CONFIG_CPU_SH5)		:= tlb-sh5.o
 tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o tlb-urb.o
 obj-y				+= $(tlb-y)
 endif
@@ -45,30 +43,3 @@
 obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
 GCOV_PROFILE_pmb.o := n
-
-# Special flags for tlbex_64.o.  This puts restrictions on the number of
-# caller-save registers that the compiler can target when building this file.
-# This is required because the code is called from a context in entry.S where
-# very few registers have been saved in the exception handler (for speed
-# reasons).
-# The caller save registers that have been saved and which can be used are
-# r2,r3,r4,r5 : argument passing
-# r15, r18 : SP and LINK
-# tr0-4 : allow all caller-save TR's.  The compiler seems to be able to make
-#         use of them, so it's probably beneficial to performance to save them
-#         and have them available for it.
-#
-# The resources not listed below are callee save, i.e. the compiler is free to
-# use any of them and will spill them to the stack itself.
-
-CFLAGS_tlbex_64.o += -ffixed-r7 \
-	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
-	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
-	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
-	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
-	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
-	-ffixed-r41 -ffixed-r42 -ffixed-r43  \
-	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
-	-fomit-frame-pointer
-
-ccflags-y := -Werror
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
index ec2b253..fb517b8 100644
--- a/arch/sh/mm/alignment.c
+++ b/arch/sh/mm/alignment.c
@@ -152,13 +152,12 @@
 	return count;
 }
 
-static const struct file_operations alignment_proc_fops = {
-	.owner		= THIS_MODULE,
-	.open		= alignment_proc_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-	.write		= alignment_proc_write,
+static const struct proc_ops alignment_proc_ops = {
+	.proc_open	= alignment_proc_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+	.proc_write	= alignment_proc_write,
 };
 
 /*
@@ -176,12 +175,12 @@
 		return -ENOMEM;
 
 	res = proc_create_data("alignment", S_IWUSR | S_IRUGO, dir,
-			       &alignment_proc_fops, &se_usermode);
+			       &alignment_proc_ops, &se_usermode);
 	if (!res)
 		return -ENOMEM;
 
         res = proc_create_data("kernel_alignment", S_IWUSR | S_IRUGO, dir,
-			       &alignment_proc_fops, &se_kernmode_warn);
+			       &alignment_proc_ops, &se_kernmode_warn);
         if (!res)
                 return -ENOMEM;
 
diff --git a/arch/sh/mm/cache-sh3.c b/arch/sh/mm/cache-sh3.c
index 8172a17..bc59598 100644
--- a/arch/sh/mm/cache-sh3.c
+++ b/arch/sh/mm/cache-sh3.c
@@ -12,12 +12,10 @@
 #include <linux/threads.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <linux/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index eee9114..ddfa968 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -16,7 +16,6 @@
 #include <linux/mutex.h>
 #include <linux/fs.h>
 #include <linux/highmem.h>
-#include <asm/pgtable.h>
 #include <asm/mmu_context.h>
 #include <asm/cache_insns.h>
 #include <asm/cacheflush.h>
@@ -183,7 +182,7 @@
  * accessed with (hence cache set) is in accord with the physical
  * address (i.e. tag).  It's no different here.
  *
- * Caller takes mm->mmap_sem.
+ * Caller takes mm->mmap_lock.
  */
 static void sh4_flush_cache_mm(void *arg)
 {
@@ -208,8 +207,6 @@
 	struct page *page;
 	unsigned long address, pfn, phys;
 	int map_coherent = 0;
-	pgd_t *pgd;
-	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	void *vaddr;
@@ -223,9 +220,7 @@
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
-	pgd = pgd_offset(vma->vm_mm, address);
-	pud = pud_offset(pgd, address);
-	pmd = pmd_offset(pud, address);
+	pmd = pmd_off(vma->vm_mm, address);
 	pte = pte_offset_kernel(pmd, address);
 
 	/* If the page isn't present, there is nothing to do here. */
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
deleted file mode 100644
index 445b5e6..0000000
--- a/arch/sh/mm/cache-sh5.c
+++ /dev/null
@@ -1,621 +0,0 @@
-/*
- * arch/sh/mm/cache-sh5.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2002  Benedict Gaster
- * Copyright (C) 2003  Richard Curnow
- * Copyright (C) 2003 - 2008  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-#include <asm/mmu_context.h>
-
-extern void __weak sh4__flush_region_init(void);
-
-/* Wired TLB entry for the D-cache */
-static unsigned long long dtlb_cache_slot;
-
-/*
- * The following group of functions deal with mapping and unmapping a
- * temporary page into a DTLB slot that has been set aside for exclusive
- * use.
- */
-static inline void
-sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
-			   unsigned long paddr)
-{
-	local_irq_disable();
-	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
-}
-
-static inline void sh64_teardown_dtlb_cache_slot(void)
-{
-	sh64_teardown_tlb_slot(dtlb_cache_slot);
-	local_irq_enable();
-}
-
-static inline void sh64_icache_inv_all(void)
-{
-	unsigned long long addr, flag, data;
-	unsigned long flags;
-
-	addr = ICCR0;
-	flag = ICCR0_ICI;
-	data = 0;
-
-	/* Make this a critical section for safety (probably not strictly necessary.) */
-	local_irq_save(flags);
-
-	/* Without %1 it gets unexplicably wrong */
-	__asm__ __volatile__ (
-		"getcfg	%3, 0, %0\n\t"
-		"or	%0, %2, %0\n\t"
-		"putcfg	%3, 0, %0\n\t"
-		"synci"
-		: "=&r" (data)
-		: "0" (data), "r" (flag), "r" (addr));
-
-	local_irq_restore(flags);
-}
-
-static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
-{
-	/* Invalidate range of addresses [start,end] from the I-cache, where
-	 * the addresses lie in the kernel superpage. */
-
-	unsigned long long ullend, addr, aligned_start;
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) end;
-
-	while (addr <= ullend) {
-		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
-{
-	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
-	   Also, eaddr is page-aligned. */
-	unsigned int cpu = smp_processor_id();
-	unsigned long long addr, end_addr;
-	unsigned long flags = 0;
-	unsigned long running_asid, vma_asid;
-	addr = eaddr;
-	end_addr = addr + PAGE_SIZE;
-
-	/* Check whether we can use the current ASID for the I-cache
-	   invalidation.  For example, if we're called via
-	   access_process_vm->flush_cache_page->here, (e.g. when reading from
-	   /proc), 'running_asid' will be that of the reader, not of the
-	   victim.
-
-	   Also, note the risk that we might get pre-empted between the ASID
-	   compare and blocking IRQs, and before we regain control, the
-	   pid->ASID mapping changes.  However, the whole cache will get
-	   invalidated when the mapping is renewed, so the worst that can
-	   happen is that the loop below ends up invalidating somebody else's
-	   cache entries.
-	*/
-
-	running_asid = get_asid();
-	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid) {
-		local_irq_save(flags);
-		switch_and_save_asid(vma_asid);
-	}
-	while (addr < end_addr) {
-		/* Worth unrolling a little */
-		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
-		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
-		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
-		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
-		addr += 128;
-	}
-	if (running_asid != vma_asid) {
-		switch_and_save_asid(running_asid);
-		local_irq_restore(flags);
-	}
-}
-
-static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
-			  unsigned long start, unsigned long end)
-{
-	/* Used for invalidating big chunks of I-cache, i.e. assume the range
-	   is whole pages.  If 'start' or 'end' is not page aligned, the code
-	   is conservative and invalidates to the ends of the enclosing pages.
-	   This is functionally OK, just a performance loss. */
-
-	/* See the comments below in sh64_dcache_purge_user_range() regarding
-	   the choice of algorithm.  However, for the I-cache option (2) isn't
-	   available because there are no physical tags so aliases can't be
-	   resolved.  The icbi instruction has to be used through the user
-	   mapping.   Because icbi is cheaper than ocbp on a cache hit, it
-	   would be cheaper to use the selective code for a large range than is
-	   possible with the D-cache.  Just assume 64 for now as a working
-	   figure.
-	   */
-	int n_pages;
-
-	if (!mm)
-		return;
-
-	n_pages = ((end - start) >> PAGE_SHIFT);
-	if (n_pages >= 64) {
-		sh64_icache_inv_all();
-	} else {
-		unsigned long aligned_start;
-		unsigned long eaddr;
-		unsigned long after_last_page_start;
-		unsigned long mm_asid, current_asid;
-		unsigned long flags = 0;
-
-		mm_asid = cpu_asid(smp_processor_id(), mm);
-		current_asid = get_asid();
-
-		if (mm_asid != current_asid) {
-			/* Switch ASID and run the invalidate loop under cli */
-			local_irq_save(flags);
-			switch_and_save_asid(mm_asid);
-		}
-
-		aligned_start = start & PAGE_MASK;
-		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
-
-		while (aligned_start < after_last_page_start) {
-			struct vm_area_struct *vma;
-			unsigned long vma_end;
-			vma = find_vma(mm, aligned_start);
-			if (!vma || (aligned_start <= vma->vm_end)) {
-				/* Avoid getting stuck in an error condition */
-				aligned_start += PAGE_SIZE;
-				continue;
-			}
-			vma_end = vma->vm_end;
-			if (vma->vm_flags & VM_EXEC) {
-				/* Executable */
-				eaddr = aligned_start;
-				while (eaddr < vma_end) {
-					sh64_icache_inv_user_page(vma, eaddr);
-					eaddr += PAGE_SIZE;
-				}
-			}
-			aligned_start = vma->vm_end; /* Skip to start of next region */
-		}
-
-		if (mm_asid != current_asid) {
-			switch_and_save_asid(current_asid);
-			local_irq_restore(flags);
-		}
-	}
-}
-
-static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
-{
-	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
-	   cache hit on the virtual tag the instruction ends there, without a
-	   TLB lookup. */
-
-	unsigned long long aligned_start;
-	unsigned long long ull_end;
-	unsigned long long addr;
-
-	ull_end = end;
-
-	/* Just invalidate over the range using the natural addresses.  TLB
-	   miss handling will be OK (TBC).  Since it's for the current process,
-	   either we're already in the right ASID context, or the ASIDs have
-	   been recycled since we were last active in which case we might just
-	   invalidate another processes I-cache entries : no worries, just a
-	   performance drop for him. */
-	aligned_start = L1_CACHE_ALIGN(start);
-	addr = aligned_start;
-	while (addr < ull_end) {
-		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
-		__asm__ __volatile__ ("nop");
-		__asm__ __volatile__ ("nop");
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-/* Buffer used as the target of alloco instructions to purge data from cache
-   sets by natural eviction. -- RPC */
-#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
-static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-
-static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
-{
-	/* Purge all ways in a particular block of sets, specified by the base
-	   set number and number of sets.  Can handle wrap-around, if that's
-	   needed.  */
-
-	int dummy_buffer_base_set;
-	unsigned long long eaddr, eaddr0, eaddr1;
-	int j;
-	int set_offset;
-
-	dummy_buffer_base_set = ((int)&dummy_alloco_area &
-				 cpu_data->dcache.entry_mask) >>
-				 cpu_data->dcache.entry_shift;
-	set_offset = sets_to_purge_base - dummy_buffer_base_set;
-
-	for (j = 0; j < n_sets; j++, set_offset++) {
-		set_offset &= (cpu_data->dcache.sets - 1);
-		eaddr0 = (unsigned long long)dummy_alloco_area +
-			(set_offset << cpu_data->dcache.entry_shift);
-
-		/*
-		 * Do one alloco which hits the required set per cache
-		 * way.  For write-back mode, this will purge the #ways
-		 * resident lines.  There's little point unrolling this
-		 * loop because the allocos stall more if they're too
-		 * close together.
-		 */
-		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
-				  cpu_data->dcache.ways;
-
-		for (eaddr = eaddr0; eaddr < eaddr1;
-		     eaddr += cpu_data->dcache.way_size) {
-			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
-			__asm__ __volatile__ ("synco"); /* TAKum03020 */
-		}
-
-		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
-				  cpu_data->dcache.ways;
-
-		for (eaddr = eaddr0; eaddr < eaddr1;
-		     eaddr += cpu_data->dcache.way_size) {
-			/*
-			 * Load from each address.  Required because
-			 * alloco is a NOP if the cache is write-through.
-			 */
-			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
-				__raw_readb((unsigned long)eaddr);
-		}
-	}
-
-	/*
-	 * Don't use OCBI to invalidate the lines.  That costs cycles
-	 * directly.  If the dummy block is just left resident, it will
-	 * naturally get evicted as required.
-	 */
-}
-
-/*
- * Purge the entire contents of the dcache.  The most efficient way to
- * achieve this is to use alloco instructions on a region of unused
- * memory equal in size to the cache, thereby causing the current
- * contents to be discarded by natural eviction.  The alternative, namely
- * reading every tag, setting up a mapping for the corresponding page and
- * doing an OCBP for the line, would be much more expensive.
- */
-static void sh64_dcache_purge_all(void)
-{
-
-	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
-}
-
-
-/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
-   anything else in the kernel */
-#define MAGIC_PAGE0_START 0xffffffffec000000ULL
-
-/* Purge the physical page 'paddr' from the cache.  It's known that any
- * cache lines requiring attention have the same page colour as the the
- * address 'eaddr'.
- *
- * This relies on the fact that the D-cache matches on physical tags when
- * no virtual tag matches.  So we create an alias for the original page
- * and purge through that.  (Alternatively, we could have done this by
- * switching ASID to match the original mapping and purged through that,
- * but that involves ASID switching cost + probably a TLBMISS + refill
- * anyway.)
- */
-static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
-					        unsigned long eaddr)
-{
-	unsigned long long magic_page_start;
-	unsigned long long magic_eaddr, magic_eaddr_end;
-
-	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
-
-	/* As long as the kernel is not pre-emptible, this doesn't need to be
-	   under cli/sti. */
-	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
-
-	magic_eaddr = magic_page_start;
-	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
-
-	while (magic_eaddr < magic_eaddr_end) {
-		/* Little point in unrolling this loop - the OCBPs are blocking
-		   and won't go any quicker (i.e. the loop overhead is parallel
-		   to part of the OCBP execution.) */
-		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
-		magic_eaddr += L1_CACHE_BYTES;
-	}
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * Purge a page given its physical start address, by creating a temporary
- * 1 page mapping and purging across that.  Even if we know the virtual
- * address (& vma or mm) of the page, the method here is more elegant
- * because it avoids issues of coping with page faults on the purge
- * instructions (i.e. no special-case code required in the critical path
- * in the TLB miss handling).
- */
-static void sh64_dcache_purge_phy_page(unsigned long paddr)
-{
-	unsigned long long eaddr_start, eaddr, eaddr_end;
-	int i;
-
-	/* As long as the kernel is not pre-emptible, this doesn't need to be
-	   under cli/sti. */
-	eaddr_start = MAGIC_PAGE0_START;
-	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
-		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
-
-		eaddr = eaddr_start;
-		eaddr_end = eaddr + PAGE_SIZE;
-		while (eaddr < eaddr_end) {
-			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
-			eaddr += L1_CACHE_BYTES;
-		}
-
-		sh64_teardown_dtlb_cache_slot();
-		eaddr_start += PAGE_SIZE;
-	}
-}
-
-static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
-				unsigned long addr, unsigned long end)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	spinlock_t *ptl;
-	unsigned long paddr;
-
-	if (!mm)
-		return; /* No way to find physical address of page */
-
-	pgd = pgd_offset(mm, addr);
-	if (pgd_bad(*pgd))
-		return;
-
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud) || pud_bad(*pud))
-		return;
-
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return;
-
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-	do {
-		entry = *pte;
-		if (pte_none(entry) || !pte_present(entry))
-			continue;
-		paddr = pte_val(entry) & PAGE_MASK;
-		sh64_dcache_purge_coloured_phy_page(paddr, addr);
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap_unlock(pte - 1, ptl);
-}
-
-/*
- * There are at least 5 choices for the implementation of this, with
- * pros (+), cons(-), comments(*):
- *
- * 1. ocbp each line in the range through the original user's ASID
- *    + no lines spuriously evicted
- *    - tlbmiss handling (must either handle faults on demand => extra
- *	special-case code in tlbmiss critical path), or map the page in
- *	advance (=> flush_tlb_range in advance to avoid multiple hits)
- *    - ASID switching
- *    - expensive for large ranges
- *
- * 2. temporarily map each page in the range to a special effective
- *    address and ocbp through the temporary mapping; relies on the
- *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
- *    never look at the etags)
- *    + no spurious evictions
- *    - expensive for large ranges
- *    * surely cheaper than (1)
- *
- * 3. walk all the lines in the cache, check the tags, if a match
- *    occurs create a page mapping to ocbp the line through
- *    + no spurious evictions
- *    - tag inspection overhead
- *    - (especially for small ranges)
- *    - potential cost of setting up/tearing down page mapping for
- *	every line that matches the range
- *    * cost partly independent of range size
- *
- * 4. walk all the lines in the cache, check the tags, if a match
- *    occurs use 4 * alloco to purge the line (+3 other probably
- *    innocent victims) by natural eviction
- *    + no tlb mapping overheads
- *    - spurious evictions
- *    - tag inspection overhead
- *
- * 5. implement like flush_cache_all
- *    + no tag inspection overhead
- *    - spurious evictions
- *    - bad for small ranges
- *
- * (1) can be ruled out as more expensive than (2).  (2) appears best
- * for small ranges.  The choice between (3), (4) and (5) for large
- * ranges and the range size for the large/small boundary need
- * benchmarking to determine.
- *
- * For now use approach (2) for small ranges and (5) for large ones.
- */
-static void sh64_dcache_purge_user_range(struct mm_struct *mm,
-			  unsigned long start, unsigned long end)
-{
-	int n_pages = ((end - start) >> PAGE_SHIFT);
-
-	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
-		sh64_dcache_purge_all();
-	} else {
-		/* Small range, covered by a single page table page */
-		start &= PAGE_MASK;	/* should already be so */
-		end = PAGE_ALIGN(end);	/* should already be so */
-		sh64_dcache_purge_user_pages(mm, start, end);
-	}
-}
-
-/*
- * Invalidate the entire contents of both caches, after writing back to
- * memory any dirty data from the D-cache.
- */
-static void sh5_flush_cache_all(void *unused)
-{
-	sh64_dcache_purge_all();
-	sh64_icache_inv_all();
-}
-
-/*
- * Invalidate an entire user-address space from both caches, after
- * writing back dirty data (e.g. for shared mmap etc).
- *
- * This could be coded selectively by inspecting all the tags then
- * doing 4*alloco on any set containing a match (as for
- * flush_cache_range), but fork/exit/execve (where this is called from)
- * are expensive anyway.
- *
- * Have to do a purge here, despite the comments re I-cache below.
- * There could be odd-coloured dirty data associated with the mm still
- * in the cache - if this gets written out through natural eviction
- * after the kernel has reused the page there will be chaos.
- *
- * The mm being torn down won't ever be active again, so any Icache
- * lines tagged with its ASID won't be visible for the rest of the
- * lifetime of this ASID cycle.  Before the ASID gets reused, there
- * will be a flush_cache_all.  Hence we don't need to touch the
- * I-cache.  This is similar to the lack of action needed in
- * flush_tlb_mm - see fault.c.
- */
-static void sh5_flush_cache_mm(void *unused)
-{
-	sh64_dcache_purge_all();
-}
-
-/*
- * Invalidate (from both caches) the range [start,end) of virtual
- * addresses from the user address space specified by mm, after writing
- * back any dirty data.
- *
- * Note, 'end' is 1 byte beyond the end of the range to flush.
- */
-static void sh5_flush_cache_range(void *args)
-{
-	struct flusher_data *data = args;
-	struct vm_area_struct *vma;
-	unsigned long start, end;
-
-	vma = data->vma;
-	start = data->addr1;
-	end = data->addr2;
-
-	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
-	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
-}
-
-/*
- * Invalidate any entries in either cache for the vma within the user
- * address space vma->vm_mm for the page starting at virtual address
- * 'eaddr'.   This seems to be used primarily in breaking COW.  Note,
- * the I-cache must be searched too in case the page in question is
- * both writable and being executed from (e.g. stack trampolines.)
- *
- * Note, this is called with pte lock held.
- */
-static void sh5_flush_cache_page(void *args)
-{
-	struct flusher_data *data = args;
-	struct vm_area_struct *vma;
-	unsigned long eaddr, pfn;
-
-	vma = data->vma;
-	eaddr = data->addr1;
-	pfn = data->addr2;
-
-	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_page(vma, eaddr);
-}
-
-static void sh5_flush_dcache_page(void *page)
-{
-	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
-	wmb();
-}
-
-/*
- * Flush the range [start,end] of kernel virtual address space from
- * the I-cache.  The corresponding range must be purged from the
- * D-cache also because the SH-5 doesn't have cache snooping between
- * the caches.  The addresses will be visible through the superpage
- * mapping, therefore it's guaranteed that there no cache entries for
- * the range in cache sets of the wrong colour.
- */
-static void sh5_flush_icache_range(void *args)
-{
-	struct flusher_data *data = args;
-	unsigned long start, end;
-
-	start = data->addr1;
-	end = data->addr2;
-
-	__flush_purge_region((void *)start, end);
-	wmb();
-	sh64_icache_inv_kernel_range(start, end);
-}
-
-/*
- * For the address range [start,end), write back the data from the
- * D-cache and invalidate the corresponding region of the I-cache for the
- * current process.  Used to flush signal trampolines on the stack to
- * make them executable.
- */
-static void sh5_flush_cache_sigtramp(void *vaddr)
-{
-	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
-
-	__flush_wback_region(vaddr, L1_CACHE_BYTES);
-	wmb();
-	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
-}
-
-void __init sh5_cache_init(void)
-{
-	local_flush_cache_all		= sh5_flush_cache_all;
-	local_flush_cache_mm		= sh5_flush_cache_mm;
-	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
-	local_flush_cache_page		= sh5_flush_cache_page;
-	local_flush_cache_range		= sh5_flush_cache_range;
-	local_flush_dcache_page		= sh5_flush_dcache_page;
-	local_flush_icache_range	= sh5_flush_icache_range;
-	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;
-
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot	= sh64_get_wired_dtlb_entry();
-
-	sh4__flush_region_init();
-}
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index ed25eba..4c67b3d 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -16,12 +16,10 @@
 #include <linux/threads.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/cache.h>
 #include <asm/io.h>
 #include <linux/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 464f160..3aef78c 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -355,12 +355,6 @@
 		}
 	}
 
-	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
-		extern void __weak sh5_cache_init(void);
-
-		sh5_cache_init();
-	}
-
 skip:
 	emit_cache_params();
 }
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 792f361..0de206c 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -43,8 +43,7 @@
 
 	r = pdev->resource + pdev->num_resources - 1;
 	if (r->flags) {
-		pr_warning("%s: unable to find empty space for resource\n",
-			name);
+		pr_warn("%s: unable to find empty space for resource\n", name);
 		return -EINVAL;
 	}
 
@@ -54,12 +53,10 @@
 
 	buf = dma_alloc_coherent(&pdev->dev, memsize, &dma_handle, GFP_KERNEL);
 	if (!buf) {
-		pr_warning("%s: unable to allocate memory\n", name);
+		pr_warn("%s: unable to allocate memory\n", name);
 		return -ENOMEM;
 	}
 
-	memset(buf, 0, memsize);
-
 	r->flags = IORESOURCE_MEM;
 	r->start = dma_handle;
 	r->end = r->start + memsize - 1;
diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
deleted file mode 100644
index 7a3b4d3..0000000
--- a/arch/sh/mm/extable_64.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * arch/sh/mm/extable_64.c
- *
- * Copyright (C) 2003 Richard Curnow
- * Copyright (C) 2003, 2004  Paul Mundt
- *
- * Cloned from the 2.5 SH version..
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/bsearch.h>
-#include <linux/rwsem.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
-extern void __copy_user_fixup(void);
-
-static const struct exception_table_entry __copy_user_fixup_ex = {
-	.fixup = (unsigned long)&__copy_user_fixup,
-};
-
-/*
- * Some functions that may trap due to a bad user-mode address have too
- * many loads and stores in them to make it at all practical to label
- * each one and put them all in the main exception table.
- *
- * In particular, the fast memcpy routine is like this.  It's fix-up is
- * just to fall back to a slow byte-at-a-time copy, which is handled the
- * conventional way.  So it's functionally OK to just handle any trap
- * occurring in the fast memcpy with that fixup.
- */
-static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
-{
-	if ((addr >= (unsigned long)&copy_user_memcpy) &&
-	    (addr <= (unsigned long)&copy_user_memcpy_end))
-		return &__copy_user_fixup_ex;
-
-	return NULL;
-}
-
-static int cmp_ex_search(const void *key, const void *elt)
-{
-	const struct exception_table_entry *_elt = elt;
-	unsigned long _key = *(unsigned long *)key;
-
-	/* avoid overflow */
-	if (_key > _elt->insn)
-		return 1;
-	if (_key < _elt->insn)
-		return -1;
-	return 0;
-}
-
-/* Simple binary search */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-		 const size_t num,
-		 unsigned long value)
-{
-	const struct exception_table_entry *mid;
-
-	mid = check_exception_ranges(value);
-	if (mid)
-		return mid;
-
-	return bsearch(&value, base, num,
-		       sizeof(struct exception_table_entry), cmp_ex_search);
-}
-
-int fixup_exception(struct pt_regs *regs)
-{
-	const struct exception_table_entry *fixup;
-
-	fixup = search_exception_tables(regs->pc);
-	if (fixup) {
-		regs->pc = fixup->fixup;
-		return 1;
-	}
-
-	return 0;
-}
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 5f51456..88a1f45 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -47,12 +47,13 @@
 			pgd = swapper_pg_dir;
 	}
 
-	printk(KERN_ALERT "pgd = %p\n", pgd);
+	pr_alert("pgd = %p\n", pgd);
 	pgd += pgd_index(addr);
-	printk(KERN_ALERT "[%08lx] *pgd=%0*Lx", addr,
-	       (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd));
+	pr_alert("[%08lx] *pgd=%0*llx", addr, (u32)(sizeof(*pgd) * 2),
+		 (u64)pgd_val(*pgd));
 
 	do {
+		p4d_t *p4d;
 		pud_t *pud;
 		pmd_t *pmd;
 		pte_t *pte;
@@ -61,33 +62,46 @@
 			break;
 
 		if (pgd_bad(*pgd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
-		pud = pud_offset(pgd, addr);
+		p4d = p4d_offset(pgd, addr);
+		if (PTRS_PER_P4D != 1)
+			pr_cont(", *p4d=%0*Lx", (u32)(sizeof(*p4d) * 2),
+			        (u64)p4d_val(*p4d));
+
+		if (p4d_none(*p4d))
+			break;
+
+		if (p4d_bad(*p4d)) {
+			pr_cont("(bad)");
+			break;
+		}
+
+		pud = pud_offset(p4d, addr);
 		if (PTRS_PER_PUD != 1)
-			printk(", *pud=%0*Lx", (u32)(sizeof(*pud) * 2),
-			       (u64)pud_val(*pud));
+			pr_cont(", *pud=%0*llx", (u32)(sizeof(*pud) * 2),
+				(u64)pud_val(*pud));
 
 		if (pud_none(*pud))
 			break;
 
 		if (pud_bad(*pud)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
 		pmd = pmd_offset(pud, addr);
 		if (PTRS_PER_PMD != 1)
-			printk(", *pmd=%0*Lx", (u32)(sizeof(*pmd) * 2),
-			       (u64)pmd_val(*pmd));
+			pr_cont(", *pmd=%0*llx", (u32)(sizeof(*pmd) * 2),
+				(u64)pmd_val(*pmd));
 
 		if (pmd_none(*pmd))
 			break;
 
 		if (pmd_bad(*pmd)) {
-			printk("(bad)");
+			pr_cont("(bad)");
 			break;
 		}
 
@@ -96,17 +110,18 @@
 			break;
 
 		pte = pte_offset_kernel(pmd, addr);
-		printk(", *pte=%0*Lx", (u32)(sizeof(*pte) * 2),
-		       (u64)pte_val(*pte));
+		pr_cont(", *pte=%0*llx", (u32)(sizeof(*pte) * 2),
+			(u64)pte_val(*pte));
 	} while (0);
 
-	printk("\n");
+	pr_cont("\n");
 }
 
 static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 {
 	unsigned index = pgd_index(address);
 	pgd_t *pgd_k;
+	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
@@ -116,8 +131,13 @@
 	if (!pgd_present(*pgd_k))
 		return NULL;
 
-	pud = pud_offset(pgd, address);
-	pud_k = pud_offset(pgd_k, address);
+	p4d = p4d_offset(pgd, address);
+	p4d_k = p4d_offset(pgd_k, address);
+	if (!p4d_present(*p4d_k))
+		return NULL;
+
+	pud = pud_offset(p4d, address);
+	pud_k = pud_offset(p4d_k, address);
 	if (!pud_present(*pud_k))
 		return NULL;
 
@@ -188,14 +208,11 @@
 	if (!oops_may_print())
 		return;
 
-	printk(KERN_ALERT "BUG: unable to handle kernel ");
-	if (address < PAGE_SIZE)
-		printk(KERN_CONT "NULL pointer dereference");
-	else
-		printk(KERN_CONT "paging request");
-
-	printk(KERN_CONT " at %08lx\n", address);
-	printk(KERN_ALERT "PC:");
+	pr_alert("BUG: unable to handle kernel %s at %08lx\n",
+		 address < PAGE_SIZE ? "NULL pointer dereference"
+				     : "paging request",
+		 address);
+	pr_alert("PC:");
 	printk_address(regs->pc, 1);
 
 	show_pte(NULL, address);
@@ -261,7 +278,7 @@
 	 * Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	__bad_area_nosemaphore(regs, error_code, address, si_code);
 }
@@ -285,7 +302,7 @@
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Kernel mode? Handle exceptions or die: */
 	if (!user_mode(regs))
@@ -302,25 +319,25 @@
 	 * Pagefault was interrupted by SIGKILL. We have no reason to
 	 * continue pagefault.
 	 */
-	if (fatal_signal_pending(current)) {
-		if (!(fault & VM_FAULT_RETRY))
-			up_read(&current->mm->mmap_sem);
+	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			no_context(regs, error_code, address);
 		return 1;
 	}
 
+	/* Release mmap_lock first if necessary */
+	if (!(fault & VM_FAULT_RETRY))
+		mmap_read_unlock(current->mm);
+
 	if (!(fault & VM_FAULT_ERROR))
 		return 0;
 
 	if (fault & VM_FAULT_OOM) {
 		/* Kernel mode? Handle exceptions or die: */
 		if (!user_mode(regs)) {
-			up_read(&current->mm->mmap_sem);
 			no_context(regs, error_code, address);
 			return 1;
 		}
-		up_read(&current->mm->mmap_sem);
 
 		/*
 		 * We ran out of memory, call the OOM killer, and return the
@@ -355,7 +372,7 @@
 		return 1;
 
 	/* read, not present: */
-	if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))))
+	if (unlikely(!vma_is_accessible(vma)))
 		return 1;
 
 	return 0;
@@ -380,7 +397,7 @@
 	struct mm_struct *mm;
 	struct vm_area_struct * vma;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	tsk = current;
 	mm = tsk->mm;
@@ -424,7 +441,7 @@
 	}
 
 retry:
-	down_read(&mm->mmap_sem);
+	mmap_read_lock(mm);
 
 	vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
@@ -464,28 +481,18 @@
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	if (unlikely(fault & (VM_FAULT_RETRY | VM_FAULT_ERROR)))
 		if (mm_fault_error(regs, error_code, address, fault))
 			return;
 
 	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-				      regs, address);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-				      regs, address);
-		}
 		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
 			flags |= FAULT_FLAG_TRIED;
 
 			/*
-			 * No need to up_read(&mm->mmap_sem) as we would
+			 * No need to mmap_read_unlock(mm) as we would
 			 * have already released it in __lock_page_or_retry
 			 * in mm/filemap.c.
 			 */
@@ -493,5 +500,5 @@
 		}
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 }
diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
index 960deb1..220d7bc 100644
--- a/arch/sh/mm/hugetlbpage.c
+++ b/arch/sh/mm/hugetlbpage.c
@@ -17,7 +17,6 @@
 #include <linux/sysctl.h>
 
 #include <asm/mman.h>
-#include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -26,17 +25,21 @@
 			unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd) {
-		pud = pud_alloc(mm, pgd, addr);
-		if (pud) {
-			pmd = pmd_alloc(mm, pud, addr);
-			if (pmd)
-				pte = pte_alloc_map(mm, pmd, addr);
+		p4d = p4d_alloc(mm, pgd, addr);
+		if (p4d) {
+			pud = pud_alloc(mm, p4d, addr);
+			if (pud) {
+				pmd = pmd_alloc(mm, pud, addr);
+				if (pmd)
+					pte = pte_alloc_map(mm, pmd, addr);
+			}
 		}
 	}
 
@@ -47,17 +50,21 @@
 		       unsigned long addr, unsigned long sz)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
 
 	pgd = pgd_offset(mm, addr);
 	if (pgd) {
-		pud = pud_offset(pgd, addr);
-		if (pud) {
-			pmd = pmd_offset(pud, addr);
-			if (pmd)
-				pte = pte_offset_map(pmd, addr);
+		p4d = p4d_offset(pgd, addr);
+		if (p4d) {
+			pud = pud_offset(p4d, addr);
+			if (pud) {
+				pmd = pmd_offset(pud, addr);
+				if (pmd)
+					pte = pte_offset_map(pmd, addr);
+			}
 		}
 	}
 
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d1b1ff2..3348e0c 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -27,7 +27,9 @@
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/cache.h>
+#include <asm/pgalloc.h>
 #include <linux/sizes.h>
+#include "ioremap.h"
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
@@ -45,6 +47,7 @@
 static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 
@@ -54,7 +57,13 @@
 		return NULL;
 	}
 
-	pud = pud_alloc(NULL, pgd, addr);
+	p4d = p4d_alloc(NULL, pgd, addr);
+	if (unlikely(!p4d)) {
+		p4d_ERROR(*p4d);
+		return NULL;
+	}
+
+	pud = pud_alloc(NULL, p4d, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
 		return NULL;
@@ -172,9 +181,9 @@
 	unsigned long vaddr;
 
 	vaddr = start;
-	i = __pgd_offset(vaddr);
-	j = __pud_offset(vaddr);
-	k = __pmd_offset(vaddr);
+	i = pgd_index(vaddr);
+	j = pud_index(vaddr);
+	k = pmd_index(vaddr);
 	pgd = pgd_base + i;
 
 	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
@@ -217,15 +226,12 @@
 
 static void __init do_init_bootmem(void)
 {
-	struct memblock_region *reg;
+	unsigned long start_pfn, end_pfn;
+	int i;
 
 	/* Add active regions with valid PFNs. */
-	for_each_memblock(memory, reg) {
-		unsigned long start_pfn, end_pfn;
-		start_pfn = memblock_region_memory_base_pfn(reg);
-		end_pfn = memblock_region_memory_end_pfn(reg);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
 		__add_active_range(0, start_pfn, end_pfn);
-	}
 
 	/* All of system RAM sits in node 0 for the non-NUMA case */
 	allocate_pgdat(0);
@@ -233,12 +239,6 @@
 
 	plat_mem_setup();
 
-	for_each_memblock(memory, reg) {
-		int nid = memblock_get_region_node(reg);
-
-		memory_present(nid, memblock_region_memory_base_pfn(reg),
-			memblock_region_memory_end_pfn(reg));
-	}
 	sparse_init();
 }
 
@@ -334,7 +334,7 @@
 
 	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-	free_area_init_nodes(max_zone_pfns);
+	free_area_init(max_zone_pfns);
 }
 
 unsigned int mem_init_done = 0;
@@ -406,29 +406,23 @@
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 int arch_add_memory(int nid, u64 start, u64 size,
-			struct mhp_restrictions *restrictions)
+		    struct mhp_params *params)
 {
 	unsigned long start_pfn = PFN_DOWN(start);
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
+	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
+		return -EINVAL;
+
 	/* We only have ZONE_NORMAL, so this is easy.. */
-	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
+	ret = __add_pages(nid, start_pfn, nr_pages, params);
 	if (unlikely(ret))
 		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);
 
 	return ret;
 }
 
-#ifdef CONFIG_NUMA
-int memory_add_physaddr_to_nid(u64 addr)
-{
-	/* Node 0 for now.. */
-	return 0;
-}
-EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
-#endif
-
 void arch_remove_memory(int nid, u64 start, u64 size,
 			struct vmem_altmap *altmap)
 {
diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index d09ddfe..2134258 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -18,12 +18,59 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/io.h>
+#include <asm/io_trapped.h>
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
+#include "ioremap.h"
+
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment.  This is true for P1
+ * and P2 addresses, as well as some P3 ones.  However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t last_addr = offset + size - 1;
+
+	/*
+	 * For P1 and P2 space this is trivial, as everything is already
+	 * mapped. Uncached access for P1 addresses are done through P2.
+	 * In the P3 case or for addresses outside of the 29-bit space,
+	 * mapping must be done by the PMB or by using page tables.
+	 */
+	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
+			return (void __iomem *)P1SEGADDR(offset);
+
+		return (void __iomem *)P2SEGADDR(offset);
+	}
+
+	/* P4 above the store queues are always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
+
+	return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot)		NULL
+#endif /* CONFIG_29BIT */
 
 /*
  * Remap an arbitrary physical address space into the kernel virtual
@@ -42,6 +89,14 @@
 	unsigned long offset, last_addr, addr, orig_addr;
 	void __iomem *mapped;
 
+	mapped = __ioremap_trapped(phys_addr, size);
+	if (mapped)
+		return mapped;
+
+	mapped = __ioremap_29bit(phys_addr, size, pgprot);
+	if (mapped)
+		return mapped;
+
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
@@ -103,7 +158,7 @@
 	return 0;
 }
 
-void __iounmap(void __iomem *addr)
+void iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
 	struct vm_struct *p;
@@ -134,4 +189,4 @@
 
 	kfree(p);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
diff --git a/arch/sh/mm/ioremap.h b/arch/sh/mm/ioremap.h
new file mode 100644
index 0000000..f2544e7
--- /dev/null
+++ b/arch/sh/mm/ioremap.h
@@ -0,0 +1,23 @@
+#ifndef _SH_MM_IORMEMAP_H
+#define _SH_MM_IORMEMAP_H 1
+
+#ifdef CONFIG_IOREMAP_FIXED
+void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
+int iounmap_fixed(void __iomem *);
+void ioremap_fixed_init(void);
+#else
+static inline void __iomem *
+ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
+{
+	BUG();
+	return NULL;
+}
+static inline void ioremap_fixed_init(void)
+{
+}
+static inline int iounmap_fixed(void __iomem *addr)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_IOREMAP_FIXED */
+#endif /* _SH_MM_IORMEMAP_H */
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
index 07e744d..136113b 100644
--- a/arch/sh/mm/ioremap_fixed.c
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -18,12 +18,12 @@
 #include <linux/proc_fs.h>
 #include <asm/fixmap.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
 #include <asm/addrspace.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
+#include "ioremap.h"
 
 struct ioremap_map {
 	void __iomem *addr;
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 9e6b38b..73fd7cc 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -14,9 +14,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-#define kmap_get_fixmap_pte(vaddr)                                     \
-	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
-
 static pte_t *kmap_coherent_pte;
 
 void __init kmap_coherent_init(void)
@@ -25,7 +22,7 @@
 
 	/* cache the first coherent kmap pte */
 	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
-	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+	kmap_coherent_pte = virt_to_kpte(vaddr);
 }
 
 void *kmap_coherent(struct page *page, unsigned long addr)
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index dca946f..8b45044 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -10,7 +10,6 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/mm.h>
-#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
 #include <linux/uaccess.h>
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index f7e4439..50f0dc1 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -53,7 +53,4 @@
 
 	/* It's up */
 	node_set_online(nid);
-
-	/* Kick sparsemem */
-	sparse_memory_present_with_active_regions(nid);
 }
diff --git a/arch/sh/mm/pgtable.c b/arch/sh/mm/pgtable.c
index 5c8f924..cf7ce4b 100644
--- a/arch/sh/mm/pgtable.c
+++ b/arch/sh/mm/pgtable.c
@@ -2,8 +2,6 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
-
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
 static struct kmem_cache *pmd_cachep;
@@ -13,6 +11,7 @@
 {
 	pgd_t *pgd = x;
 
+	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
 	memcpy(pgd + USER_PTRS_PER_PGD,
 	       swapper_pg_dir + USER_PTRS_PER_PGD,
 	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
@@ -32,7 +31,7 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
+	return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -48,7 +47,7 @@
 
 pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
+	return kmem_cache_alloc(pmd_cachep, GFP_KERNEL | __GFP_ZERO);
 }
 
 void pmd_free(struct mm_struct *mm, pmd_t *pmd)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index b59bad8..b20aba6 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -23,10 +23,10 @@
 #include <linux/io.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <linux/sizes.h>
 #include <linux/uaccess.h>
-#include <asm/pgtable.h>
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 8692435..fb400af 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -21,7 +21,6 @@
 
 #include <asm/io.h>
 #include <linux/uaccess.h>
-#include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
deleted file mode 100644
index e4bb2a8..0000000
--- a/arch/sh/mm/tlb-sh5.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * arch/sh/mm/tlb-sh5.c
- *
- * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
- * Copyright (C) 2003  Richard Curnow <richard.curnow@superh.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
-
-/**
- * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
- */
-int sh64_tlb_init(void)
-{
-	/* Assign some sane DTLB defaults */
-	cpu_data->dtlb.entries	= 64;
-	cpu_data->dtlb.step	= 0x10;
-
-	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
-	cpu_data->dtlb.next	= cpu_data->dtlb.first;
-
-	cpu_data->dtlb.last	= DTLB_FIXED |
-				  ((cpu_data->dtlb.entries - 1) *
-				   cpu_data->dtlb.step);
-
-	/* And again for the ITLB */
-	cpu_data->itlb.entries	= 64;
-	cpu_data->itlb.step	= 0x10;
-
-	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
-	cpu_data->itlb.next	= cpu_data->itlb.first;
-	cpu_data->itlb.last	= ITLB_FIXED |
-				  ((cpu_data->itlb.entries - 1) *
-				   cpu_data->itlb.step);
-
-	return 0;
-}
-
-/**
- * sh64_next_free_dtlb_entry - Find the next available DTLB entry
- */
-unsigned long long sh64_next_free_dtlb_entry(void)
-{
-	return cpu_data->dtlb.next;
-}
-
-/**
- * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
- */
-unsigned long long sh64_get_wired_dtlb_entry(void)
-{
-	unsigned long long entry = sh64_next_free_dtlb_entry();
-
-	cpu_data->dtlb.first += cpu_data->dtlb.step;
-	cpu_data->dtlb.next  += cpu_data->dtlb.step;
-
-	return entry;
-}
-
-/**
- * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
- *
- * @entry:	Address of TLB slot.
- *
- * Works like a stack, last one to allocate must be first one to free.
- */
-int sh64_put_wired_dtlb_entry(unsigned long long entry)
-{
-	__flush_tlb_slot(entry);
-
-	/*
-	 * We don't do any particularly useful tracking of wired entries,
-	 * so this approach works like a stack .. last one to be allocated
-	 * has to be the first one to be freed.
-	 *
-	 * We could potentially load wired entries into a list and work on
-	 * rebalancing the list periodically (which also entails moving the
-	 * contents of a TLB entry) .. though I have a feeling that this is
-	 * more trouble than it's worth.
-	 */
-
-	/*
-	 * Entry must be valid .. we don't want any ITLB addresses!
-	 */
-	if (entry <= DTLB_FIXED)
-		return -EINVAL;
-
-	/*
-	 * Next, check if we're within range to be freed. (ie, must be the
-	 * entry beneath the first 'free' entry!
-	 */
-	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
-		return -EINVAL;
-
-	/* If we are, then bring this entry back into the list */
-	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
-	cpu_data->dtlb.next	= entry;
-
-	return 0;
-}
-
-/**
- * sh64_setup_tlb_slot - Load up a translation in a wired slot.
- *
- * @config_addr:	Address of TLB slot.
- * @eaddr:		Virtual address.
- * @asid:		Address Space Identifier.
- * @paddr:		Physical address.
- *
- * Load up a virtual<->physical translation for @eaddr<->@paddr in the
- * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
- */
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
-			 unsigned long asid, unsigned long paddr)
-{
-	unsigned long long pteh, ptel;
-
-	pteh = neff_sign_extend(eaddr);
-	pteh &= PAGE_MASK;
-	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
-	ptel = neff_sign_extend(paddr);
-	ptel &= PAGE_MASK;
-	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
-
-	asm volatile("putcfg %0, 1, %1\n\t"
-			"putcfg %0, 0, %2\n"
-			: : "r" (config_addr), "r" (ptel), "r" (pteh));
-}
-
-/**
- * sh64_teardown_tlb_slot - Teardown a translation.
- *
- * @config_addr:	Address of TLB slot.
- *
- * Teardown any existing mapping in the TLB slot @config_addr.
- */
-void sh64_teardown_tlb_slot(unsigned long long config_addr)
-	__attribute__ ((alias("__flush_tlb_slot")));
-
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-	unsigned long long entry;
-	unsigned long paddr, flags;
-
-	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
-
-	local_irq_save(flags);
-
-	entry = sh64_get_wired_dtlb_entry();
-	dtlb_entries[dtlb_entry++] = entry;
-
-	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
-	paddr &= ~PAGE_MASK;
-
-	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
-	local_irq_restore(flags);
-}
-
-void tlb_unwire_entry(void)
-{
-	unsigned long long entry;
-	unsigned long flags;
-
-	BUG_ON(!dtlb_entry);
-
-	local_irq_save(flags);
-	entry = dtlb_entries[dtlb_entry--];
-
-	sh64_teardown_tlb_slot(entry);
-	sh64_put_wired_dtlb_entry(entry);
-
-	local_irq_restore(flags);
-}
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
-	unsigned long long ptel;
-	unsigned long long pteh=0;
-	struct tlb_info *tlbp;
-	unsigned long long next;
-	unsigned int fault_code = get_thread_fault_code();
-
-	/* Get PTEL first */
-	ptel = pte.pte_low;
-
-	/*
-	 * Set PTEH register
-	 */
-	pteh = neff_sign_extend(address & MMU_VPN_MASK);
-
-	/* Set the ASID. */
-	pteh |= get_asid() << PTEH_ASID_SHIFT;
-	pteh |= PTEH_VALID;
-
-	/* Set PTEL register, set_pte has performed the sign extension */
-	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
-	if (fault_code & FAULT_CODE_ITLB)
-		tlbp = &cpu_data->itlb;
-	else
-		tlbp = &cpu_data->dtlb;
-
-	next = tlbp->next;
-	__flush_tlb_slot(next);
-	asm volatile ("putcfg %0,1,%2\n\n\t"
-		      "putcfg %0,0,%1\n"
-		      :  : "r" (next), "r" (pteh), "r" (ptel) );
-
-	next += TLB_STEP;
-	if (next > tlbp->last)
-		next = tlbp->first;
-	tlbp->next = next;
-}
diff --git a/arch/sh/mm/tlbex_32.c b/arch/sh/mm/tlbex_32.c
index 382262d..1c53868 100644
--- a/arch/sh/mm/tlbex_32.c
+++ b/arch/sh/mm/tlbex_32.c
@@ -23,6 +23,7 @@
 	       unsigned long address)
 {
 	pgd_t *pgd;
+	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -42,7 +43,10 @@
 		pgd = pgd_offset(current->mm, address);
 	}
 
-	pud = pud_offset(pgd, address);
+	p4d = p4d_offset(pgd, address);
+	if (p4d_none_or_clear_bad(p4d))
+		return 1;
+	pud = pud_offset(p4d, address);
 	if (pud_none_or_clear_bad(pud))
 		return 1;
 	pmd = pmd_offset(pud, address);
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
deleted file mode 100644
index 8ff966d..0000000
--- a/arch/sh/mm/tlbex_64.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * The SH64 TLB miss.
- *
- * Original code from fault.c
- * Copyright (C) 2000, 2001  Paolo Alberelli
- *
- * Fast PTE->TLB refill path
- * Copyright (C) 2003 Richard.Curnow@superh.com
- *
- * IMPORTANT NOTES :
- * The do_fast_page_fault function is called from a context in entry.S
- * where very few registers have been saved.  In particular, the code in
- * this file must be compiled not to use ANY caller-save registers that
- * are not part of the restricted save set.  Also, it means that code in
- * this file must not make calls to functions elsewhere in the kernel, or
- * else the excepting context will see corruption in its caller-save
- * registers.  Plus, the entry.S save area is non-reentrant, so this code
- * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
- * on any exception.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/kprobes.h>
-#include <asm/tlb.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-static int handle_tlbmiss(unsigned long long protection_flags,
-			  unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	if (is_vmalloc_addr((void *)address)) {
-		pgd = pgd_offset_k(address);
-	} else {
-		if (unlikely(address >= TASK_SIZE || !current->mm))
-			return 1;
-
-		pgd = pgd_offset(current->mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none(*pud) || !pud_present(*pud))
-		return 1;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || !pmd_present(*pmd))
-		return 1;
-
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry))
-		return 1;
-
-	/*
-	 * If the page doesn't have sufficient protection bits set to
-	 * service the kind of fault being handled, there's not much
-	 * point doing the TLB refill.  Punt the fault to the general
-	 * handler.
-	 */
-	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 1;
-
-	update_mmu_cache(NULL, address, pte);
-
-	return 0;
-}
-
-/*
- * Put all this information into one structure so that everything is just
- * arithmetic relative to a single base address.  This reduces the number
- * of movi/shori pairs needed just to load addresses of static data.
- */
-struct expevt_lookup {
-	unsigned short protection_flags[8];
-	unsigned char  is_text_access[8];
-	unsigned char  is_write_access[8];
-};
-
-#define PRU (1<<9)
-#define PRW (1<<8)
-#define PRX (1<<7)
-#define PRR (1<<6)
-
-/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
-   the fault happened in user mode or privileged mode. */
-static struct expevt_lookup expevt_lookup_table = {
-	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
-	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
-};
-
-static inline unsigned int
-expevt_to_fault_code(unsigned long expevt)
-{
-	if (expevt == 0xa40)
-		return FAULT_CODE_ITLB;
-	else if (expevt == 0x060)
-		return FAULT_CODE_WRITE;
-
-	return 0;
-}
-
-/*
-   This routine handles page faults that can be serviced just by refilling a
-   TLB entry from an existing page table entry.  (This case represents a very
-   large majority of page faults.) Return 1 if the fault was successfully
-   handled.  Return 0 if the fault could not be handled.  (This leads into the
-   general fault handling in fault.c which deals with mapping file-backed
-   pages, stack growth, segmentation faults, swapping etc etc)
- */
-asmlinkage int __kprobes
-do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
-		   unsigned long address)
-{
-	unsigned long long protection_flags;
-	unsigned long long index;
-	unsigned long long expevt4;
-	unsigned int fault_code;
-
-	/* The next few lines implement a way of hashing EXPEVT into a
-	 * small array index which can be used to lookup parameters
-	 * specific to the type of TLBMISS being handled.
-	 *
-	 * Note:
-	 *	ITLBMISS has EXPEVT==0xa40
-	 *	RTLBMISS has EXPEVT==0x040
-	 *	WTLBMISS has EXPEVT==0x060
-	 */
-	expevt4 = (expevt >> 4);
-	/* TODO : xor ssr_md into this expression too. Then we can check
-	 * that PRU is set when it needs to be. */
-	index = expevt4 ^ (expevt4 >> 5);
-	index &= 7;
-
-	fault_code = expevt_to_fault_code(expevt);
-
-	protection_flags = expevt_lookup_table.protection_flags[index];
-
-	if (expevt_lookup_table.is_text_access[index])
-		fault_code |= FAULT_CODE_ITLB;
-	if (!ssr_md)
-		fault_code |= FAULT_CODE_USER;
-
-	set_thread_fault_code(fault_code);
-
-	return handle_tlbmiss(protection_flags, address);
-}
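The EXPEVT hashing in do_fast_page_fault() is terse; a small host-side check, reusing only the constants from the removed file, confirms that the three miss types land on distinct entries of expevt_lookup_table: ITLBMISS (0xa40) maps to index 1 (PRX), RTLBMISS (0x040) to index 4 (PRR) and WTLBMISS (0x060) to index 6 (PRW):

/* Worked example of the EXPEVT -> lookup-table hash used by
 * do_fast_page_fault() in the file removed above.  Not kernel code,
 * just a host-side check of the index arithmetic.
 */
#include <stdio.h>

#define PRW (1 << 8)
#define PRX (1 << 7)
#define PRR (1 << 6)

static const unsigned short protection_flags[8] =
	{ PRX, PRX, 0, 0, PRR, PRR, PRW, PRW };

static unsigned int expevt_to_index(unsigned long expevt)
{
	unsigned long expevt4 = expevt >> 4;

	return (expevt4 ^ (expevt4 >> 5)) & 7;
}

int main(void)
{
	const unsigned long miss[] = { 0xa40, 0x040, 0x060 }; /* I/R/W TLBMISS */

	for (int i = 0; i < 3; i++) {
		unsigned int idx = expevt_to_index(miss[i]);

		printf("EXPEVT 0x%03lx -> index %u, flags 0x%x\n",
		       miss[i], idx, protection_flags[idx]);
	}
	/* Prints indices 1, 4 and 6: PRX for instruction fetches,
	 * PRR for reads and PRW for writes, as the lookup table expects. */
	return 0;
}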
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
deleted file mode 100644
index bd0715d..0000000
--- a/arch/sh/mm/tlbflush_64.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * arch/sh/mm/tlb-flush_64.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2012 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/perf_event.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <asm/tlb.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
-{
-	unsigned long long match, pteh=0, lpage;
-	unsigned long tlb;
-
-	/*
-	 * Sign-extend based on neff.
-	 */
-	lpage = neff_sign_extend(page);
-	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
-	match |= lpage;
-
-	for_each_itlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		if (pteh == match) {
-			__flush_tlb_slot(tlb);
-			break;
-		}
-	}
-
-	for_each_dtlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		if (pteh == match) {
-			__flush_tlb_slot(tlb);
-			break;
-		}
-
-	}
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	unsigned long flags;
-
-	if (vma->vm_mm) {
-		page &= PAGE_MASK;
-		local_irq_save(flags);
-		local_flush_tlb_one(get_asid(), page);
-		local_irq_restore(flags);
-	}
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			   unsigned long end)
-{
-	unsigned long flags;
-	unsigned long long match, pteh=0, pteh_epn, pteh_low;
-	unsigned long tlb;
-	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm;
-
-	mm = vma->vm_mm;
-	if (cpu_context(cpu, mm) == NO_CONTEXT)
-		return;
-
-	local_irq_save(flags);
-
-	start &= PAGE_MASK;
-	end &= PAGE_MASK;
-
-	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
-
-	/* Flush ITLB */
-	for_each_itlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		pteh_epn = pteh & PAGE_MASK;
-		pteh_low = pteh & ~PAGE_MASK;
-
-		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
-			__flush_tlb_slot(tlb);
-	}
-
-	/* Flush DTLB */
-	for_each_dtlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		pteh_epn = pteh & PAGE_MASK;
-		pteh_low = pteh & ~PAGE_MASK;
-
-		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
-			__flush_tlb_slot(tlb);
-	}
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
-	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) == NO_CONTEXT)
-		return;
-
-	local_irq_save(flags);
-
-	cpu_context(cpu, mm) = NO_CONTEXT;
-	if (mm == current->mm)
-		activate_context(mm, cpu);
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_all(void)
-{
-	/* Invalidate all, including shared pages, excluding fixed TLBs */
-	unsigned long flags, tlb;
-
-	local_irq_save(flags);
-
-	/* Flush each ITLB entry */
-	for_each_itlb_entry(tlb)
-		__flush_tlb_slot(tlb);
-
-	/* Flush each DTLB entry */
-	for_each_dtlb_entry(tlb)
-		__flush_tlb_slot(tlb);
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-        /* FIXME: Optimize this later.. */
-        flush_tlb_all();
-}
-
-void __flush_tlb_global(void)
-{
-	flush_tlb_all();
-}
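The selective flushes above all compare a pre-built PTEH "match" value against what getcfg returns for each slot; local_flush_tlb_range() additionally splits the slot's PTEH into its page part (pteh & PAGE_MASK) and its ASID/valid part (pteh & ~PAGE_MASK) so the address range and the owning context can be checked separately. A self-contained sketch of that split, with assumed field positions (PAGE_SHIFT, PTEH_ASID_SHIFT and PTEH_VALID below are placeholders, not the SH-5 hardware layout):

/* Sketch of the PTEH match/split logic from local_flush_tlb_range().
 * Field positions are assumptions for illustration only; the real
 * definitions live in the SH-5 headers that this patch removes.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4 KiB pages */
#define PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))
#define PTEH_ASID_SHIFT	2			/* placeholder position */
#define PTEH_VALID	0x1UL			/* placeholder valid bit */

int main(void)
{
	unsigned long asid  = 0x2a;
	unsigned long start = 0x400000, end = 0x500000;

	/* What the removed code calls "match": ASID + valid, page bits zero. */
	unsigned long match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;

	/* A fake PTEH as read back from a TLB slot via getcfg. */
	unsigned long pteh = 0x480000UL | (asid << PTEH_ASID_SHIFT) | PTEH_VALID;

	unsigned long pteh_epn = pteh & PAGE_MASK;	/* virtual page number */
	unsigned long pteh_low = pteh & ~PAGE_MASK;	/* ASID + valid bits */

	if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
		printf("slot for page 0x%lx would be flushed\n", pteh_epn);
	return 0;
}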