Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 51a54df..fc44d9c 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,22 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0
 generated-y += syscall_table.h
-generic-y += compat.h
-generic-y += current.h
-generic-y += delay.h
-generic-y += div64.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
 generic-y += kvm_para.h
-generic-y += local.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
 generic-y += parport.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += serial.h
-generic-y += trace_clock.h
-generic-y += xor.h
diff --git a/arch/sh/include/asm/adc.h b/arch/sh/include/asm/adc.h
index 99ec668..feccfe6 100644
--- a/arch/sh/include/asm/adc.h
+++ b/arch/sh/include/asm/adc.h
@@ -1,7 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __ASM_ADC_H
 #define __ASM_ADC_H
-#ifdef __KERNEL__
 /*
  * Copyright (C) 2004  Andriy Skulysh
  */
@@ -10,5 +9,4 @@
 
 int adc_single(unsigned int channel);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_ADC_H */
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 34bfbcd..468fba3 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -7,8 +7,6 @@
 #ifndef __ASM_SH_ADDRSPACE_H
 #define __ASM_SH_ADDRSPACE_H
 
-#ifdef __KERNEL__
-
 #include <cpu/addrspace.h>
 
 /* If this CPU supports segmentation, hook up the helpers */
@@ -62,5 +60,4 @@
 #define P3_ADDR_MAX		P4SEG
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_ADDRSPACE_H */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index f37b95a..7c2a8a7 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 #define atomic_read(v)		READ_ONCE((v)->counter)
 #define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
 
diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h
index 66faae1..0d58a01 100644
--- a/arch/sh/include/asm/barrier.h
+++ b/arch/sh/include/asm/barrier.h
@@ -6,7 +6,7 @@
 #ifndef __ASM_SH_BARRIER_H
 #define __ASM_SH_BARRIER_H
 
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#if defined(CONFIG_CPU_SH4A)
 #include <asm/cache_insns.h>
 #endif
 
@@ -24,7 +24,7 @@
  * Historically we have only done this type of barrier for the MMUCR, but
  * it's also necessary for the CCR, so we make it generic here instead.
  */
-#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
+#if defined(CONFIG_CPU_SH4A)
 #define mb()		__asm__ __volatile__ ("synco": : :"memory")
 #define rmb()		mb()
 #define wmb()		mb()
diff --git a/arch/sh/include/asm/bitops-op32.h b/arch/sh/include/asm/bitops-op32.h
index 4668803..cfe5465 100644
--- a/arch/sh/include/asm/bitops-op32.h
+++ b/arch/sh/include/asm/bitops-op32.h
@@ -16,11 +16,9 @@
 #define BYTE_OFFSET(nr)		((nr) % BITS_PER_BYTE)
 #endif
 
-#define IS_IMMEDIATE(nr)	(__builtin_constant_p(nr))
-
 static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
+	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
 			"bset.b %1, @(%O2,%0)		! __set_bit\n\t"
 			: "+r" (addr)
@@ -37,7 +35,7 @@
 
 static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
+	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
 			"bclr.b %1, @(%O2,%0)		! __clear_bit\n\t"
 			: "+r" (addr)
@@ -64,7 +62,7 @@
  */
 static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
-	if (IS_IMMEDIATE(nr)) {
+	if (__builtin_constant_p(nr)) {
 		__asm__ __volatile__ (
 			"bxor.b %1, @(%O2,%0)		! __change_bit\n\t"
 			: "+r" (addr)
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index 8c35782..450b585 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_BITOPS_H
 #define __ASM_SH_BITOPS_H
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -26,7 +24,6 @@
 #include <asm-generic/bitops/non-atomic.h>
 #endif
 
-#ifdef CONFIG_SUPERH32
 static inline unsigned long ffz(unsigned long word)
 {
 	unsigned long result;
@@ -60,31 +57,6 @@
 		: "t");
 	return result;
 }
-#else
-static inline unsigned long ffz(unsigned long word)
-{
-	unsigned long result, __d2, __d3;
-
-        __asm__("gettr  tr0, %2\n\t"
-                "pta    $+32, tr0\n\t"
-                "andi   %1, 1, %3\n\t"
-                "beq    %3, r63, tr0\n\t"
-                "pta    $+4, tr0\n"
-                "0:\n\t"
-                "shlri.l        %1, 1, %1\n\t"
-                "addi   %0, 1, %0\n\t"
-                "andi   %1, 1, %3\n\t"
-                "beqi   %3, 1, tr0\n"
-                "1:\n\t"
-                "ptabs  %2, tr0\n\t"
-                : "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
-                : "0" (0L), "1" (word));
-
-	return result;
-}
-
-#include <asm-generic/bitops/__ffs.h>
-#endif
 
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ffs.h>
@@ -97,6 +69,4 @@
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_BITOPS_H */
diff --git a/arch/sh/include/asm/bl_bit.h b/arch/sh/include/asm/bl_bit.h
index 7e3d816..5d04f2c 100644
--- a/arch/sh/include/asm/bl_bit.h
+++ b/arch/sh/include/asm/bl_bit.h
@@ -1,11 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_BL_BIT_H
-#define __ASM_SH_BL_BIT_H
-
-#ifdef CONFIG_SUPERH32
-# include <asm/bl_bit_32.h>
-#else
-# include <asm/bl_bit_64.h>
-#endif
-
-#endif /* __ASM_SH_BL_BIT_H */
+#include <asm/bl_bit_32.h>
diff --git a/arch/sh/include/asm/bl_bit_64.h b/arch/sh/include/asm/bl_bit_64.h
deleted file mode 100644
index aac9780..0000000
--- a/arch/sh/include/asm/bl_bit_64.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- * Copyright (C) 2004  Richard Curnow
- */
-#ifndef __ASM_SH_BL_BIT_64_H
-#define __ASM_SH_BL_BIT_64_H
-
-#include <asm/processor.h>
-
-#define SR_BL_LL	0x0000000010000000LL
-
-static inline void set_bl_bit(void)
-{
-	unsigned long long __dummy0, __dummy1 = SR_BL_LL;
-
-	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
-			     "or	%0, %1, %0\n\t"
-			     "putcon	%0, " __SR "\n\t"
-			     : "=&r" (__dummy0)
-			     : "r" (__dummy1));
-
-}
-
-static inline void clear_bl_bit(void)
-{
-	unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
-
-	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
-			     "and	%0, %1, %0\n\t"
-			     "putcon	%0, " __SR "\n\t"
-			     : "=&r" (__dummy0)
-			     : "r" (__dummy1));
-}
-
-#endif /* __ASM_SH_BL_BIT_64_H */
diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h
index 030df56..fe52abb 100644
--- a/arch/sh/include/asm/bugs.h
+++ b/arch/sh/include/asm/bugs.h
@@ -53,10 +53,6 @@
 		*p++ = 's';
 		*p++ = 'p';
 		break;
-	case CPU_FAMILY_SH5:
-		*p++ = '6';
-		*p++ = '4';
-		break;
 	case CPU_FAMILY_UNKNOWN:
 		/*
 		 * Specifically use CPU_FAMILY_UNKNOWN rather than
diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
index 2408ac4..32dfa6b 100644
--- a/arch/sh/include/asm/cache.h
+++ b/arch/sh/include/asm/cache.h
@@ -8,14 +8,13 @@
  */
 #ifndef __ASM_SH_CACHE_H
 #define __ASM_SH_CACHE_H
-#ifdef __KERNEL__
 
 #include <linux/init.h>
 #include <cpu/cache.h>
 
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(".data..read_mostly")
 
 #ifndef __ASSEMBLY__
 struct cache_info {
@@ -44,5 +43,4 @@
 	unsigned long flags;
 };
 #endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
diff --git a/arch/sh/include/asm/cache_insns.h b/arch/sh/include/asm/cache_insns.h
index c5a4acd..d7edd52 100644
--- a/arch/sh/include/asm/cache_insns.h
+++ b/arch/sh/include/asm/cache_insns.h
@@ -1,12 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_CACHE_INSNS_H
-#define __ASM_SH_CACHE_INSNS_H
-
-
-#ifdef CONFIG_SUPERH32
-# include <asm/cache_insns_32.h>
-#else
-# include <asm/cache_insns_64.h>
-#endif
-
-#endif /* __ASM_SH_CACHE_INSNS_H */
+#include <asm/cache_insns_32.h>
diff --git a/arch/sh/include/asm/cache_insns_64.h b/arch/sh/include/asm/cache_insns_64.h
deleted file mode 100644
index ed682b9..0000000
--- a/arch/sh/include/asm/cache_insns_64.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- * Copyright (C) 2004  Richard Curnow
- */
-#ifndef __ASM_SH_CACHE_INSNS_64_H
-#define __ASM_SH_CACHE_INSNS_64_H
-
-#define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
-#define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
-#define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
-#define __ocbwb(addr)	__asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
-
-static inline reg_size_t register_align(void *val)
-{
-	return (unsigned long long)(signed long long)(signed long)val;
-}
-
-#endif /* __ASM_SH_CACHE_INSNS_64_H */
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index b932e42..4486a86 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_CACHEFLUSH_H
 #define __ASM_SH_CACHEFLUSH_H
 
-#ifdef __KERNEL__
-
 #include <linux/mm.h>
 
 /*
@@ -46,6 +44,7 @@
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *page);
 extern void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_user_range flush_icache_range
 extern void flush_icache_page(struct vm_area_struct *vma,
 				 struct page *page);
 extern void flush_cache_sigtramp(unsigned long address);
@@ -108,5 +107,4 @@
 	return vaddr;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHEFLUSH_H */
diff --git a/arch/sh/include/asm/checksum.h b/arch/sh/include/asm/checksum.h
index a460a10..00e39dd 100644
--- a/arch/sh/include/asm/checksum.h
+++ b/arch/sh/include/asm/checksum.h
@@ -1,6 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_SUPERH32
-# include <asm/checksum_32.h>
-#else
-# include <asm-generic/checksum.h>
-#endif
+#include <asm/checksum_32.h>
diff --git a/arch/sh/include/asm/checksum_32.h b/arch/sh/include/asm/checksum_32.h
index 36b84cf..1a391e3 100644
--- a/arch/sh/include/asm/checksum_32.h
+++ b/arch/sh/include/asm/checksum_32.h
@@ -30,10 +30,9 @@
  * better 64-bit) boundary
  */
 
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
-					    int len, __wsum sum,
-					    int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
 
+#define _HAVE_ARCH_CSUM_AND_COPY
 /*
  *	Note: when you get a NULL pointer exception here this means someone
  *	passed in an incorrect kernel address to one of these functions.
@@ -42,18 +41,18 @@
  *	access_ok().
  */
 static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-				 int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
-	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len);
 }
 
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-				   int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len)
 {
-	return csum_partial_copy_generic((__force const void *)src, dst,
-					len, sum, err_ptr, NULL);
+	if (!access_ok(src, len))
+		return 0;
+	return csum_partial_copy_generic((__force const void *)src, dst, len);
 }
 
 /*
@@ -194,16 +193,10 @@
 #define HAVE_CSUM_COPY_USER
 static inline __wsum csum_and_copy_to_user(const void *src,
 					   void __user *dst,
-					   int len, __wsum sum,
-					   int *err_ptr)
+					   int len)
 {
-	if (access_ok(dst, len))
-		return csum_partial_copy_generic((__force const void *)src,
-						dst, len, sum, NULL, err_ptr);
-
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return (__force __wsum)-1; /* invalid checksum */
+	if (!access_ok(dst, len))
+		return 0;
+	return csum_partial_copy_generic((__force const void *)src, dst, len);
 }
 #endif /* __ASM_SH_CHECKSUM_H */
diff --git a/arch/sh/include/asm/dma.h b/arch/sh/include/asm/dma.h
index 4d5a21a..17d23ae 100644
--- a/arch/sh/include/asm/dma.h
+++ b/arch/sh/include/asm/dma.h
@@ -6,7 +6,6 @@
  */
 #ifndef __ASM_SH_DMA_H
 #define __ASM_SH_DMA_H
-#ifdef __KERNEL__
 
 #include <linux/spinlock.h>
 #include <linux/wait.h>
@@ -144,5 +143,4 @@
 #define isa_dma_bridge_buggy	(0)
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_DMA_H */
diff --git a/arch/sh/include/asm/elf.h b/arch/sh/include/asm/elf.h
index 5ec8db1..2862d6d 100644
--- a/arch/sh/include/asm/elf.h
+++ b/arch/sh/include/asm/elf.h
@@ -90,7 +90,6 @@
 #endif
 #define ELF_ARCH	EM_SH
 
-#ifdef __KERNEL__
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -133,28 +132,6 @@
 
 #define ELF_PLATFORM	(utsname()->machine)
 
-#ifdef __SH5__
-#define ELF_PLAT_INIT(_r, load_addr) \
-  do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
-       _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
-       _r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
-       _r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
-       _r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
-       _r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
-       _r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
-       _r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
-       _r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
-       _r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
-       _r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
-       _r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
-       _r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
-       _r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
-       _r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
-       _r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
-       _r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
-       _r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
-       _r->sr = SR_FD | SR_MMU; } while (0)
-#else
 #define ELF_PLAT_INIT(_r, load_addr) \
   do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
        _r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
@@ -182,7 +159,6 @@
 	_r->regs[14]	= 0;						\
 	_r->sr		= SR_FD;					\
 } while (0)
-#endif
 
 #define SET_PERSONALITY(ex) \
 	set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
@@ -232,5 +208,4 @@
 	NEW_AUX_ENT(AT_L2_CACHESHAPE, l2_cache_shape);		\
 } while (0)
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_ELF_H */
diff --git a/arch/sh/include/asm/extable.h b/arch/sh/include/asm/extable.h
index ed46f8b..5658d2b 100644
--- a/arch/sh/include/asm/extable.h
+++ b/arch/sh/include/asm/extable.h
@@ -4,8 +4,4 @@
 
 #include <asm-generic/extable.h>
 
-#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
-#define ARCH_HAS_SEARCH_EXTABLE
-#endif
-
 #endif
diff --git a/arch/sh/include/asm/fixmap.h b/arch/sh/include/asm/fixmap.h
index e30348c..f38adc1 100644
--- a/arch/sh/include/asm/fixmap.h
+++ b/arch/sh/include/asm/fixmap.h
@@ -83,11 +83,7 @@
  * the start of the fixmap, and leave one page empty
  * at the top of mem..
  */
-#ifdef CONFIG_SUPERH32
 #define FIXADDR_TOP	(P4SEG - PAGE_SIZE)
-#else
-#define FIXADDR_TOP	((unsigned long)(-PAGE_SIZE))
-#endif
 #define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
 
diff --git a/arch/sh/include/asm/fpu.h b/arch/sh/include/asm/fpu.h
index 43cfaf9..04584be 100644
--- a/arch/sh/include/asm/fpu.h
+++ b/arch/sh/include/asm/fpu.h
@@ -37,11 +37,6 @@
 extern int do_fpu_inst(unsigned short, struct pt_regs *);
 extern int init_fpu(struct task_struct *);
 
-extern int fpregs_get(struct task_struct *target,
-		      const struct user_regset *regset,
-		      unsigned int pos, unsigned int count,
-		      void *kbuf, void __user *ubuf);
-
 static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 {
 	if (task_thread_info(tsk)->status & TS_USEDFPU) {
diff --git a/arch/sh/include/asm/freq.h b/arch/sh/include/asm/freq.h
index 18133bf..87c2362 100644
--- a/arch/sh/include/asm/freq.h
+++ b/arch/sh/include/asm/freq.h
@@ -6,9 +6,7 @@
  */
 #ifndef __ASM_SH_FREQ_H
 #define __ASM_SH_FREQ_H
-#ifdef __KERNEL__
 
 #include <cpu/freq.h>
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_FREQ_H */
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index 3190ec8..b70f3fc 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_FUTEX_H
 #define __ASM_SH_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <asm/errno.h>
@@ -34,8 +32,6 @@
 	u32 oldval, newval, prev;
 	int ret;
 
-	pagefault_disable();
-
 	do {
 		ret = get_user(oldval, uaddr);
 
@@ -67,13 +63,10 @@
 		ret = futex_atomic_cmpxchg_inatomic(&prev, uaddr, oldval, newval);
 	} while (!ret && prev != oldval);
 
-	pagefault_enable();
-
 	if (!ret)
 		*oval = oldval;
 
 	return ret;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_FUTEX_H */
diff --git a/arch/sh/include/asm/hugetlb.h b/arch/sh/include/asm/hugetlb.h
index 6f025fe..ae4de7b 100644
--- a/arch/sh/include/asm/hugetlb.h
+++ b/arch/sh/include/asm/hugetlb.h
@@ -5,12 +5,6 @@
 #include <asm/cacheflush.h>
 #include <asm/page.h>
 
-static inline int is_hugepage_only_range(struct mm_struct *mm,
-					 unsigned long addr,
-					 unsigned long len) {
-	return 0;
-}
-
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
@@ -36,6 +30,7 @@
 {
 	clear_bit(PG_dcache_clean, &page->flags);
 }
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
 
 #include <asm-generic/hugetlb.h>
 
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index ac05619..6d5c646 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -17,13 +17,12 @@
 #include <asm/cache.h>
 #include <asm/addrspace.h>
 #include <asm/machvec.h>
-#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <linux/pgtable.h>
 #include <asm-generic/iomap.h>
 
-#ifdef __KERNEL__
 #define __IO_PREFIX     generic
 #include <asm/io_generic.h>
-#include <asm/io_trapped.h>
 #include <asm-generic/pci_iomap.h>
 #include <mach/mangle-port.h>
 
@@ -115,12 +114,8 @@
 __BUILD_MEMORY_STRING(__raw_, b, u8)
 __BUILD_MEMORY_STRING(__raw_, w, u16)
 
-#ifdef CONFIG_SUPERH32
 void __raw_writesl(void __iomem *addr, const void *data, int longlen);
 void __raw_readsl(const void __iomem *addr, void *data, int longlen);
-#else
-__BUILD_MEMORY_STRING(__raw_, l, u32)
-#endif
 
 __BUILD_MEMORY_STRING(__raw_, q, u64)
 
@@ -247,134 +242,41 @@
 #define phys_to_virt(address)	(__va(address))
 #endif
 
-/*
- * On 32-bit SH, we traditionally have the whole physical address space
- * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
- * not need to do anything but place the address in the proper segment.
- * This is true for P1 and P2 addresses, as well as some P3 ones.
- * However, most of the P3 addresses and newer cores using extended
- * addressing need to map through page tables, so the ioremap()
- * implementation becomes a bit more complicated.
- *
- * See arch/sh/mm/ioremap.c for additional notes on this.
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- *
- * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
- * doesn't exist, so everything must go through page tables.
- */
 #ifdef CONFIG_MMU
+void iounmap(void __iomem *addr);
 void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
 			       pgprot_t prot, void *caller);
-void __iounmap(void __iomem *addr);
-
-static inline void __iomem *
-__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
-}
-
-static inline void __iomem *
-__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-#ifdef CONFIG_29BIT
-	phys_addr_t last_addr = offset + size - 1;
-
-	/*
-	 * For P1 and P2 space this is trivial, as everything is already
-	 * mapped. Uncached access for P1 addresses are done through P2.
-	 * In the P3 case or for addresses outside of the 29-bit space,
-	 * mapping must be done by the PMB or by using page tables.
-	 */
-	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		u64 flags = pgprot_val(prot);
-
-		/*
-		 * Anything using the legacy PTEA space attributes needs
-		 * to be kicked down to page table mappings.
-		 */
-		if (unlikely(flags & _PAGE_PCC_MASK))
-			return NULL;
-		if (unlikely(flags & _PAGE_CACHABLE))
-			return (void __iomem *)P1SEGADDR(offset);
-
-		return (void __iomem *)P2SEGADDR(offset);
-	}
-
-	/* P4 above the store queues are always mapped. */
-	if (unlikely(offset >= P3_ADDR_MAX))
-		return (void __iomem *)P4SEGADDR(offset);
-#endif
-
-	return NULL;
-}
-
-static inline void __iomem *
-__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	void __iomem *ret;
-
-	ret = __ioremap_trapped(offset, size);
-	if (ret)
-		return ret;
-
-	ret = __ioremap_29bit(offset, size, prot);
-	if (ret)
-		return ret;
-
-	return __ioremap(offset, size, prot);
-}
-#else
-#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
-#define __iounmap(addr)				do { } while (0)
-#endif /* CONFIG_MMU */
 
 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
+			__builtin_return_address(0));
 }
 
 static inline void __iomem *
 ioremap_cache(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL);
+	return __ioremap_caller(offset, size, PAGE_KERNEL,
+			__builtin_return_address(0));
 }
 #define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *
-ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+		unsigned long flags)
 {
-	return __ioremap_mode(offset, size, __pgprot(flags));
+	return __ioremap_caller(offset, size, __pgprot(flags),
+			__builtin_return_address(0));
 }
-#endif
+#endif /* CONFIG_HAVE_IOREMAP_PROT */
 
-#ifdef CONFIG_IOREMAP_FIXED
-extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
-extern int iounmap_fixed(void __iomem *);
-extern void ioremap_fixed_init(void);
-#else
-static inline void __iomem *
-ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
-{
-	BUG();
-	return NULL;
-}
+#else /* CONFIG_MMU */
+#define iounmap(addr)		do { } while (0)
+#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
+#endif /* CONFIG_MMU */
 
-static inline void ioremap_fixed_init(void) { }
-static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
-#endif
-
-#define ioremap_nocache	ioremap
 #define ioremap_uc	ioremap
 
-static inline void iounmap(void __iomem *addr)
-{
-	__iounmap(addr);
-}
-
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
  * access
@@ -390,6 +292,4 @@
 int valid_phys_addr_range(phys_addr_t addr, size_t size);
 int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_IO_H */
diff --git a/arch/sh/include/asm/io_noioport.h b/arch/sh/include/asm/io_noioport.h
index 90d6109..f7938fe 100644
--- a/arch/sh/include/asm/io_noioport.h
+++ b/arch/sh/include/asm/io_noioport.h
@@ -53,12 +53,34 @@
 #define outw_p(x, addr)	outw((x), (addr))
 #define outl_p(x, addr)	outl((x), (addr))
 
-#define insb(a, b, c)	BUG()
-#define insw(a, b, c)	BUG()
-#define insl(a, b, c)	BUG()
+static inline void insb(unsigned long port, void *dst, unsigned long count)
+{
+	BUG();
+}
 
-#define outsb(a, b, c)	BUG()
-#define outsw(a, b, c)	BUG()
-#define outsl(a, b, c)	BUG()
+static inline void insw(unsigned long port, void *dst, unsigned long count)
+{
+	BUG();
+}
+
+static inline void insl(unsigned long port, void *dst, unsigned long count)
+{
+	BUG();
+}
+
+static inline void outsb(unsigned long port, const void *src, unsigned long count)
+{
+	BUG();
+}
+
+static inline void outsw(unsigned long port, const void *src, unsigned long count)
+{
+	BUG();
+}
+
+static inline void outsl(unsigned long port, const void *src, unsigned long count)
+{
+	BUG();
+}
 
 #endif /* __ASM_SH_IO_NOIOPORT_H */
diff --git a/arch/sh/include/asm/irq.h b/arch/sh/include/asm/irq.h
index 8065a32..6d44c32 100644
--- a/arch/sh/include/asm/irq.h
+++ b/arch/sh/include/asm/irq.h
@@ -66,8 +66,5 @@
 #endif
 
 #include <asm-generic/irq.h>
-#ifdef CONFIG_CPU_SH5
-#include <cpu/irq.h>
-#endif
 
 #endif /* __ASM_SH_IRQ_H */
diff --git a/arch/sh/include/asm/kdebug.h b/arch/sh/include/asm/kdebug.h
index 5212f5f..de8693f 100644
--- a/arch/sh/include/asm/kdebug.h
+++ b/arch/sh/include/asm/kdebug.h
@@ -13,6 +13,7 @@
 
 /* arch/sh/kernel/dumpstack.c */
 extern void printk_address(unsigned long address, int reliable);
-extern void dump_mem(const char *str, unsigned long bottom, unsigned long top);
+extern void dump_mem(const char *str, const char *loglvl,
+		     unsigned long bottom, unsigned long top);
 
 #endif /* __ASM_SH_KDEBUG_H */
diff --git a/arch/sh/include/asm/machvec.h b/arch/sh/include/asm/machvec.h
index f7d0554..2b4b085 100644
--- a/arch/sh/include/asm/machvec.h
+++ b/arch/sh/include/asm/machvec.h
@@ -36,6 +36,6 @@
 #define get_system_type()	sh_mv.mv_name
 
 #define __initmv \
-	__used __section(.machvec.init)
+	__used __section(".machvec.init")
 
 #endif /* _ASM_SH_MACHVEC_H */
diff --git a/arch/sh/include/asm/mmu_context.h b/arch/sh/include/asm/mmu_context.h
index 2d09650..f664e51 100644
--- a/arch/sh/include/asm/mmu_context.h
+++ b/arch/sh/include/asm/mmu_context.h
@@ -8,7 +8,6 @@
 #ifndef __ASM_SH_MMU_CONTEXT_H
 #define __ASM_SH_MMU_CONTEXT_H
 
-#ifdef __KERNEL__
 #include <cpu/mmu_context.h>
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
@@ -48,11 +47,7 @@
  */
 #define MMU_VPN_MASK	0xfffff000
 
-#if defined(CONFIG_SUPERH32)
 #include <asm/mmu_context_32.h>
-#else
-#include <asm/mmu_context_64.h>
-#endif
 
 /*
  * Get MMU context if needed.
@@ -74,14 +69,6 @@
 		 */
 		local_flush_tlb_all();
 
-#ifdef CONFIG_SUPERH64
-		/*
-		 * The SH-5 cache uses the ASIDs, requiring both the I and D
-		 * cache to be flushed when the ASID is exhausted. Weak.
-		 */
-		flush_cache_all();
-#endif
-
 		/*
 		 * Fix version; Note that we avoid version #0
 		 * to distinguish NO_CONTEXT.
@@ -189,5 +176,4 @@
 #define disable_mmu()	do { } while (0)
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_MMU_CONTEXT_H */
diff --git a/arch/sh/include/asm/mmu_context_64.h b/arch/sh/include/asm/mmu_context_64.h
deleted file mode 100644
index bacafe0..0000000
--- a/arch/sh/include/asm/mmu_context_64.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_MMU_CONTEXT_64_H
-#define __ASM_SH_MMU_CONTEXT_64_H
-
-/*
- * sh64-specific mmu_context interface.
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003 - 2007  Paul Mundt
- */
-#include <cpu/registers.h>
-#include <asm/cacheflush.h>
-
-#define SR_ASID_MASK		0xffffffffff00ffffULL
-#define SR_ASID_SHIFT		16
-
-/*
- * Destroy context related info for an mm_struct that is about
- * to be put to rest.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	/* Well, at least free TLB entries */
-	flush_tlb_mm(mm);
-}
-
-static inline unsigned long get_asid(void)
-{
-	unsigned long long sr;
-
-	asm volatile ("getcon   " __SR ", %0\n\t"
-		      : "=r" (sr));
-
-	sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
-	return (unsigned long) sr;
-}
-
-/* Set ASID into SR */
-static inline void set_asid(unsigned long asid)
-{
-	unsigned long long sr, pc;
-
-	asm volatile ("getcon	" __SR ", %0" : "=r" (sr));
-
-	sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
-
-	/*
-	 * It is possible that this function may be inlined and so to avoid
-	 * the assembler reporting duplicate symbols we make use of the
-	 * gas trick of generating symbols using numerics and forward
-	 * reference.
-	 */
-	asm volatile ("movi	1, %1\n\t"
-		      "shlli	%1, 28, %1\n\t"
-		      "or	%0, %1, %1\n\t"
-		      "putcon	%1, " __SR "\n\t"
-		      "putcon	%0, " __SSR "\n\t"
-		      "movi	1f, %1\n\t"
-		      "ori	%1, 1 , %1\n\t"
-		      "putcon	%1, " __SPC "\n\t"
-		      "rte\n"
-		      "1:\n\t"
-		      : "=r" (sr), "=r" (pc) : "0" (sr));
-}
-
-/* arch/sh/kernel/cpu/sh5/entry.S */
-extern unsigned long switch_and_save_asid(unsigned long new_asid);
-
-/* No spare register to twiddle, so use a software cache */
-extern pgd_t *mmu_pdtp_cache;
-
-#define set_TTB(pgd)	(mmu_pdtp_cache = (pgd))
-#define get_TTB()	(mmu_pdtp_cache)
-
-#endif /* __ASM_SH_MMU_CONTEXT_64_H */
diff --git a/arch/sh/include/asm/mmzone.h b/arch/sh/include/asm/mmzone.h
index cbaee1d..6552a08 100644
--- a/arch/sh/include/asm/mmzone.h
+++ b/arch/sh/include/asm/mmzone.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_MMZONE_H
 #define __ASM_SH_MMZONE_H
 
-#ifdef __KERNEL__
-
 #ifdef CONFIG_NEED_MULTIPLE_NODES
 #include <linux/numa.h>
 
@@ -44,5 +42,4 @@
 /* arch/sh/mm/init.c */
 void __init allocate_pgdat(unsigned int nid);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_MMZONE_H */
diff --git a/arch/sh/include/asm/module.h b/arch/sh/include/asm/module.h
index 9f38fb3..337663a 100644
--- a/arch/sh/include/asm/module.h
+++ b/arch/sh/include/asm/module.h
@@ -11,32 +11,4 @@
 };
 #endif
 
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-# ifdef CONFIG_CPU_SH2
-#  define MODULE_PROC_FAMILY "SH2LE "
-# elif defined  CONFIG_CPU_SH3
-#  define MODULE_PROC_FAMILY "SH3LE "
-# elif defined  CONFIG_CPU_SH4
-#  define MODULE_PROC_FAMILY "SH4LE "
-# elif defined  CONFIG_CPU_SH5
-#  define MODULE_PROC_FAMILY "SH5LE "
-# else
-#  error unknown processor family
-# endif
-#else
-# ifdef CONFIG_CPU_SH2
-#  define MODULE_PROC_FAMILY "SH2BE "
-# elif defined  CONFIG_CPU_SH3
-#  define MODULE_PROC_FAMILY "SH3BE "
-# elif defined  CONFIG_CPU_SH4
-#  define MODULE_PROC_FAMILY "SH4BE "
-# elif defined  CONFIG_CPU_SH5
-#  define MODULE_PROC_FAMILY "SH5BE "
-# else
-#  error unknown processor family
-# endif
-#endif
-
-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
-
 #endif /* _ASM_SH_MODULE_H */
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 5eef8be..eca5daa 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -35,8 +35,6 @@
 #define HPAGE_SHIFT	22
 #elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
 #define HPAGE_SHIFT	26
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define HPAGE_SHIFT	29
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -82,18 +80,12 @@
 	((x).pte_low | ((unsigned long long)(x).pte_high << 32))
 #define __pte(x) \
 	({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
-#elif defined(CONFIG_SUPERH32)
+#else
 typedef struct { unsigned long pte_low; } pte_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 typedef struct { unsigned long pgd; } pgd_t;
 #define pte_val(x)	((x).pte_low)
 #define __pte(x)	((pte_t) { (x) } )
-#else
-typedef struct { unsigned long long pte_low; } pte_t;
-typedef struct { unsigned long long pgprot; } pgprot_t;
-typedef struct { unsigned long pgd; } pgd_t;
-#define pte_val(x)	((x).pte_low)
-#define __pte(x)	((pte_t) { (x) } )
 #endif
 
 #define pgd_val(x)	((x).pgd)
@@ -182,9 +174,6 @@
 #endif
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
 
@@ -194,15 +183,4 @@
  */
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
-#ifdef CONFIG_SUPERH64
-/*
- * While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
- * happily generate {ld/st}.q pairs, requiring us to have 8-byte
- * alignment to avoid traps. The kmalloc alignment is guaranteed by
- * virtue of L1_CACHE_BYTES, requiring this to only be special cased
- * for slab caches.
- */
-#define ARCH_SLAB_MINALIGN	8
-#endif
-
 #endif /* __ASM_SH_PAGE_H */
diff --git a/arch/sh/include/asm/pci.h b/arch/sh/include/asm/pci.h
index 10a36b1..ad22e88 100644
--- a/arch/sh/include/asm/pci.h
+++ b/arch/sh/include/asm/pci.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_PCI_H
 #define __ASM_SH_PCI_H
 
-#ifdef __KERNEL__
-
 /* Can be used to override the logic in pci_scan_bus for skipping
    already-configured bus numbers - to be used for buggy BIOSes
    or architectures with incomplete PCI setup by the loader */
@@ -96,6 +94,4 @@
 	return channel ? 15 : 14;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_PCI_H */
-
diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h
index d770da3..0e6b0be 100644
--- a/arch/sh/include/asm/pgalloc.h
+++ b/arch/sh/include/asm/pgalloc.h
@@ -3,6 +3,10 @@
 #define __ASM_SH_PGALLOC_H
 
 #include <asm/page.h>
+
+#define __HAVE_ARCH_PMD_ALLOC_ONE
+#define __HAVE_ARCH_PMD_FREE
+#define __HAVE_ARCH_PGD_FREE
 #include <asm-generic/pgalloc.h>
 
 extern pgd_t *pgd_alloc(struct mm_struct *);
diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h
index bf1eb51..08bff93 100644
--- a/arch/sh/include/asm/pgtable-2level.h
+++ b/arch/sh/include/asm/pgtable-2level.h
@@ -2,7 +2,6 @@
 #ifndef __ASM_SH_PGTABLE_2LEVEL_H
 #define __ASM_SH_PGTABLE_2LEVEL_H
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
 
 /*
diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h
index 779260b..82d7447 100644
--- a/arch/sh/include/asm/pgtable-3level.h
+++ b/arch/sh/include/asm/pgtable-3level.h
@@ -2,7 +2,6 @@
 #ifndef __ASM_SH_PGTABLE_3LEVEL_H
 #define __ASM_SH_PGTABLE_3LEVEL_H
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopud.h>
 
 /*
@@ -40,13 +39,6 @@
 
 /* only used by the stubbed out hugetlb gup code, should never be called */
 #define pud_page(pud)		NULL
-
-#define pmd_index(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
-{
-	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
-}
-
 #define pud_none(x)	(!pud_val(x))
 #define pud_present(x)	(pud_val(x))
 #define pud_clear(xp)	do { set_pud(xp, __pud(0)); } while (0)
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index cbd0f3c..27751e9 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -76,18 +76,10 @@
 #define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
 #define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
 
-#ifdef CONFIG_SUPERH32
 #define VMALLOC_START	(P3SEG)
-#else
-#define VMALLOC_START	(0xf0000000)
-#endif
 #define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
 
-#if defined(CONFIG_SUPERH32)
 #include <asm/pgtable_32.h>
-#else
-#include <asm/pgtable_64.h>
-#endif
 
 /*
  * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
@@ -159,15 +151,6 @@
 		prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
 	return __pte_access_permitted(pte, prot);
 }
-#elif defined(CONFIG_SUPERH64)
-static inline bool pte_access_permitted(pte_t pte, bool write)
-{
-	u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
-
-	if (write)
-		prot |= _PAGE_WRITE;
-	return __pte_access_permitted(pte, prot);
-}
 #else
 static inline bool pte_access_permitted(pte_t pte, bool write)
 {
@@ -185,6 +168,4 @@
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 
-#include <asm-generic/pgtable.h>
-
 #endif /* __ASM_SH_PGTABLE_H */
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 29274f0..41be43e 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -401,29 +401,13 @@
 	return pte;
 }
 
-#define pmd_page_vaddr(pmd)	((unsigned long)pmd_val(pmd))
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)pmd_val(pmd);
+}
+
 #define pmd_page(pmd)		(virt_to_page(pmd_val(pmd)))
 
-/* to find an entry in a page-table-directory. */
-#define pgd_index(address)	(((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-#define __pgd_offset(address)	pgd_index(address)
-
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
-#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address)	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define __pte_offset(address)	pte_index(address)
-
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address)		pte_offset_kernel(dir, address)
-#define pte_unmap(pte)		do { } while (0)
-
 #ifdef CONFIG_X2TLB
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
deleted file mode 100644
index 1778bc5..0000000
--- a/arch/sh/include/asm/pgtable_64.h
+++ /dev/null
@@ -1,307 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PGTABLE_64_H
-#define __ASM_SH_PGTABLE_64_H
-
-/*
- * include/asm-sh/pgtable_64.h
- *
- * This file contains the functions and defines necessary to modify and use
- * the SuperH page table tree.
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003, 2004  Paul Mundt
- * Copyright (C) 2003, 2004  Richard Curnow
- */
-#include <linux/threads.h>
-#include <asm/processor.h>
-#include <asm/page.h>
-
-/*
- * Error outputs.
- */
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
-
-/*
- * Table setting routines. Used within arch/mm only.
- */
-#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
-
-static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
-{
-	unsigned long long x = ((unsigned long long) pteval.pte_low);
-	unsigned long long *xp = (unsigned long long *) pteptr;
-	/*
-	 * Sign-extend based on NPHYS.
-	 */
-	*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
-}
-#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
-
-/*
- * PGD defines. Top level.
- */
-
-/* To find an entry in a generic PGD. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define __pgd_offset(address) pgd_index(address)
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
-
-/* To find an entry in a kernel PGD. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
-#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
-
-/*
- * PMD level access routines. Same notes as above.
- */
-#define _PMD_EMPTY		0x0
-/* Either the PMD is empty or present, it's not paged out */
-#define pmd_present(pmd_entry)	(pmd_val(pmd_entry) & _PAGE_PRESENT)
-#define pmd_clear(pmd_entry_p)	(set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
-#define pmd_none(pmd_entry)	(pmd_val((pmd_entry)) == _PMD_EMPTY)
-#define pmd_bad(pmd_entry)	((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
-
-#define pmd_page_vaddr(pmd_entry) \
-	((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
-
-#define pmd_page(pmd) \
-	(virt_to_page(pmd_val(pmd)))
-
-/* PMD to PTE dereferencing */
-#define pte_index(address) \
-		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
-#define __pte_offset(address)	pte_index(address)
-
-#define pte_offset_kernel(dir, addr) \
-		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
-
-#define pte_offset_map(dir,addr)	pte_offset_kernel(dir, addr)
-#define pte_unmap(pte)		do { } while (0)
-
-#ifndef __ASSEMBLY__
-/*
- * PTEL coherent flags.
- * See Chapter 17 ST50 CPU Core Volume 1, Architecture.
- */
-/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
-   positions, to avoid expensive bit shuffling on every refill.  The remaining
-   bits are used for s/w purposes and masked out on each refill.
-
-   Note, the PTE slots are used to hold data of type swp_entry_t when a page is
-   swapped out.  Only the _PAGE_PRESENT flag is significant when the page is
-   swapped out, and it must be placed so that it doesn't overlap either the
-   type or offset fields of swp_entry_t.  For x86, offset is at [31:8] and type
-   at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t.  This
-   scheme doesn't map to SH-5 because bit [0] controls cacheability.  So bit
-   [2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
-   into 2 pieces.  That is handled by SWP_ENTRY and SWP_TYPE below. */
-#define _PAGE_WT	0x001  /* CB0: if cacheable, 1->write-thru, 0->write-back */
-#define _PAGE_DEVICE	0x001  /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
-#define _PAGE_CACHABLE	0x002  /* CB1: uncachable/cachable */
-#define _PAGE_PRESENT	0x004  /* software: page referenced */
-#define _PAGE_SIZE0	0x008  /* SZ0-bit : size of page */
-#define _PAGE_SIZE1	0x010  /* SZ1-bit : size of page */
-#define _PAGE_SHARED	0x020  /* software: reflects PTEH's SH */
-#define _PAGE_READ	0x040  /* PR0-bit : read access allowed */
-#define _PAGE_EXECUTE	0x080  /* PR1-bit : execute access allowed */
-#define _PAGE_WRITE	0x100  /* PR2-bit : write access allowed */
-#define _PAGE_USER	0x200  /* PR3-bit : user space access allowed */
-#define _PAGE_DIRTY	0x400  /* software: page accessed in write */
-#define _PAGE_ACCESSED	0x800  /* software: page referenced */
-
-/* Wrapper for extended mode pgprot twiddling */
-#define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
-
-/*
- * We can use the sign-extended bits in the PTEL to get 32 bits of
- * software flags. This works for now because no implementations uses
- * anything above the PPN field.
- */
-#define _PAGE_WIRED	_PAGE_EXT(0x001) /* software: wire the tlb entry */
-#define _PAGE_SPECIAL	_PAGE_EXT(0x002)
-
-#define _PAGE_CLEAR_FLAGS	(_PAGE_PRESENT | _PAGE_SHARED | \
-				 _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
-
-/* Mask which drops software flags */
-#define _PAGE_FLAGS_HARDWARE_MASK	(NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
-
-/*
- * HugeTLB support
- */
-#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
-#define _PAGE_SZHUGE	(_PAGE_SIZE0)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
-#define _PAGE_SZHUGE	(_PAGE_SIZE1)
-#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
-#define _PAGE_SZHUGE	(_PAGE_SIZE0 | _PAGE_SIZE1)
-#endif
-
-/*
- * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
- * to make pte_mkhuge() happy.
- */
-#ifndef _PAGE_SZHUGE
-# define _PAGE_SZHUGE	(0)
-#endif
-
-/*
- * Default flags for a Kernel page.
- * This is fundametally also SHARED because the main use of this define
- * (other than for PGD/PMD entries) is for the VMALLOC pool which is
- * contextless.
- *
- * _PAGE_EXECUTE is required for modules
- *
- */
-#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
-			 _PAGE_EXECUTE | \
-			 _PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_SHARED)
-
-/* Default flags for a User page */
-#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)
-
-#define _PAGE_CHG_MASK	(PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_SPECIAL)
-
-/*
- * We have full permissions (Read/Write/Execute/Shared).
- */
-#define _PAGE_COMMON	(_PAGE_PRESENT | _PAGE_USER | \
-			 _PAGE_CACHABLE | _PAGE_ACCESSED)
-
-#define PAGE_NONE	__pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
-#define PAGE_SHARED	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
-				 _PAGE_SHARED)
-#define PAGE_EXECREAD	__pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
-
-/*
- * We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
- * protection mode for the stack.
- */
-#define PAGE_COPY	PAGE_EXECREAD
-
-#define PAGE_READONLY	__pgprot(_PAGE_COMMON | _PAGE_READ)
-#define PAGE_WRITEONLY	__pgprot(_PAGE_COMMON | _PAGE_WRITE)
-#define PAGE_RWX	__pgprot(_PAGE_COMMON | _PAGE_READ | \
-				 _PAGE_WRITE | _PAGE_EXECUTE)
-#define PAGE_KERNEL	__pgprot(_KERNPG_TABLE)
-
-#define PAGE_KERNEL_NOCACHE \
-			__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
-				 _PAGE_EXECUTE | _PAGE_ACCESSED | \
-				 _PAGE_DIRTY | _PAGE_SHARED)
-
-/* Make it a device mapping for maximum safety (e.g. for mapping device
-   registers into user-space via /dev/map).  */
-#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
-#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
-
-/*
- * PTE level access routines.
- *
- * Note1:
- * It's the tree walk leaf. This is physical address to be stored.
- *
- * Note 2:
- * Regarding the choice of _PTE_EMPTY:
-
-   We must choose a bit pattern that cannot be valid, whether or not the page
-   is present.  bit[2]==1 => present, bit[2]==0 => swapped out.  If swapped
-   out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
-   left for us to select.  If we force bit[7]==0 when swapped out, we could use
-   the combination bit[7,2]=2'b10 to indicate an empty PTE.  Alternatively, if
-   we force bit[7]==1 when swapped out, we can use all zeroes to indicate
-   empty.  This is convenient, because the page tables get cleared to zero
-   when they are allocated.
-
- */
-#define _PTE_EMPTY	0x0
-#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,xp)	(set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
-#define pte_none(x)	(pte_val(x) == _PTE_EMPTY)
-
-/*
- * Some definitions to translate between mem_map, PTEs, and page
- * addresses:
- */
-
-/*
- * Given a PTE, return the index of the mem_map[] entry corresponding
- * to the page frame the PTE. Get the absolute physical address, make
- * a relative physical address and translate it to an index.
- */
-#define pte_pagenr(x)		(((unsigned long) (pte_val(x)) - \
-				 __MEMORY_START) >> PAGE_SHIFT)
-
-/*
- * Given a PTE, return the "struct page *".
- */
-#define pte_page(x)		(mem_map + pte_pagenr(x))
-
-/*
- * Return number of (down rounded) MB corresponding to x pages.
- */
-#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
-
-
-/*
- * The following have defined behavior only work if pte_present() is true.
- */
-static inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_write(pte_t pte)  { return pte_val(pte) & _PAGE_WRITE; }
-static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
-static inline pte_t pte_mkclean(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkold(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkwrite(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
-static inline pte_t pte_mkdirty(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
-static inline pte_t pte_mkyoung(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
-static inline pte_t pte_mkhuge(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
-static inline pte_t pte_mkspecial(pte_t pte)	{ set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
-
-/*
- * Conversion functions: convert a page and protection to a page entry.
- *
- * extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
- */
-#define mk_pte(page,pgprot)							\
-({										\
-	pte_t __pte;								\
-										\
-	set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | 		\
-		__MEMORY_START | pgprot_val((pgprot))));			\
-	__pte;									\
-})
-
-/*
- * This takes a (absolute) physical page address that is used
- * by the remapping functions
- */
-#define mk_pte_phys(physpage, pgprot) \
-({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
-
-static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
-{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
-
-/* Encode and decode a swap entry */
-#define __swp_type(x)			(((x).val & 3) + (((x).val >> 1) & 0x3c))
-#define __swp_offset(x)			((x).val >> 8)
-#define __swp_entry(type, offset)	((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
-#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
-
-#endif /* !__ASSEMBLY__ */
-
-#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot)	__pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-
-#endif /* __ASM_SH_PGTABLE_64_H */
diff --git a/arch/sh/include/asm/platform_early.h b/arch/sh/include/asm/platform_early.h
new file mode 100644
index 0000000..fc80213
--- /dev/null
+++ b/arch/sh/include/asm/platform_early.h
@@ -0,0 +1,61 @@
+/* SPDX--License-Identifier: GPL-2.0 */
+
+#ifndef __PLATFORM_EARLY__
+#define __PLATFORM_EARLY__
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+struct sh_early_platform_driver {
+	const char *class_str;
+	struct platform_driver *pdrv;
+	struct list_head list;
+	int requested_id;
+	char *buffer;
+	int bufsize;
+};
+
+#define EARLY_PLATFORM_ID_UNSET -2
+#define EARLY_PLATFORM_ID_ERROR -3
+
+extern int sh_early_platform_driver_register(struct sh_early_platform_driver *epdrv,
+					  char *buf);
+extern void sh_early_platform_add_devices(struct platform_device **devs, int num);
+
+static inline int is_sh_early_platform_device(struct platform_device *pdev)
+{
+	return !pdev->dev.driver;
+}
+
+extern void sh_early_platform_driver_register_all(char *class_str);
+extern int sh_early_platform_driver_probe(char *class_str,
+				       int nr_probe, int user_only);
+
+#define sh_early_platform_init(class_string, platdrv)		\
+	sh_early_platform_init_buffer(class_string, platdrv, NULL, 0)
+
+#ifndef MODULE
+#define sh_early_platform_init_buffer(class_string, platdrv, buf, bufsiz)	\
+static __initdata struct sh_early_platform_driver early_driver = {		\
+	.class_str = class_string,					\
+	.buffer = buf,							\
+	.bufsize = bufsiz,						\
+	.pdrv = platdrv,						\
+	.requested_id = EARLY_PLATFORM_ID_UNSET,			\
+};									\
+static int __init sh_early_platform_driver_setup_func(char *buffer)	\
+{									\
+	return sh_early_platform_driver_register(&early_driver, buffer);	\
+}									\
+early_param(class_string, sh_early_platform_driver_setup_func)
+#else /* MODULE */
+#define sh_early_platform_init_buffer(class_string, platdrv, buf, bufsiz)	\
+static inline char *sh_early_platform_driver_setup_func(void)		\
+{									\
+	return bufsiz ? buf : NULL;					\
+}
+#endif /* MODULE */
+
+#endif /* __PLATFORM_EARLY__ */
diff --git a/arch/sh/include/asm/posix_types.h b/arch/sh/include/asm/posix_types.h
index 0d670fd..f8982b7 100644
--- a/arch/sh/include/asm/posix_types.h
+++ b/arch/sh/include/asm/posix_types.h
@@ -1,6 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-# ifdef CONFIG_SUPERH32
-#  include <asm/posix_types_32.h>
-# else
-#  include <asm/posix_types_64.h>
-# endif
+#include <asm/posix_types_32.h>
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index 6fbf8c8..3820d69 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -39,9 +39,6 @@
 	/* SH4AL-DSP types */
 	CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
 
-	/* SH-5 types */
-        CPU_SH5_101, CPU_SH5_103,
-
 	/* Unknown subtype */
 	CPU_SH_NONE
 };
@@ -53,7 +50,6 @@
 	CPU_FAMILY_SH4,
 	CPU_FAMILY_SH4A,
 	CPU_FAMILY_SH4AL_DSP,
-	CPU_FAMILY_SH5,
 	CPU_FAMILY_UNKNOWN,
 };
 
@@ -167,18 +163,12 @@
  */
 #ifdef CONFIG_CPU_SH2A
 extern unsigned int instruction_size(unsigned int insn);
-#elif defined(CONFIG_SUPERH32)
-#define instruction_size(insn)	(2)
 #else
-#define instruction_size(insn)	(4)
+#define instruction_size(insn)	(2)
 #endif
 
 #endif /* __ASSEMBLY__ */
 
-#ifdef CONFIG_SUPERH32
-# include <asm/processor_32.h>
-#else
-# include <asm/processor_64.h>
-#endif
+#include <asm/processor_32.h>
 
 #endif /* __ASM_SH_PROCESSOR_H */
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 0e0ecc0..aa92cc9 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -8,7 +8,6 @@
 
 #ifndef __ASM_SH_PROCESSOR_32_H
 #define __ASM_SH_PROCESSOR_32_H
-#ifdef __KERNEL__
 
 #include <linux/compiler.h>
 #include <linux/linkage.h>
@@ -171,7 +170,7 @@
 #define thread_saved_pc(tsk)	(tsk->thread.pc)
 
 void show_trace(struct task_struct *tsk, unsigned long *sp,
-		struct pt_regs *regs);
+		struct pt_regs *regs, const char *loglvl);
 
 #ifdef CONFIG_DUMP_CODE
 void show_code(struct pt_regs *regs);
@@ -203,5 +202,4 @@
 }
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_PROCESSOR_32_H */
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
deleted file mode 100644
index 53efc9f..0000000
--- a/arch/sh/include/asm/processor_64.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PROCESSOR_64_H
-#define __ASM_SH_PROCESSOR_64_H
-
-/*
- * include/asm-sh/processor_64.h
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- * Copyright (C) 2004  Richard Curnow
- */
-#ifndef __ASSEMBLY__
-
-#include <linux/compiler.h>
-#include <asm/page.h>
-#include <asm/types.h>
-#include <cpu/registers.h>
-
-#endif
-
-/*
- * User space process size: 2GB - 4k.
- */
-#define TASK_SIZE	0x7ffff000UL
-
-#define STACK_TOP	TASK_SIZE
-#define STACK_TOP_MAX	STACK_TOP
-
-/* This decides where the kernel will search for a free chunk of vm
- * space during mmap's.
- */
-#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE / 3)
-
-/*
- * Bit of SR register
- *
- * FD-bit:
- *     When it's set, it means the processor doesn't have right to use FPU,
- *     and it results exception when the floating operation is executed.
- *
- * IMASK-bit:
- *     Interrupt level mask
- *
- * STEP-bit:
- *     Single step bit
- *
- */
-#if defined(CONFIG_SH64_SR_WATCH)
-#define SR_MMU   0x84000000
-#else
-#define SR_MMU   0x80000000
-#endif
-
-#define SR_IMASK 0x000000f0
-#define SR_FD    0x00008000
-#define SR_SSTEP 0x08000000
-
-#ifndef __ASSEMBLY__
-
-/*
- * FPU structure and data : require 8-byte alignment as we need to access it
-   with fld.p, fst.p
- */
-
-struct sh_fpu_hard_struct {
-	unsigned long fp_regs[64];
-	unsigned int fpscr;
-	/* long status; * software status information */
-};
-
-/* Dummy fpu emulator  */
-struct sh_fpu_soft_struct {
-	unsigned long fp_regs[64];
-	unsigned int fpscr;
-	unsigned char lookahead;
-	unsigned long entry_pc;
-};
-
-union thread_xstate {
-	struct sh_fpu_hard_struct hardfpu;
-	struct sh_fpu_soft_struct softfpu;
-	/*
-	 * The structure definitions only produce 32 bit alignment, yet we need
-	 * to access them using 64 bit load/store as well.
-	 */
-	unsigned long long alignment_dummy;
-};
-
-struct thread_struct {
-	unsigned long sp;
-	unsigned long pc;
-
-	/* Various thread flags, see SH_THREAD_xxx */
-	unsigned long flags;
-
-	/* This stores the address of the pt_regs built during a context
-	   switch, or of the register save area built for a kernel mode
-	   exception.  It is used for backtracing the stack of a sleeping task
-	   or one that traps in kernel mode. */
-        struct pt_regs *kregs;
-	/* This stores the address of the pt_regs constructed on entry from
-	   user mode.  It is a fixed value over the lifetime of a process, or
-	   NULL for a kernel thread. */
-	struct pt_regs *uregs;
-
-	unsigned long address;
-	/* Hardware debugging registers may come here */
-
-	/* floating point info */
-	union thread_xstate *xstate;
-
-	/*
-	 * fpu_counter contains the number of consecutive context switches
-	 * that the FPU is used. If this is over a threshold, the lazy fpu
-	 * saving becomes unlazy to save the trap. This is an unsigned char
-	 * so that after 256 times the counter wraps and the behavior turns
-	 * lazy again; this to deal with bursty apps that only use FPU for
-	 * a short time
-	 */
-	unsigned char fpu_counter;
-};
-
-#define INIT_MMAP \
-{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-
-#define INIT_THREAD  {				\
-	.sp		= sizeof(init_stack) +	\
-			  (long) &init_stack,	\
-	.pc		= 0,			\
-        .kregs		= &fake_swapper_regs,	\
-	.uregs	        = NULL,			\
-	.address	= 0,			\
-	.flags		= 0,			\
-}
-
-/*
- * Do necessary setup to start up a newly executed thread.
- */
-#define SR_USER (SR_MMU | SR_FD)
-
-#define start_thread(_regs, new_pc, new_sp)			\
-	_regs->sr = SR_USER;	/* User mode. */		\
-	_regs->pc = new_pc - 4;	/* Compensate syscall exit */	\
-	_regs->pc |= 1;		/* Set SHmedia ! */		\
-	_regs->regs[18] = 0;					\
-	_regs->regs[15] = new_sp
-
-/* Forward declaration, a strange C thing */
-struct task_struct;
-struct mm_struct;
-
-/* Free all resources held by a thread. */
-extern void release_thread(struct task_struct *);
-
-/*
- * FPU lazy state save handling.
- */
-
-static inline void disable_fpu(void)
-{
-	unsigned long long __dummy;
-
-	/* Set FD flag in SR */
-	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
-			     "or	%0, %1, %0\n\t"
-			     "putcon	%0, " __SR "\n\t"
-			     : "=&r" (__dummy)
-			     : "r" (SR_FD));
-}
-
-static inline void enable_fpu(void)
-{
-	unsigned long long __dummy;
-
-	/* Clear out FD flag in SR */
-	__asm__ __volatile__("getcon	" __SR ", %0\n\t"
-			     "and	%0, %1, %0\n\t"
-			     "putcon	%0, " __SR "\n\t"
-			     : "=&r" (__dummy)
-			     : "r" (~SR_FD));
-}
-
-/* Round to nearest, no exceptions on inexact, overflow, underflow,
-   zero-divide, invalid.  Configure option for whether to flush denorms to
-   zero, or except if a denorm is encountered.  */
-#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
-#define FPSCR_INIT  0x00040000
-#else
-#define FPSCR_INIT  0x00000000
-#endif
-
-#ifdef CONFIG_SH_FPU
-/* Initialise the FP state of a task */
-void fpinit(struct sh_fpu_hard_struct *fpregs);
-#else
-#define fpinit(fpregs)	do { } while (0)
-#endif
-
-extern struct task_struct *last_task_used_math;
-
-/*
- * Return saved PC of a blocked thread.
- */
-#define thread_saved_pc(tsk)	(tsk->thread.pc)
-
-extern unsigned long get_wchan(struct task_struct *p);
-
-#define KSTK_EIP(tsk)  ((tsk)->thread.pc)
-#define KSTK_ESP(tsk)  ((tsk)->thread.sp)
-
-#endif	/* __ASSEMBLY__ */
-#endif /* __ASM_SH_PROCESSOR_64_H */
diff --git a/arch/sh/include/asm/ptrace_64.h b/arch/sh/include/asm/ptrace_64.h
deleted file mode 100644
index 6ee0822..0000000
--- a/arch/sh/include/asm/ptrace_64.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_PTRACE_64_H
-#define __ASM_SH_PTRACE_64_H
-
-#include <uapi/asm/ptrace_64.h>
-
-
-#define MAX_REG_OFFSET		offsetof(struct pt_regs, tregs[7])
-static inline long regs_return_value(struct pt_regs *regs)
-{
-	return regs->regs[3];
-}
-
-#endif /* __ASM_SH_PTRACE_64_H */
diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h
index 33d1d28..02e54a3 100644
--- a/arch/sh/include/asm/segment.h
+++ b/arch/sh/include/asm/segment.h
@@ -24,8 +24,7 @@
 #define USER_DS		KERNEL_DS
 #endif
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
-
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define get_fs()	(current_thread_info()->addr_limit)
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
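
segment_eq() existed to compare the current address limit against KERNEL_DS, and that test is now spelled with the kernel-wide uaccess_kernel() helper instead. A sketch of the call-site change; the wrapper function is invented:

#include <linux/types.h>
#include <asm/segment.h>

/* Old spelling: segment_eq(get_fs(), KERNEL_DS)
 * New spelling: uaccess_kernel() */
static inline bool example_in_kernel_ds(void)
{
	return uaccess_kernel();
}
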
diff --git a/arch/sh/include/asm/sfp-machine.h b/arch/sh/include/asm/sfp-machine.h
index cbc7cf8..2d24234 100644
--- a/arch/sh/include/asm/sfp-machine.h
+++ b/arch/sh/include/asm/sfp-machine.h
@@ -13,6 +13,14 @@
 #ifndef _SFP_MACHINE_H
 #define _SFP_MACHINE_H
 
+#ifdef __BIG_ENDIAN__
+#define __BYTE_ORDER __BIG_ENDIAN
+#define __LITTLE_ENDIAN 0
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#define __BIG_ENDIAN 0
+#endif
+
 #define _FP_W_TYPE_SIZE		32
 #define _FP_W_TYPE		unsigned long
 #define _FP_WS_TYPE		signed long
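
The new block hands the shared soft-fp/math-emu headers a usable __BYTE_ORDER before they are included; those headers select the word ordering of multi-word float representations with a preprocessor test roughly like the one below (sketch only, not the literal upstream check):

/* Rough shape of the test made by the generic math-emu headers: */
#if __BYTE_ORDER == __BIG_ENDIAN
# define EXAMPLE_HIGH_WORD_FIRST 1	/* most significant word stored first */
#else
# define EXAMPLE_HIGH_WORD_FIRST 0
#endif
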
diff --git a/arch/sh/include/asm/smc37c93x.h b/arch/sh/include/asm/smc37c93x.h
index f054c30..891f2f8 100644
--- a/arch/sh/include/asm/smc37c93x.h
+++ b/arch/sh/include/asm/smc37c93x.h
@@ -112,8 +112,8 @@
 #define FCR_RFRES	0x0200	/* Receiver FIFO reset */
 #define FCR_TFRES	0x0400	/* Transmitter FIFO reset */
 #define FCR_DMA		0x0800	/* DMA mode select */
-#define FCR_RTL		0x4000	/* Receiver triger (LSB) */
-#define FCR_RTM		0x8000	/* Receiver triger (MSB) */
+#define FCR_RTL		0x4000	/* Receiver trigger (LSB) */
+#define FCR_RTM		0x8000	/* Receiver trigger (MSB) */
 
 /* Line Control Register */
 
diff --git a/arch/sh/include/asm/smp.h b/arch/sh/include/asm/smp.h
index 1a0d7cf..199381f 100644
--- a/arch/sh/include/asm/smp.h
+++ b/arch/sh/include/asm/smp.h
@@ -8,7 +8,6 @@
 
 #ifdef CONFIG_SMP
 
-#include <linux/spinlock.h>
 #include <linux/atomic.h>
 #include <asm/current.h>
 #include <asm/percpu.h>
@@ -72,7 +71,7 @@
 
 #define CPU_METHOD_OF_DECLARE(name, _method, _ops)			\
 	static const struct of_cpu_method __cpu_method_of_table_##name	\
-		__used __section(__cpu_method_of_table)			\
+		__used __section("__cpu_method_of_table")		\
 		= { .method = _method, .ops = _ops }
 
 #else
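
Only the annotation spelling changes here: since the tree-wide __section() cleanup the macro takes the section name as a string literal and maps directly onto the GCC attribute, so CPU_METHOD_OF_DECLARE() callers are unaffected. A rough sketch; the __section() definition shown is approximate and the ops object is a hypothetical placeholder:

/* Approximate post-cleanup definition (lives in compiler_attributes.h): */
#define __section(section)	__attribute__((__section__(section)))

/* A hypothetical enable-method registration reads the same as before: */
static struct plat_smp_ops example_smp_ops;	/* placeholder, not populated */

CPU_METHOD_OF_DECLARE(example, "vendor,example-enable-method",
		      &example_smp_ops);
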
diff --git a/arch/sh/include/asm/sparsemem.h b/arch/sh/include/asm/sparsemem.h
index 4eb8997..4703cbe 100644
--- a/arch/sh/include/asm/sparsemem.h
+++ b/arch/sh/include/asm/sparsemem.h
@@ -2,16 +2,11 @@
 #ifndef __ASM_SH_SPARSEMEM_H
 #define __ASM_SH_SPARSEMEM_H
 
-#ifdef __KERNEL__
 /*
  * SECTION_SIZE_BITS		2^N: how big each section will be
- * MAX_PHYSADDR_BITS		2^N: how much physical address space we have
- * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
+ * MAX_PHYSMEM_BITS		2^N: how much physical address space we have
  */
 #define SECTION_SIZE_BITS	26
-#define MAX_PHYSADDR_BITS	32
 #define MAX_PHYSMEM_BITS	32
 
-#endif
-
 #endif /* __ASM_SH_SPARSEMEM_H */
diff --git a/arch/sh/include/asm/stacktrace.h b/arch/sh/include/asm/stacktrace.h
index 50c173c..4f98cdc 100644
--- a/arch/sh/include/asm/stacktrace.h
+++ b/arch/sh/include/asm/stacktrace.h
@@ -12,8 +12,6 @@
 
 struct stacktrace_ops {
 	void (*address)(void *data, unsigned long address, int reliable);
-	/* On negative return stop dumping */
-	int (*stack)(void *data, char *name);
 };
 
 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
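
With the ->stack callback dropped from stacktrace_ops, a dump_trace() consumer only has to provide ->address. A hedged sketch of such a consumer; the callback body and all names are invented:

#include <linux/printk.h>
#include <asm/stacktrace.h>

static void example_address_cb(void *data, unsigned long addr, int reliable)
{
	printk("%s [<%08lx>]%s\n", (const char *)data, addr,
	       reliable ? "" : " ?");
}

static const struct stacktrace_ops example_ops = {
	.address = example_address_cb,
};
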
diff --git a/arch/sh/include/asm/string.h b/arch/sh/include/asm/string.h
index 84fc5ed..0f6331e 100644
--- a/arch/sh/include/asm/string.h
+++ b/arch/sh/include/asm/string.h
@@ -1,6 +1,2 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifdef CONFIG_SUPERH32
-# include <asm/string_32.h>
-#else
-# include <asm/string_64.h>
-#endif
+#include <asm/string_32.h>
diff --git a/arch/sh/include/asm/string_32.h b/arch/sh/include/asm/string_32.h
index 3558b1d..a276b19 100644
--- a/arch/sh/include/asm/string_32.h
+++ b/arch/sh/include/asm/string_32.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_STRING_H
 #define __ASM_SH_STRING_H
 
-#ifdef __KERNEL__
-
 /*
  * Copyright (C) 1999 Niibe Yutaka
  * But consider these trivial functions to be public domain.
@@ -28,32 +26,6 @@
 	return __xdest;
 }
 
-#define __HAVE_ARCH_STRNCPY
-static inline char *strncpy(char *__dest, const char *__src, size_t __n)
-{
-	register char *__xdest = __dest;
-	unsigned long __dummy;
-
-	if (__n == 0)
-		return __xdest;
-
-	__asm__ __volatile__(
-		"1:\n"
-		"mov.b	@%1+, %2\n\t"
-		"mov.b	%2, @%0\n\t"
-		"cmp/eq	#0, %2\n\t"
-		"bt/s	2f\n\t"
-		" cmp/eq	%5,%1\n\t"
-		"bf/s	1b\n\t"
-		" add	#1, %0\n"
-		"2:"
-		: "=r" (__dest), "=r" (__src), "=&z" (__dummy)
-		: "0" (__dest), "1" (__src), "r" (__src+__n)
-		: "memory", "t");
-
-	return __xdest;
-}
-
 #define __HAVE_ARCH_STRCMP
 static inline int strcmp(const char *__cs, const char *__ct)
 {
@@ -127,6 +99,4 @@
 #define __HAVE_ARCH_STRLEN
 extern size_t strlen(const char *);
 
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_STRING_H */
diff --git a/arch/sh/include/asm/string_64.h b/arch/sh/include/asm/string_64.h
deleted file mode 100644
index d51d615..0000000
--- a/arch/sh/include/asm/string_64.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_STRING_64_H
-#define __ASM_SH_STRING_64_H
-
-#ifdef __KERNEL__
-
-#define __HAVE_ARCH_MEMSET
-extern void *memset(void *__s, int __c, size_t __count);
-
-#define __HAVE_ARCH_MEMCPY
-extern void *memcpy(void *dest, const void *src, size_t count);
-
-#define __HAVE_ARCH_STRLEN
-extern size_t strlen(const char *);
-
-#define __HAVE_ARCH_STRCPY
-extern char *strcpy(char *__dest, const char *__src);
-
-#endif /* __KERNEL__ */
-
-#endif /* __ASM_SH_STRING_64_H */
diff --git a/arch/sh/include/asm/switch_to.h b/arch/sh/include/asm/switch_to.h
index 9eec80a..bd139bc 100644
--- a/arch/sh/include/asm/switch_to.h
+++ b/arch/sh/include/asm/switch_to.h
@@ -4,13 +4,4 @@
  * Copyright (C) 2003  Paul Mundt
  * Copyright (C) 2004  Richard Curnow
  */
-#ifndef __ASM_SH_SWITCH_TO_H
-#define __ASM_SH_SWITCH_TO_H
-
-#ifdef CONFIG_SUPERH32
-# include <asm/switch_to_32.h>
-#else
-# include <asm/switch_to_64.h>
-#endif
-
-#endif /* __ASM_SH_SWITCH_TO_H */
+#include <asm/switch_to_32.h>
diff --git a/arch/sh/include/asm/switch_to_64.h b/arch/sh/include/asm/switch_to_64.h
deleted file mode 100644
index 2dbf231..0000000
--- a/arch/sh/include/asm/switch_to_64.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- * Copyright (C) 2004  Richard Curnow
- */
-#ifndef __ASM_SH_SWITCH_TO_64_H
-#define __ASM_SH_SWITCH_TO_64_H
-
-struct thread_struct;
-struct task_struct;
-
-/*
- *	switch_to() should switch tasks to task nr n, first
- */
-struct task_struct *sh64_switch_to(struct task_struct *prev,
-				   struct thread_struct *prev_thread,
-				   struct task_struct *next,
-				   struct thread_struct *next_thread);
-
-#define switch_to(prev,next,last)				\
-do {								\
-	if (last_task_used_math != next) {			\
-		struct pt_regs *regs = next->thread.uregs;	\
-		if (regs) regs->sr |= SR_FD;			\
-	}							\
-	last = sh64_switch_to(prev, &prev->thread, next,	\
-			      &next->thread);			\
-} while (0)
-
-
-#endif /* __ASM_SH_SWITCH_TO_64_H */
diff --git a/arch/sh/include/asm/syscall.h b/arch/sh/include/asm/syscall.h
index 90ba000..570699e 100644
--- a/arch/sh/include/asm/syscall.h
+++ b/arch/sh/include/asm/syscall.h
@@ -4,10 +4,6 @@
 
 extern const unsigned long sys_call_table[];
 
-#ifdef CONFIG_SUPERH32
-# include <asm/syscall_32.h>
-#else
-# include <asm/syscall_64.h>
-#endif
+#include <asm/syscall_32.h>
 
 #endif /* __ASM_SH_SYSCALL_H */
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 0b5b8e7..cb51a75 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -40,10 +40,7 @@
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	if (error)
-		regs->regs[0] = -error;
-	else
-		regs->regs[0] = val;
+	regs->regs[0] = (long) error ?: val;
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
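
The one-liner relies on GCC's binary conditional: "a ?: b" evaluates a once and yields it when nonzero, otherwise b. A standalone illustration of just that operator (not kernel code):

/* "a ?: b" behaves like "a ? a : b", except a is evaluated only once. */
static long pick_first_nonzero(long a, long b)
{
	return a ?: b;
}
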
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
deleted file mode 100644
index 72efcbc..0000000
--- a/arch/sh/include/asm/syscall_64.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_SYSCALL_64_H
-#define __ASM_SH_SYSCALL_64_H
-
-#include <uapi/linux/audit.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <asm/ptrace.h>
-
-/* The system call number is given by the user in R9 */
-static inline long syscall_get_nr(struct task_struct *task,
-				  struct pt_regs *regs)
-{
-	return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L;
-}
-
-static inline void syscall_rollback(struct task_struct *task,
-				    struct pt_regs *regs)
-{
-	/*
-	 * XXX: This needs some thought. On SH we don't
-	 * save away the original R9 value anywhere.
-	 */
-}
-
-static inline long syscall_get_error(struct task_struct *task,
-				     struct pt_regs *regs)
-{
-	return IS_ERR_VALUE(regs->regs[9]) ? regs->regs[9] : 0;
-}
-
-static inline long syscall_get_return_value(struct task_struct *task,
-					    struct pt_regs *regs)
-{
-	return regs->regs[9];
-}
-
-static inline void syscall_set_return_value(struct task_struct *task,
-					    struct pt_regs *regs,
-					    int error, long val)
-{
-	if (error)
-		regs->regs[9] = -error;
-	else
-		regs->regs[9] = val;
-}
-
-static inline void syscall_get_arguments(struct task_struct *task,
-					 struct pt_regs *regs,
-					 unsigned long *args)
-{
-	memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
-}
-
-static inline void syscall_set_arguments(struct task_struct *task,
-					 struct pt_regs *regs,
-					 const unsigned long *args)
-{
-	memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
-}
-
-static inline int syscall_get_arch(struct task_struct *task)
-{
-	int arch = AUDIT_ARCH_SH;
-
-#ifdef CONFIG_64BIT
-	arch |= __AUDIT_ARCH_64BIT;
-#endif
-#ifdef CONFIG_CPU_LITTLE_ENDIAN
-	arch |= __AUDIT_ARCH_LE;
-#endif
-
-	return arch;
-}
-#endif /* __ASM_SH_SYSCALL_64_H */
diff --git a/arch/sh/include/asm/syscalls.h b/arch/sh/include/asm/syscalls.h
index 995ef04..3871053 100644
--- a/arch/sh/include/asm/syscalls.h
+++ b/arch/sh/include/asm/syscalls.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_SYSCALLS_H
 #define __ASM_SH_SYSCALLS_H
 
-#ifdef __KERNEL__
-
 asmlinkage int old_mmap(unsigned long addr, unsigned long len,
 			unsigned long prot, unsigned long flags,
 			int fd, unsigned long off);
@@ -11,11 +9,6 @@
 			  unsigned long prot, unsigned long flags,
 			  unsigned long fd, unsigned long pgoff);
 
-#ifdef CONFIG_SUPERH32
-# include <asm/syscalls_32.h>
-#else
-# include <asm/syscalls_64.h>
-#endif
+#include <asm/syscalls_32.h>
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_SYSCALLS_H */
diff --git a/arch/sh/include/asm/syscalls_32.h b/arch/sh/include/asm/syscalls_32.h
index 9f9faf6..5c555b8 100644
--- a/arch/sh/include/asm/syscalls_32.h
+++ b/arch/sh/include/asm/syscalls_32.h
@@ -2,8 +2,6 @@
 #ifndef __ASM_SH_SYSCALLS_32_H
 #define __ASM_SH_SYSCALLS_32_H
 
-#ifdef __KERNEL__
-
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <linux/types.h>
@@ -26,5 +24,4 @@
 asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
 				 unsigned long thread_info_flags);
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_SYSCALLS_32_H */
diff --git a/arch/sh/include/asm/syscalls_64.h b/arch/sh/include/asm/syscalls_64.h
deleted file mode 100644
index df42656..0000000
--- a/arch/sh/include/asm/syscalls_64.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_SYSCALLS_64_H
-#define __ASM_SH_SYSCALLS_64_H
-
-#ifdef __KERNEL__
-
-#include <linux/compiler.h>
-#include <linux/linkage.h>
-#include <linux/types.h>
-
-struct pt_regs;
-
-/* Misc syscall related bits */
-asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
-asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
-
-#endif /* __KERNEL__ */
-#endif /* __ASM_SH_SYSCALLS_64_H */
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index cf5c792..243ea51 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -10,8 +10,6 @@
  *  Copyright (C) 2002  David Howells (dhowells@redhat.com)
  *  - Incorporating suggestions made by Linus Torvalds and Dave Miller
  */
-#ifdef __KERNEL__
-
 #include <asm/page.h>
 
 /*
@@ -70,9 +68,7 @@
 static inline struct thread_info *current_thread_info(void)
 {
 	struct thread_info *ti;
-#if defined(CONFIG_SUPERH64)
-	__asm__ __volatile__ ("getcon	cr17, %0" : "=r" (ti));
-#elif defined(CONFIG_CPU_HAS_SR_RB)
+#if defined(CONFIG_CPU_HAS_SR_RB)
 	__asm__ __volatile__ ("stc	r7_bank, %0" : "=r" (ti));
 #else
 	unsigned long __dummy;
@@ -172,7 +168,4 @@
 }
 
 #endif	/* !__ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
-
 #endif /* __ASM_SH_THREAD_INFO_H */
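
With the SH-64 getcon path removed, current_thread_info() either reads the pointer from the banked r7 register or, in the fallback branch, derives it from the stack pointer. The fallback idea in plain C is roughly the mask below; this is a sketch of what the inline asm computes, assuming the traditional layout where thread_info sits at the base of the THREAD_SIZE-aligned kernel stack, not a drop-in replacement:

#include <asm/thread_info.h>

/* Conceptual equivalent of the non-SR.RB fallback, starting from r15: */
static inline struct thread_info *example_ti_from_sp(unsigned long sp)
{
	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
}
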
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index bc77f3d..360f713 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -2,10 +2,6 @@
 #ifndef __ASM_SH_TLB_H
 #define __ASM_SH_TLB_H
 
-#ifdef CONFIG_SUPERH64
-# include <asm/tlb_64.h>
-#endif
-
 #ifndef __ASSEMBLY__
 #include <linux/pagemap.h>
 
@@ -14,7 +10,7 @@
 
 #include <asm-generic/tlb.h>
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
+#if defined(CONFIG_CPU_SH4)
 extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
 extern void tlb_unwire_entry(void);
 #else
diff --git a/arch/sh/include/asm/tlb_64.h b/arch/sh/include/asm/tlb_64.h
deleted file mode 100644
index 59fa0a2..0000000
--- a/arch/sh/include/asm/tlb_64.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * include/asm-sh/tlb_64.h
- *
- * Copyright (C) 2003  Paul Mundt
- */
-#ifndef __ASM_SH_TLB_64_H
-#define __ASM_SH_TLB_64_H
-
-/* ITLB defines */
-#define ITLB_FIXED	0x00000000	/* First fixed ITLB, see head.S */
-#define ITLB_LAST_VAR_UNRESTRICTED	0x000003F0	/* Last ITLB */
-
-/* DTLB defines */
-#define DTLB_FIXED	0x00800000	/* First fixed DTLB, see head.S */
-#define DTLB_LAST_VAR_UNRESTRICTED	0x008003F0	/* Last DTLB */
-
-#ifndef __ASSEMBLY__
-
-/**
- * for_each_dtlb_entry - Iterate over free (non-wired) DTLB entries
- *
- * @tlb:	TLB entry
- */
-#define for_each_dtlb_entry(tlb)		\
-	for (tlb  = cpu_data->dtlb.first;	\
-	     tlb <= cpu_data->dtlb.last;	\
-	     tlb += cpu_data->dtlb.step)
-
-/**
- * for_each_itlb_entry - Iterate over free (non-wired) ITLB entries
- *
- * @tlb:	TLB entry
- */
-#define for_each_itlb_entry(tlb)		\
-	for (tlb  = cpu_data->itlb.first;	\
-	     tlb <= cpu_data->itlb.last;	\
-	     tlb += cpu_data->itlb.step)
-
-/**
- * __flush_tlb_slot - Flushes TLB slot @slot.
- *
- * @slot:	Address of TLB slot.
- */
-static inline void __flush_tlb_slot(unsigned long long slot)
-{
-	__asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
-}
-
-#ifdef CONFIG_MMU
-/* arch/sh64/mm/tlb.c */
-int sh64_tlb_init(void);
-unsigned long long sh64_next_free_dtlb_entry(void);
-unsigned long long sh64_get_wired_dtlb_entry(void);
-int sh64_put_wired_dtlb_entry(unsigned long long entry);
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
-			 unsigned long asid, unsigned long paddr);
-void sh64_teardown_tlb_slot(unsigned long long config_addr);
-#else
-#define sh64_tlb_init()					do { } while (0)
-#define sh64_next_free_dtlb_entry()			(0)
-#define sh64_get_wired_dtlb_entry()			(0)
-#define sh64_put_wired_dtlb_entry(entry)		do { } while (0)
-#define sh64_setup_tlb_slot(conf, virt, asid, phys)	do { } while (0)
-#define sh64_teardown_tlb_slot(addr)			do { } while (0)
-#endif /* CONFIG_MMU */
-#endif /* __ASSEMBLY__ */
-#endif /* __ASM_SH_TLB_64_H */
diff --git a/arch/sh/include/asm/traps.h b/arch/sh/include/asm/traps.h
index 8844ed0..ba831bc 100644
--- a/arch/sh/include/asm/traps.h
+++ b/arch/sh/include/asm/traps.h
@@ -4,11 +4,7 @@
 
 #include <linux/compiler.h>
 
-#ifdef CONFIG_SUPERH32
 # include <asm/traps_32.h>
-#else
-# include <asm/traps_64.h>
-#endif
 
 BUILD_TRAP_HANDLER(address_error);
 BUILD_TRAP_HANDLER(debug);
diff --git a/arch/sh/include/asm/traps_64.h b/arch/sh/include/asm/traps_64.h
deleted file mode 100644
index f28db6d..0000000
--- a/arch/sh/include/asm/traps_64.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Paul Mundt
- * Copyright (C) 2004  Richard Curnow
- */
-#ifndef __ASM_SH_TRAPS_64_H
-#define __ASM_SH_TRAPS_64_H
-
-#include <cpu/registers.h>
-
-extern void phys_stext(void);
-
-#define lookup_exception_vector()		\
-({						\
-	unsigned long _vec;			\
-						\
-	__asm__ __volatile__ (			\
-		"getcon " __EXPEVT ", %0\n\t"	\
-		: "=r" (_vec)			\
-	);					\
-						\
-	_vec;					\
-})
-
-static inline void trigger_address_error(void)
-{
-	phys_stext();
-}
-
-#define BUILD_TRAP_HANDLER(name)	\
-asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
-#define TRAP_HANDLER_DECL
-
-#endif /* __ASM_SH_TRAPS_64_H */
diff --git a/arch/sh/include/asm/types.h b/arch/sh/include/asm/types.h
index df96c51..68eb24a 100644
--- a/arch/sh/include/asm/types.h
+++ b/arch/sh/include/asm/types.h
@@ -9,13 +9,8 @@
  */
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_SUPERH32
 typedef u16 insn_size_t;
 typedef u32 reg_size_t;
-#else
-typedef u32 insn_size_t;
-typedef u64 reg_size_t;
-#endif
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_SH_TYPES_H */
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 5fe751a..73f3b48 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -96,11 +96,7 @@
 	__pu_err;						\
 })
 
-#ifdef CONFIG_SUPERH32
 # include <asm/uaccess_32.h>
-#else
-# include <asm/uaccess_64.h>
-#endif
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index 624cf55..5d7ddc0 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -26,6 +26,9 @@
 	case 4:							\
 		__get_user_asm(x, ptr, retval, "l");		\
 		break;						\
+	case 8:							\
+		__get_user_u64(x, ptr, retval);			\
+		break;						\
 	default:						\
 		__get_user_unknown();				\
 		break;						\
@@ -66,6 +69,56 @@
 
 extern void __get_user_unknown(void);
 
+#if defined(CONFIG_CPU_LITTLE_ENDIAN)
+#define __get_user_u64(x, addr, err) \
+({ \
+__asm__ __volatile__( \
+	"1:\n\t" \
+	"mov.l	%2,%R1\n\t" \
+	"mov.l	%T2,%S1\n\t" \
+	"2:\n" \
+	".section	.fixup,\"ax\"\n" \
+	"3:\n\t" \
+	"mov  #0,%R1\n\t"   \
+	"mov  #0,%S1\n\t"   \
+	"mov.l	4f, %0\n\t" \
+	"jmp	@%0\n\t" \
+	" mov	%3, %0\n\t" \
+	".balign	4\n" \
+	"4:	.long	2b\n\t" \
+	".previous\n" \
+	".section	__ex_table,\"a\"\n\t" \
+	".long	1b, 3b\n\t" \
+	".long	1b + 2, 3b\n\t" \
+	".previous" \
+	:"=&r" (err), "=&r" (x) \
+	:"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
+#else
+#define __get_user_u64(x, addr, err) \
+({ \
+__asm__ __volatile__( \
+	"1:\n\t" \
+	"mov.l	%2,%S1\n\t" \
+	"mov.l	%T2,%R1\n\t" \
+	"2:\n" \
+	".section	.fixup,\"ax\"\n" \
+	"3:\n\t" \
+	"mov  #0,%S1\n\t"   \
+	"mov  #0,%R1\n\t"   \
+	"mov.l	4f, %0\n\t" \
+	"jmp	@%0\n\t" \
+	" mov	%3, %0\n\t" \
+	".balign	4\n" \
+	"4:	.long	2b\n\t" \
+	".previous\n" \
+	".section	__ex_table,\"a\"\n\t" \
+	".long	1b, 3b\n\t" \
+	".long	1b + 2, 3b\n\t" \
+	".previous" \
+	:"=&r" (err), "=&r" (x) \
+	:"m" (__m(addr)), "i" (-EFAULT), "0" (err)); })
+#endif
+
 #define __put_user_size(x,ptr,size,retval)		\
 do {							\
 	retval = 0;					\
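
The new case 8 means a 64-bit __get_user() no longer falls through to __get_user_unknown(): the value is fetched with two mov.l instructions, and the %R/%S (and %T) operand modifiers address the two 32-bit halves of the destination and the two words of the source, which is why the little- and big-endian variants differ only in ordering. Usage sketch; the helper is invented and, as usual for the __-prefixed accessors, the pointer is assumed to have been validated already:

#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative only: fetch a u64 from userspace in one access sequence. */
static int example_read_user_u64(const u64 __user *uptr, u64 *out)
{
	return __get_user(*out, uptr);	/* now reaches __get_user_u64() */
}
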
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
deleted file mode 100644
index 0c19d02..0000000
--- a/arch/sh/include/asm/uaccess_64.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_SH_UACCESS_64_H
-#define __ASM_SH_UACCESS_64_H
-
-/*
- * include/asm-sh/uaccess_64.h
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003, 2004  Paul Mundt
- *
- * User space memory access functions
- *
- * Copyright (C) 1999  Niibe Yutaka
- *
- *  Based on:
- *     MIPS implementation version 1.15 by
- *              Copyright (C) 1996, 1997, 1998 by Ralf Baechle
- *     and i386 version.
- */
-
-#define __get_user_size(x,ptr,size,retval)			\
-do {								\
-	retval = 0;						\
-	x = 0;							\
-	switch (size) {						\
-	case 1:							\
-		retval = __get_user_asm_b((void *)&x,		\
-					  (long)ptr);		\
-		break;						\
-	case 2:							\
-		retval = __get_user_asm_w((void *)&x,		\
-					  (long)ptr);		\
-		break;						\
-	case 4:							\
-		retval = __get_user_asm_l((void *)&x,		\
-					  (long)ptr);		\
-		break;						\
-	case 8:							\
-		retval = __get_user_asm_q((void *)&x,		\
-					  (long)ptr);		\
-		break;						\
-	default:						\
-		__get_user_unknown();				\
-		break;						\
-	}							\
-} while (0)
-
-extern long __get_user_asm_b(void *, long);
-extern long __get_user_asm_w(void *, long);
-extern long __get_user_asm_l(void *, long);
-extern long __get_user_asm_q(void *, long);
-extern void __get_user_unknown(void);
-
-#define __put_user_size(x,ptr,size,retval)			\
-do {								\
-	retval = 0;						\
-	switch (size) {						\
-	case 1:							\
-		retval = __put_user_asm_b((void *)&x,		\
-					  (__force long)ptr);	\
-		break;						\
-	case 2:							\
-		retval = __put_user_asm_w((void *)&x,		\
-					  (__force long)ptr);	\
-		break;						\
-	case 4:							\
-		retval = __put_user_asm_l((void *)&x,		\
-					  (__force long)ptr);	\
-		break;						\
-	case 8:							\
-		retval = __put_user_asm_q((void *)&x,		\
-					  (__force long)ptr);	\
-		break;						\
-	default:						\
-		__put_user_unknown();				\
-	}							\
-} while (0)
-
-extern long __put_user_asm_b(void *, long);
-extern long __put_user_asm_w(void *, long);
-extern long __put_user_asm_l(void *, long);
-extern long __put_user_asm_q(void *, long);
-extern void __put_user_unknown(void);
-
-#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/unistd.h b/arch/sh/include/asm/unistd.h
index 9c7d9d9..d6e1262 100644
--- a/arch/sh/include/asm/unistd.h
+++ b/arch/sh/include/asm/unistd.h
@@ -1,9 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-# ifdef CONFIG_SUPERH32
-#  include <asm/unistd_32.h>
-# else
-#  include <asm/unistd_64.h>
-# endif
+#include <asm/unistd_32.h>
 
 #define NR_syscalls	__NR_syscalls
 
diff --git a/arch/sh/include/asm/user.h b/arch/sh/include/asm/user.h
index e97f2ef..7dfd3f6 100644
--- a/arch/sh/include/asm/user.h
+++ b/arch/sh/include/asm/user.h
@@ -28,19 +28,12 @@
  *	to write an integer number of pages.
  */
 
-#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
-struct user_fpu_struct {
-	unsigned long fp_regs[32];
-	unsigned int fpscr;
-};
-#else
 struct user_fpu_struct {
 	unsigned long fp_regs[16];
 	unsigned long xfp_regs[16];
 	unsigned long fpscr;
 	unsigned long fpul;
 };
-#endif
 
 struct user {
 	struct pt_regs	regs;			/* entire machine state */
diff --git a/arch/sh/include/asm/vermagic.h b/arch/sh/include/asm/vermagic.h
new file mode 100644
index 0000000..5b2057c
--- /dev/null
+++ b/arch/sh/include/asm/vermagic.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# ifdef CONFIG_CPU_SH2
+#  define MODULE_PROC_FAMILY "SH2LE "
+# elif defined  CONFIG_CPU_SH3
+#  define MODULE_PROC_FAMILY "SH3LE "
+# elif defined  CONFIG_CPU_SH4
+#  define MODULE_PROC_FAMILY "SH4LE "
+# else
+#  error unknown processor family
+# endif
+#else
+# ifdef CONFIG_CPU_SH2
+#  define MODULE_PROC_FAMILY "SH2BE "
+# elif defined  CONFIG_CPU_SH3
+#  define MODULE_PROC_FAMILY "SH3BE "
+# elif defined  CONFIG_CPU_SH4
+#  define MODULE_PROC_FAMILY "SH4BE "
+# else
+#  error unknown processor family
+# endif
+#endif
+
+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
+
+#endif /* _ASM_VERMAGIC_H */
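
This new header supplies the architecture piece of the module vermagic; linux/vermagic.h concatenates MODULE_ARCH_VERMAGIC with the kernel release and feature flags into the string the module loader compares at load time. Hedged sketch of where the value surfaces; the variable is invented and the exact string depends on the configuration:

#include <linux/vermagic.h>

/* On a little-endian SH4 build this ends in "SH4LE ", giving something
 * like "5.10.109 mod_unload SH4LE " depending on the enabled options. */
static const char example_vermagic[] = VERMAGIC_STRING;
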
diff --git a/arch/sh/include/asm/vmalloc.h b/arch/sh/include/asm/vmalloc.h
new file mode 100644
index 0000000..716b774
--- /dev/null
+++ b/arch/sh/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_SH_VMALLOC_H
+#define _ASM_SH_VMALLOC_H
+
+#endif /* _ASM_SH_VMALLOC_H */
diff --git a/arch/sh/include/asm/vmlinux.lds.h b/arch/sh/include/asm/vmlinux.lds.h
index 9929556..8d96c4f 100644
--- a/arch/sh/include/asm/vmlinux.lds.h
+++ b/arch/sh/include/asm/vmlinux.lds.h
@@ -15,12 +15,4 @@
 #define DWARF_EH_FRAME
 #endif
 
-#ifdef CONFIG_SUPERH64
-#define EXTRA_TEXT		\
-	*(.text64)		\
-	*(.text..SHmedia32)
-#else
-#define EXTRA_TEXT
-#endif
-
 #endif /* __ASM_SH_VMLINUX_LDS_H */
diff --git a/arch/sh/include/asm/watchdog.h b/arch/sh/include/asm/watchdog.h
index cecd0fc..b9ca4c9 100644
--- a/arch/sh/include/asm/watchdog.h
+++ b/arch/sh/include/asm/watchdog.h
@@ -8,7 +8,6 @@
  */
 #ifndef __ASM_SH_WATCHDOG_H
 #define __ASM_SH_WATCHDOG_H
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 #include <linux/io.h>
@@ -157,5 +156,4 @@
 	__raw_writew((WTCSR_HIGH << 8) | (__u16)val, WTCSR);
 }
 #endif /* CONFIG_CPU_SUBTYPE_SH7785 || CONFIG_CPU_SUBTYPE_SH7780 */
-#endif /* __KERNEL__ */
 #endif /* __ASM_SH_WATCHDOG_H */