Update Linux to v5.10.109

Sourced from the upstream kernel.org release tarball [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
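
Notable among the imported m68k headers is the conversion of the MMU
code from the old <asm-generic/4level-fixup.h> shim to the generic
pgtable-nopud/-nopmd folding: page-table walks now name every level
explicitly, and the folded levels compile away. Below is a minimal
sketch of the walk pattern this update introduces in mmu_context.h;
the walk() helper is hypothetical and for illustration only, the real
code lives in load_ksp_mmu():

	/* Hypothetical sketch of the explicit five-level walk added by
	 * this update; p4d/pud accessors fold to no-ops on
	 * configurations with fewer page-table levels. */
	static pte_t *walk(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		if (pgd_none(*pgd))
			return NULL;
		p4d = p4d_offset(pgd, addr);
		if (p4d_none(*p4d))
			return NULL;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			return NULL;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}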

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 591d53b..0dbf9c5 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,31 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 generated-y += syscall_table.h
-generic-y += barrier.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
 generic-y += extable.h
-generic-y += futex.h
-generic-y += hw_irq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
 generic-y += kvm_para.h
-generic-y += local.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
-generic-y += percpu.h
-generic-y += preempt.h
-generic-y += sections.h
-generic-y += shmparam.h
 generic-y += spinlock.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
diff --git a/arch/m68k/include/asm/adb_iop.h b/arch/m68k/include/asm/adb_iop.h
index 195d7fb..6aecd02 100644
--- a/arch/m68k/include/asm/adb_iop.h
+++ b/arch/m68k/include/asm/adb_iop.h
@@ -29,6 +29,7 @@
 
 #define ADB_IOP_EXPLICIT	0x80	/* nonzero if explicit command */
 #define ADB_IOP_AUTOPOLL	0x40	/* auto/SRQ polling enabled    */
+#define ADB_IOP_SET_AUTOPOLL	0x20	/* set autopoll device list    */
 #define ADB_IOP_SRQ		0x04	/* SRQ detected                */
 #define ADB_IOP_TIMEOUT		0x02	/* nonzero if timeout          */
 
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 47228b0..756c5cc 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -16,8 +16,6 @@
  * We do not have SMP m68k systems, so we don't have to deal with that.
  */
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 #define atomic_read(v)		READ_ONCE((v)->counter)
 #define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))
 
diff --git a/arch/m68k/include/asm/cacheflush_mm.h b/arch/m68k/include/asm/cacheflush_mm.h
index 1e2544e..1ac55e7 100644
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -254,9 +254,11 @@
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
 
-extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+extern void flush_icache_user_page(struct vm_area_struct *vma, struct page *page,
 				    unsigned long addr, int len);
 extern void flush_icache_range(unsigned long address, unsigned long endaddr);
+extern void flush_icache_user_range(unsigned long address,
+		unsigned long endaddr);
 
 static inline void copy_to_user_page(struct vm_area_struct *vma,
 				     struct page *page, unsigned long vaddr,
@@ -264,7 +266,7 @@
 {
 	flush_cache_page(vma, vaddr, page_to_pfn(page));
 	memcpy(dst, src, len);
-	flush_icache_user_range(vma, page, vaddr, len);
+	flush_icache_user_page(vma, page, vaddr, len);
 }
 static inline void copy_from_user_page(struct vm_area_struct *vma,
 				       struct page *page, unsigned long vaddr,
diff --git a/arch/m68k/include/asm/cacheflush_no.h b/arch/m68k/include/asm/cacheflush_no.h
index 11e9a9d..2731f07 100644
--- a/arch/m68k/include/asm/cacheflush_no.h
+++ b/arch/m68k/include/asm/cacheflush_no.h
@@ -9,25 +9,8 @@
 #include <asm/mcfsim.h>
 
 #define flush_cache_all()			__flush_cache_all()
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr)		do { } while (0)
 #define flush_dcache_range(start, len)		__flush_dcache_all()
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_range(start, len)		__flush_icache_all()
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
 
 void mcf_cache_push(void);
 
@@ -98,4 +81,6 @@
 	__clear_cache_all();
 }
 
+#include <asm-generic/cacheflush.h>
+
 #endif /* _M68KNOMMU_CACHEFLUSH_H */
diff --git a/arch/m68k/include/asm/checksum.h b/arch/m68k/include/asm/checksum.h
index f9b94e4..692e7b6 100644
--- a/arch/m68k/include/asm/checksum.h
+++ b/arch/m68k/include/asm/checksum.h
@@ -30,14 +30,14 @@
  * better 64-bit) boundary
  */
 
-extern __wsum csum_partial_copy_from_user(const void __user *src,
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+#define _HAVE_ARCH_CSUM_AND_COPY
+extern __wsum csum_and_copy_from_user(const void __user *src,
 						void *dst,
-						int len, __wsum sum,
-						int *csum_err);
+						int len);
 
 extern __wsum csum_partial_copy_nocheck(const void *src,
-					      void *dst, int len,
-					      __wsum sum);
+					      void *dst, int len);
 
 /*
  *	This is a version of ip_fast_csum() optimized for IP headers,
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 38e1d7a..3a3bdcf 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -129,14 +129,6 @@
 
 #else
 
-/*
- * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
- * them available.
- */
-#define cmpxchg_local(ptr, o, n)				  	       \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-
 #include <asm-generic/cmpxchg.h>
 
 #endif
diff --git a/arch/m68k/include/asm/floppy.h b/arch/m68k/include/asm/floppy.h
index c3b9ad6..a4d0fea 100644
--- a/arch/m68k/include/asm/floppy.h
+++ b/arch/m68k/include/asm/floppy.h
@@ -63,21 +63,21 @@
 }
 
 
-static __inline__ unsigned char fd_inb(int port)
+static __inline__ unsigned char fd_inb(int base, int reg)
 {
 	if(MACH_IS_Q40)
-		return inb_p(port);
+		return inb_p(base + reg);
 	else if(MACH_IS_SUN3X)
-		return sun3x_82072_fd_inb(port);
+		return sun3x_82072_fd_inb(base + reg);
 	return 0;
 }
 
-static __inline__ void fd_outb(unsigned char value, int port)
+static __inline__ void fd_outb(unsigned char value, int base, int reg)
 {
 	if(MACH_IS_Q40)
-		outb_p(value, port);
+		outb_p(value, base + reg);
 	else if(MACH_IS_SUN3X)
-		sun3x_82072_fd_outb(value, port);
+		sun3x_82072_fd_outb(value, base + reg);
 }
 
 
@@ -211,26 +211,27 @@
 		st=1;
 		for(lcount=virtual_dma_count, lptr=virtual_dma_addr;
 		    lcount; lcount--, lptr++) {
-			st=inb(virtual_dma_port+4) & 0xa0 ;
-			if(st != 0xa0)
+			st = inb(virtual_dma_port + FD_STATUS);
+			st &= STATUS_DMA | STATUS_READY;
+			if (st != (STATUS_DMA | STATUS_READY))
 				break;
 			if(virtual_dma_mode)
-				outb_p(*lptr, virtual_dma_port+5);
+				outb_p(*lptr, virtual_dma_port + FD_DATA);
 			else
-				*lptr = inb_p(virtual_dma_port+5);
+				*lptr = inb_p(virtual_dma_port + FD_DATA);
 		}
 
 		virtual_dma_count = lcount;
 		virtual_dma_addr = lptr;
-		st = inb(virtual_dma_port+4);
+		st = inb(virtual_dma_port + FD_STATUS);
 	}
 
 #ifdef TRACE_FLPY_INT
 	calls++;
 #endif
-	if(st == 0x20)
+	if (st == STATUS_DMA)
 		return IRQ_HANDLED;
-	if(!(st & 0x20)) {
+	if (!(st & STATUS_DMA)) {
 		virtual_dma_residue += virtual_dma_count;
 		virtual_dma_count=0;
 #ifdef TRACE_FLPY_INT
diff --git a/arch/m68k/include/asm/hardirq.h b/arch/m68k/include/asm/hardirq.h
deleted file mode 100644
index 1179316..0000000
--- a/arch/m68k/include/asm/hardirq.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __M68K_HARDIRQ_H
-#define __M68K_HARDIRQ_H
-
-#include <linux/threads.h>
-#include <linux/cache.h>
-#include <asm/irq.h>
-
-#ifdef CONFIG_MMU
-
-static inline void ack_bad_irq(unsigned int irq)
-{
-	pr_crit("unexpected IRQ trap at vector %02x\n", irq);
-}
-
-/* entry.S is sensitive to the offsets of these fields */
-typedef struct {
-	unsigned int __softirq_pending;
-} ____cacheline_aligned irq_cpustat_t;
-
-#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
-
-#else
-
-#include <asm-generic/hardirq.h>
-
-#endif /* !CONFIG_MMU */
-
-#endif
diff --git a/arch/m68k/include/asm/io_no.h b/arch/m68k/include/asm/io_no.h
index 0498192..2c96e84 100644
--- a/arch/m68k/include/asm/io_no.h
+++ b/arch/m68k/include/asm/io_no.h
@@ -14,15 +14,15 @@
  * that behavior here first before we include asm-generic/io.h.
  */
 #define __raw_readb(addr) \
-    ({ unsigned char __v = (*(volatile unsigned char *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
 #define __raw_readw(addr) \
-    ({ unsigned short __v = (*(volatile unsigned short *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
 #define __raw_readl(addr) \
-    ({ unsigned int __v = (*(volatile unsigned int *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
 
-#define __raw_writeb(b, addr) (void)((*(volatile unsigned char *) (addr)) = (b))
-#define __raw_writew(b, addr) (void)((*(volatile unsigned short *) (addr)) = (b))
-#define __raw_writel(b, addr) (void)((*(volatile unsigned int *) (addr)) = (b))
+#define __raw_writeb(b, addr) (void)((*(__force volatile u8 *) (addr)) = (b))
+#define __raw_writew(b, addr) (void)((*(__force volatile u16 *) (addr)) = (b))
+#define __raw_writel(b, addr) (void)((*(__force volatile u32 *) (addr)) = (b))
 
 #if defined(CONFIG_COLDFIRE)
 /*
@@ -67,7 +67,7 @@
 {
 	if (cf_internalio(addr))
 		return __raw_readw(addr);
-	return __le16_to_cpu(__raw_readw(addr));
+	return swab16(__raw_readw(addr));
 }
 
 #define readl readl
@@ -75,7 +75,7 @@
 {
 	if (cf_internalio(addr))
 		return __raw_readl(addr);
-	return __le32_to_cpu(__raw_readl(addr));
+	return swab32(__raw_readl(addr));
 }
 
 #define writew writew
@@ -84,7 +84,7 @@
 	if (cf_internalio(addr))
 		__raw_writew(value, addr);
 	else
-		__raw_writew(__cpu_to_le16(value), addr);
+		__raw_writew(swab16(value), addr);
 }
 
 #define writel writel
@@ -93,7 +93,7 @@
 	if (cf_internalio(addr))
 		__raw_writel(value, addr);
 	else
-		__raw_writel(__cpu_to_le32(value), addr);
+		__raw_writel(swab32(value), addr);
 }
 
 #else
diff --git a/arch/m68k/include/asm/kmap.h b/arch/m68k/include/asm/kmap.h
index 421b6c9..dec0574 100644
--- a/arch/m68k/include/asm/kmap.h
+++ b/arch/m68k/include/asm/kmap.h
@@ -20,7 +20,6 @@
 			       int cacheflag);
 #define iounmap iounmap
 extern void iounmap(void __iomem *addr);
-extern void __iounmap(void *addr, unsigned long size);
 
 #define ioremap ioremap
 static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
@@ -28,7 +27,6 @@
 	return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
 
-#define ioremap_nocache ioremap
 #define ioremap_uc ioremap
 #define ioremap_wt ioremap_wt
 static inline void __iomem *ioremap_wt(unsigned long physaddr,
diff --git a/arch/m68k/include/asm/m5441xsim.h b/arch/m68k/include/asm/m5441xsim.h
index 4892f31..e091e36 100644
--- a/arch/m68k/include/asm/m5441xsim.h
+++ b/arch/m68k/include/asm/m5441xsim.h
@@ -279,6 +279,13 @@
 #define MCFGPIO_PIN_MAX		87
 
 /*
+ * Phase Locked Loop (PLL)
+ */
+#define MCF_PLL_CR		0xFC0C0000
+#define MCF_PLL_DR		0xFC0C0004
+#define MCF_PLL_SR		0xFC0C0008
+
+/*
  *  DSPI module.
  */
 #define MCFDSPI_BASE0		0xfc05c000
@@ -298,5 +305,13 @@
 #define MCFEDMA_IRQ_INTR16	(MCFINT1_VECBASE + MCFEDMA_EDMA_INTR16)
 #define MCFEDMA_IRQ_INTR56	(MCFINT2_VECBASE + MCFEDMA_EDMA_INTR56)
 #define MCFEDMA_IRQ_ERR	(MCFINT0_VECBASE + MCFINT0_EDMA_ERR)
+/*
+ *  esdhc module.
+ */
+#define MCFSDHC_BASE		0xfc0cc000
+#define MCFSDHC_SIZE		256
+#define MCFINT2_SDHC		31
+#define MCF_IRQ_SDHC		(MCFINT2_VECBASE + MCFINT2_SDHC)
+#define MCFSDHC_CLK		(MCFSDHC_BASE + 0x2c)
 
 #endif /* m5441xsim_h */
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index b34d44d..bc1228e 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -28,26 +28,22 @@
 	return (pmd_t *) pgd;
 }
 
-#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
-#define pmd_alloc_one(mm, address)      ({ BUG(); ((pmd_t *)2); })
+#define pmd_populate(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
 
-#define pmd_populate(mm, pmd, page) (pmd_val(*pmd) = \
-	(unsigned long)(page_address(page)))
+#define pmd_populate_kernel pmd_populate
 
-#define pmd_populate_kernel(mm, pmd, pte) (pmd_val(*pmd) = (unsigned long)(pte))
+#define pmd_pgtable(pmd) pfn_to_virt(pmd_val(pmd) >> PAGE_SHIFT)
 
-#define pmd_pgtable(pmd) pmd_page(pmd)
-
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
+	struct page *page = virt_to_page(pgtable);
+
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
 
-#define __pmd_free_tlb(tlb, pmd, address) do { } while (0)
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
 	struct page *page = alloc_pages(GFP_DMA, 0);
 	pte_t *pte;
@@ -59,20 +55,16 @@
 		return NULL;
 	}
 
-	pte = kmap(page);
-	if (pte) {
-		clear_page(pte);
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
-	kunmap(page);
+	pte = page_address(page);
+	clear_page(pte);
 
-	return page;
+	return pte;
 }
 
-static inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
+	struct page *page = virt_to_page(pgtable);
+
 	pgtable_pte_page_dtor(page);
 	__free_page(page);
 }
@@ -95,11 +87,9 @@
 	new_pgd = (pgd_t *)__get_free_page(GFP_DMA | __GFP_NOWARN);
 	if (!new_pgd)
 		return NULL;
-	memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE);
+	memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));
 	memset(new_pgd, 0, PAGE_OFFSET >> PGDIR_SHIFT);
 	return new_pgd;
 }
 
-#define pgd_populate(mm, pmd, pte) BUG()
-
 #endif /* M68K_MCF_PGALLOC_H */
diff --git a/arch/m68k/include/asm/mcf_pgtable.h b/arch/m68k/include/asm/mcf_pgtable.h
index 5d5502c..8d4ec05 100644
--- a/arch/m68k/include/asm/mcf_pgtable.h
+++ b/arch/m68k/include/asm/mcf_pgtable.h
@@ -170,7 +170,7 @@
 }
 
 #define __pte_page(pte)	((unsigned long) (pte_val(pte) & PAGE_MASK))
-#define __pmd_page(pmd)	((unsigned long) (pmd_val(pmd)))
+#define pmd_page_vaddr(pmd)	((unsigned long) (pmd_val(pmd)))
 
 static inline int pte_none(pte_t pte)
 {
@@ -198,17 +198,9 @@
 #define pmd_present(pmd) (!pmd_none2(&(pmd)))
 static inline void pmd_clear(pmd_t *pmdp) { pmd_val(*pmdp) = 0; }
 
-static inline int pgd_none(pgd_t pgd) { return 0; }
-static inline int pgd_bad(pgd_t pgd) { return 0; }
-static inline int pgd_present(pgd_t pgd) { return 1; }
-static inline void pgd_clear(pgd_t *pgdp) {}
-
 #define pte_ERROR(e) \
 	printk(KERN_ERR "%s:%d: bad pte %08lx.\n",	\
 	__FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
-	printk(KERN_ERR "%s:%d: bad pmd %08lx.\n",	\
-	__FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
 	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n",	\
 	__FILE__, __LINE__, pgd_val(e))
@@ -243,11 +235,6 @@
 	return pte_val(pte) & CF_PAGE_ACCESSED;
 }
 
-static inline int pte_special(pte_t pte)
-{
-	return 0;
-}
-
 static inline pte_t pte_wrprotect(pte_t pte)
 {
 	pte_val(pte) &= ~CF_PAGE_WRITABLE;
@@ -320,73 +307,10 @@
 	return pte;
 }
 
-static inline pte_t pte_mkspecial(pte_t pte)
-{
-	return pte;
-}
-
 #define swapper_pg_dir kernel_pg_dir
 extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
 
 /*
- * Find an entry in a pagetable directory.
- */
-#define pgd_index(address)	((address) >> PGDIR_SHIFT)
-#define pgd_offset(mm, address)	((mm)->pgd + pgd_index(address))
-
-/*
- * Find an entry in a kernel pagetable directory.
- */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
-/*
- * Find an entry in the second-level pagetable.
- */
-static inline pmd_t *pmd_offset(pgd_t *pgd, unsigned long address)
-{
-	return (pmd_t *) pgd;
-}
-
-/*
- * Find an entry in the third-level pagetable.
- */
-#define __pte_offset(address)	((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir, address) \
-	((pte_t *) __pmd_page(*(dir)) + __pte_offset(address))
-
-/*
- * Disable caching for page at given kernel virtual address.
- */
-static inline void nocache_page(void *vaddr)
-{
-	pgd_t *dir;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	unsigned long addr = (unsigned long) vaddr;
-
-	dir = pgd_offset_k(addr);
-	pmdp = pmd_offset(dir, addr);
-	ptep = pte_offset_kernel(pmdp, addr);
-	*ptep = pte_mknocache(*ptep);
-}
-
-/*
- * Enable caching for page at given kernel virtual address.
- */
-static inline void cache_page(void *vaddr)
-{
-	pgd_t *dir;
-	pmd_t *pmdp;
-	pte_t *ptep;
-	unsigned long addr = (unsigned long) vaddr;
-
-	dir = pgd_offset_k(addr);
-	pmdp = pmd_offset(dir, addr);
-	ptep = pte_offset_kernel(pmdp, addr);
-	*ptep = pte_mkcache(*ptep);
-}
-
-/*
  * Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e))
  */
 #define __swp_type(x)		((x).val & 0xFF)
@@ -398,9 +322,6 @@
 
 #define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
 
-#define pte_offset_map(pmdp, addr) ((pte_t *)__pmd_page(*pmdp) + \
-				       __pte_offset(addr))
-#define pte_unmap(pte)		((void) 0)
 #define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
 
diff --git a/arch/m68k/include/asm/mcfclk.h b/arch/m68k/include/asm/mcfclk.h
index 0aca504..722627e 100644
--- a/arch/m68k/include/asm/mcfclk.h
+++ b/arch/m68k/include/asm/mcfclk.h
@@ -30,6 +30,8 @@
 extern struct clk_ops clk_ops1;
 #endif /* MCFPM_PPMCR1 */
 
+extern struct clk_ops clk_ops2;
+
 #define DEFINE_CLK(clk_bank, clk_name, clk_slot, clk_rate) \
 static struct clk __clk_##clk_bank##_##clk_slot = { \
 	.name = clk_name, \
diff --git a/arch/m68k/include/asm/mmu_context.h b/arch/m68k/include/asm/mmu_context.h
index f5b1852..993fd7e 100644
--- a/arch/m68k/include/asm/mmu_context.h
+++ b/arch/m68k/include/asm/mmu_context.h
@@ -100,6 +100,8 @@
 	struct mm_struct *mm;
 	int asid;
 	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned long mmuar;
@@ -127,7 +129,15 @@
 	if (pgd_none(*pgd))
 		goto bug;
 
-	pmd = pmd_offset(pgd, mmuar);
+	p4d = p4d_offset(pgd, mmuar);
+	if (p4d_none(*p4d))
+		goto bug;
+
+	pud = pud_offset(p4d, mmuar);
+	if (pud_none(*pud))
+		goto bug;
+
+	pmd = pmd_offset(pud, mmuar);
 	if (pmd_none(*pmd))
 		goto bug;
 
@@ -212,7 +222,7 @@
 
 #include <asm/setup.h>
 #include <asm/page.h>
-#include <asm/pgalloc.h>
+#include <asm/cacheflush.h>
 
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
diff --git a/arch/m68k/include/asm/module.lds.h b/arch/m68k/include/asm/module.lds.h
new file mode 100644
index 0000000..fda94fa
--- /dev/null
+++ b/arch/m68k/include/asm/module.lds.h
@@ -0,0 +1,7 @@
+SECTIONS {
+	.m68k_fixup : {
+		__start_fixup = .;
+		*(.m68k_fixup)
+		__stop_fixup = .;
+	}
+}
diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h
index acab315..b4fc3b4 100644
--- a/arch/m68k/include/asm/motorola_pgalloc.h
+++ b/arch/m68k/include/asm/motorola_pgalloc.h
@@ -5,93 +5,77 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 
-extern pmd_t *get_pointer_table(void);
-extern int free_pointer_table(pmd_t *);
+extern void mmu_page_ctor(void *page);
+extern void mmu_page_dtor(void *page);
+
+enum m68k_table_types {
+	TABLE_PGD = 0,
+	TABLE_PMD = 0, /* same size as PGD */
+	TABLE_PTE = 1,
+};
+
+extern void init_pointer_table(void *table, int type);
+extern void *get_pointer_table(int type);
+extern int free_pointer_table(void *table, int type);
+
+/*
+ * Allocate and free page tables. The xxx_kernel() versions are
+ * used to allocate a kernel page table - this turns on ASN bits
+ * if any.
+ */
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-	if (pte) {
-		__flush_page_to_ram(pte);
-		flush_tlb_kernel_page(pte);
-		nocache_page(pte);
-	}
-
-	return pte;
+	return get_pointer_table(TABLE_PTE);
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	cache_page(pte);
-	free_page((unsigned long) pte);
+	free_pointer_table(pte, TABLE_PTE);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	struct page *page;
-	pte_t *pte;
-
-	page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
-	if(!page)
-		return NULL;
-	if (!pgtable_pte_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
-
-	pte = kmap(page);
-	__flush_page_to_ram(pte);
-	flush_tlb_kernel_page(pte);
-	nocache_page(pte);
-	kunmap(page);
-	return page;
+	return get_pointer_table(TABLE_PTE);
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t page)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable)
 {
-	pgtable_pte_page_dtor(page);
-	cache_page(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table(pgtable, TABLE_PTE);
 }
 
-static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable,
 				  unsigned long address)
 {
-	pgtable_pte_page_dtor(page);
-	cache_page(kmap(page));
-	kunmap(page);
-	__free_page(page);
+	free_pointer_table(pgtable, TABLE_PTE);
 }
 
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	return get_pointer_table();
+	return get_pointer_table(TABLE_PMD);
 }
 
 static inline int pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, TABLE_PMD);
 }
 
 static inline int __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 				 unsigned long address)
 {
-	return free_pointer_table(pmd);
+	return free_pointer_table(pmd, TABLE_PMD);
 }
 
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	pmd_free(mm, (pmd_t *)pgd);
+	free_pointer_table(pgd, TABLE_PGD);
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return (pgd_t *)get_pointer_table();
+	return get_pointer_table(TABLE_PGD);
 }
 
 
@@ -102,13 +86,13 @@
 
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t page)
 {
-	pmd_set(pmd, page_address(page));
+	pmd_set(pmd, page);
 }
-#define pmd_pgtable(pmd) pmd_page(pmd)
+#define pmd_pgtable(pmd) ((pgtable_t)pmd_page_vaddr(pmd))
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	pgd_set(pgd, pmd);
+	pud_set(pud, pmd);
 }
 
 #endif /* _MOTOROLA_PGALLOC_H */
diff --git a/arch/m68k/include/asm/motorola_pgtable.h b/arch/m68k/include/asm/motorola_pgtable.h
index 7f66a7b..8076467 100644
--- a/arch/m68k/include/asm/motorola_pgtable.h
+++ b/arch/m68k/include/asm/motorola_pgtable.h
@@ -23,7 +23,18 @@
 #define _DESCTYPE_MASK	0x003
 
 #define _CACHEMASK040	(~0x060)
-#define _TABLE_MASK	(0xfffffe00)
+
+/*
+ * Currently set to the minimum alignment of table pointers (256 bytes).
+ * The hardware only uses the low 4 bits for state:
+ *
+ *    3 - Used
+ *    2 - Write Protected
+ *  0,1 - Descriptor Type
+ *
+ * and has the rest of the bits reserved.
+ */
+#define _TABLE_MASK	(0xffffff00)
 
 #define _PAGE_TABLE	(_PAGE_SHORT)
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_NOCACHE)
@@ -108,23 +119,17 @@
 
 static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 {
-	unsigned long ptbl = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
-	unsigned long *ptr = pmdp->pmd;
-	short i = 16;
-	while (--i >= 0) {
-		*ptr++ = ptbl;
-		ptbl += (sizeof(pte_t)*PTRS_PER_PTE/16);
-	}
+	pmd_val(*pmdp) = virt_to_phys(ptep) | _PAGE_TABLE | _PAGE_ACCESSED;
 }
 
-static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
+static inline void pud_set(pud_t *pudp, pmd_t *pmdp)
 {
-	pgd_val(*pgdp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
+	pud_val(*pudp) = _PAGE_TABLE | _PAGE_ACCESSED | __pa(pmdp);
 }
 
 #define __pte_page(pte) ((unsigned long)__va(pte_val(pte) & PAGE_MASK))
-#define __pmd_page(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
-#define __pgd_page(pgd) ((unsigned long)__va(pgd_val(pgd) & _TABLE_MASK))
+#define pmd_page_vaddr(pmd) ((unsigned long)__va(pmd_val(pmd) & _TABLE_MASK))
+#define pud_page_vaddr(pud) ((unsigned long)__va(pud_val(pud) & _TABLE_MASK))
 
 
 #define pte_none(pte)		(!pte_val(pte))
@@ -138,20 +143,21 @@
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		((pmd_val(pmd) & _DESCTYPE_MASK) != _PAGE_TABLE)
 #define pmd_present(pmd)	(pmd_val(pmd) & _PAGE_TABLE)
-#define pmd_clear(pmdp) ({			\
-	unsigned long *__ptr = pmdp->pmd;	\
-	short __i = 16;				\
-	while (--__i >= 0)			\
-		*__ptr++ = 0;			\
-})
-#define pmd_page(pmd)		virt_to_page(__va(pmd_val(pmd)))
+#define pmd_clear(pmdp)		({ pmd_val(*pmdp) = 0; })
+
+/*
+ * m68k does not have huge pages (020/030 actually could), but generic code
+ * expects pmd_page() to exists, only to then DCE it all. Provide a dummy to
+ * make the compiler happy.
+ */
+#define pmd_page(pmd)		NULL
 
 
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		((pgd_val(pgd) & _DESCTYPE_MASK) != _PAGE_TABLE)
-#define pgd_present(pgd)	(pgd_val(pgd) & _PAGE_TABLE)
-#define pgd_clear(pgdp)		({ pgd_val(*pgdp) = 0; })
-#define pgd_page(pgd)		(mem_map + ((unsigned long)(__va(pgd_val(pgd)) - PAGE_OFFSET) >> PAGE_SHIFT))
+#define pud_none(pud)		(!pud_val(pud))
+#define pud_bad(pud)		((pud_val(pud) & _DESCTYPE_MASK) != _PAGE_TABLE)
+#define pud_present(pud)	(pud_val(pud) & _PAGE_TABLE)
+#define pud_clear(pudp)		({ pud_val(*pudp) = 0; })
+#define pud_page(pud)		(mem_map + ((unsigned long)(__va(pud_val(pud)) - PAGE_OFFSET) >> PAGE_SHIFT))
 
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
@@ -168,7 +174,6 @@
 static inline int pte_write(pte_t pte)		{ return !(pte_val(pte) & _PAGE_RONLY); }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_special(pte_t pte)	{ return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) |= _PAGE_RONLY; return pte; }
 static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
@@ -186,85 +191,10 @@
 	pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
 	return pte;
 }
-static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
-
-#define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
-
-#define pgd_index(address)     ((address) >> PGDIR_SHIFT)
-
-/* to find an entry in a page-table-directory */
-static inline pgd_t *pgd_offset(const struct mm_struct *mm,
-				unsigned long address)
-{
-	return mm->pgd + pgd_index(address);
-}
 
 #define swapper_pg_dir kernel_pg_dir
 extern pgd_t kernel_pg_dir[128];
 
-static inline pgd_t *pgd_offset_k(unsigned long address)
-{
-	return kernel_pg_dir + (address >> PGDIR_SHIFT);
-}
-
-
-/* Find an entry in the second-level page table.. */
-static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
-{
-	return (pmd_t *)__pgd_page(*dir) + ((address >> PMD_SHIFT) & (PTRS_PER_PMD-1));
-}
-
-/* Find an entry in the third-level page table.. */
-static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address)
-{
-	return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
-}
-
-#define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
-#define pte_unmap(pte)		((void)0)
-
-/*
- * Allocate and free page tables. The xxx_kernel() versions are
- * used to allocate a kernel page table - this turns on ASN bits
- * if any.
- */
-
-/* Prior to calling these routines, the page should have been flushed
- * from both the cache and ATC, or the CPU might not notice that the
- * cache setting for the page has been changed. -jskov
- */
-static inline void nocache_page(void *vaddr)
-{
-	unsigned long addr = (unsigned long)vaddr;
-
-	if (CPU_IS_040_OR_060) {
-		pgd_t *dir;
-		pmd_t *pmdp;
-		pte_t *ptep;
-
-		dir = pgd_offset_k(addr);
-		pmdp = pmd_offset(dir, addr);
-		ptep = pte_offset_kernel(pmdp, addr);
-		*ptep = pte_mknocache(*ptep);
-	}
-}
-
-static inline void cache_page(void *vaddr)
-{
-	unsigned long addr = (unsigned long)vaddr;
-
-	if (CPU_IS_040_OR_060) {
-		pgd_t *dir;
-		pmd_t *pmdp;
-		pte_t *ptep;
-
-		dir = pgd_offset_k(addr);
-		pmdp = pmd_offset(dir, addr);
-		ptep = pte_offset_kernel(pmdp, addr);
-		*ptep = pte_mkcache(*ptep);
-	}
-}
-
 /* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
 #define __swp_type(x)		(((x).val >> 4) & 0xff)
 #define __swp_offset(x)		((x).val >> 12)
diff --git a/arch/m68k/include/asm/page.h b/arch/m68k/include/asm/page.h
index 700d819..2614a12 100644
--- a/arch/m68k/include/asm/page.h
+++ b/arch/m68k/include/asm/page.h
@@ -21,19 +21,32 @@
 /*
  * These are used to make use of C type-checking..
  */
+#if !defined(CONFIG_MMU) || CONFIG_PGTABLE_LEVELS == 3
+typedef struct { unsigned long pmd; } pmd_t;
+#define pmd_val(x)	((&x)->pmd)
+#define __pmd(x)	((pmd_t) { (x) } )
+#endif
+
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned long pmd[16]; } pmd_t;
 typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
+
+#if defined(CONFIG_SUN3)
+/*
+ * Sun3 still uses the asm-generic/pgalloc.h code and thus needs this
+ * definition. It would be possible to unify Sun3 and ColdFire pgalloc and have
+ * all of m68k use the same type.
+ */
 typedef struct page *pgtable_t;
+#else
+typedef pte_t *pgtable_t;
+#endif
 
 #define pte_val(x)	((x).pte)
-#define pmd_val(x)	((&x)->pmd[0])
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x)	((pte_t) { (x) } )
-#define __pmd(x)	((pmd_t) { { (x) }, })
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
@@ -52,9 +65,6 @@
 #define __phys_to_pfn(paddr)	((unsigned long)((paddr) >> PAGE_SHIFT))
 #define __pfn_to_phys(pfn)	PFN_PHYS(pfn)
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #include <asm-generic/getorder.h>
 
 #endif /* _M68K_PAGE_H */
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h
index 646c174..aca22c2 100644
--- a/arch/m68k/include/asm/pgtable_mm.h
+++ b/arch/m68k/include/asm/pgtable_mm.h
@@ -2,7 +2,12 @@
 #ifndef _M68K_PGTABLE_H
 #define _M68K_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
+
+#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
+#include <asm-generic/pgtable-nopmd.h>
+#else
+#include <asm-generic/pgtable-nopud.h>
+#endif
 
 #include <asm/setup.h>
 
@@ -30,10 +35,8 @@
 
 
 /* PMD_SHIFT determines the size of the area a second-level page table can map */
-#ifdef CONFIG_SUN3
-#define PMD_SHIFT       17
-#else
-#define PMD_SHIFT	22
+#if CONFIG_PGTABLE_LEVELS == 3
+#define PMD_SHIFT	18
 #endif
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
@@ -64,8 +67,8 @@
 #define PTRS_PER_PMD	1
 #define PTRS_PER_PGD	1024
 #else
-#define PTRS_PER_PTE	1024
-#define PTRS_PER_PMD	8
+#define PTRS_PER_PTE	64
+#define PTRS_PER_PMD	128
 #define PTRS_PER_PGD	128
 #endif
 #define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
@@ -73,8 +76,8 @@
 
 /* Virtual address region for use by kernel_map() */
 #ifdef CONFIG_SUN3
-#define KMAP_START     0x0DC00000
-#define KMAP_END       0x0E000000
+#define KMAP_START	0x0dc00000
+#define KMAP_END	0x0e000000
 #elif defined(CONFIG_COLDFIRE)
 #define KMAP_START	0xe0000000
 #define KMAP_END	0xf0000000
@@ -173,7 +176,6 @@
 #define pgprot_dmacoherent(prot)	pgprot_dmacoherent(prot)
 
 #endif /* CONFIG_COLDFIRE */
-#include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _M68K_PGTABLE_H */
diff --git a/arch/m68k/include/asm/pgtable_no.h b/arch/m68k/include/asm/pgtable_no.h
index c18165b..87151d6 100644
--- a/arch/m68k/include/asm/pgtable_no.h
+++ b/arch/m68k/include/asm/pgtable_no.h
@@ -2,7 +2,7 @@
 #ifndef _M68KNOMMU_PGTABLE_H
 #define _M68KNOMMU_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
+#include <asm-generic/pgtable-nopud.h>
 
 /*
  * (C) Copyright 2000-2002, Greg Ungerer <gerg@snapgear.com>
@@ -53,6 +53,4 @@
 #define	KMAP_START	0
 #define	KMAP_END	0xffffffff
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _M68KNOMMU_PGTABLE_H */
diff --git a/arch/m68k/include/asm/raw_io.h b/arch/m68k/include/asm/raw_io.h
index 8a6dc6e..80eb239 100644
--- a/arch/m68k/include/asm/raw_io.h
+++ b/arch/m68k/include/asm/raw_io.h
@@ -17,21 +17,21 @@
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (unsigned long)(addr)); __v; })
 #define in_be16(addr) \
-    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (unsigned long)(addr)); __v; })
 #define in_be32(addr) \
-    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (unsigned long)(addr)); __v; })
 #define in_le16(addr) \
-    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (addr)); __v; })
+    ({ u16 __v = le16_to_cpu(*(__force volatile __le16 *) (unsigned long)(addr)); __v; })
 #define in_le32(addr) \
-    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (addr)); __v; })
+    ({ u32 __v = le32_to_cpu(*(__force volatile __le32 *) (unsigned long)(addr)); __v; })
 
-#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (unsigned long)(addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (unsigned long)(addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (unsigned long)(addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile __le16 *) (unsigned long)(addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile __le32 *) (unsigned long)(addr)) = cpu_to_le32(l))
 
 #define raw_inb in_8
 #define raw_inw in_be16
@@ -80,14 +80,14 @@
 	({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
 
 #define rom_out_8(addr, b)	\
-	({u8 __w, __v = (b);  u32 _addr = ((u32) (addr)); \
+	({u8 __maybe_unused __w, __v = (b);  u32 _addr = ((u32) (addr)); \
 	__w = ((*(__force volatile u8 *)  ((_addr | 0x10000) + (__v<<1)))); })
 #define rom_out_be16(addr, w)	\
-	({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \
+	({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \
 	__w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v & 0xFF)<<1)))); \
 	__w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v >> 8)<<1)))); })
 #define rom_out_le16(addr, w)	\
-	({u16 __w, __v = (w); u32 _addr = ((u32) (addr)); \
+	({u16 __maybe_unused __w, __v = (w); u32 _addr = ((u32) (addr)); \
 	__w = ((*(__force volatile u16 *) ((_addr & 0xFFFF0000UL) + ((__v >> 8)<<1)))); \
 	__w = ((*(__force volatile u16 *) ((_addr | 0x10000) + ((__v & 0xFF)<<1)))); })
 
diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h
index c668655..2b5e68a 100644
--- a/arch/m68k/include/asm/segment.h
+++ b/arch/m68k/include/asm/segment.h
@@ -52,7 +52,7 @@
 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
 #endif
 
-#define segment_eq(a, b) ((a).seg == (b).seg)
+#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h
index 8561211..000f648 100644
--- a/arch/m68k/include/asm/sun3_pgalloc.h
+++ b/arch/m68k/include/asm/sun3_pgalloc.h
@@ -13,12 +13,10 @@
 
 #include <asm/tlb.h>
 
-#include <asm-generic/pgalloc.h>	/* for pte_{alloc,free}_one */
+#include <asm-generic/pgalloc.h>
 
 extern const char bad_pmd_string[];
 
-#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
-
 #define __pte_free_tlb(tlb,pte,addr)			\
 do {							\
 	pgtable_pte_page_dtor(pte);			\
@@ -41,12 +39,6 @@
  * inside the pgd, so has no extra memory associated with it.
  */
 #define pmd_free(mm, x)			do { } while (0)
-#define __pmd_free_tlb(tlb, x, addr)	do { } while (0)
-
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
-{
-        free_page((unsigned long) pgd);
-}
 
 static inline pgd_t * pgd_alloc(struct mm_struct *mm)
 {
@@ -58,6 +50,4 @@
      return new_pgd;
 }
 
-#define pgd_populate(mm, pmd, pte) BUG()
-
 #endif /* SUN3_PGALLOC_H */
diff --git a/arch/m68k/include/asm/sun3_pgtable.h b/arch/m68k/include/asm/sun3_pgtable.h
index c987d50..5b24283 100644
--- a/arch/m68k/include/asm/sun3_pgtable.h
+++ b/arch/m68k/include/asm/sun3_pgtable.h
@@ -110,15 +110,13 @@
 
 #define pmd_set(pmdp,ptep) do {} while (0)
 
-static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp)
-{
-	pgd_val(*pgdp) = virt_to_phys(pmdp);
-}
-
 #define __pte_page(pte) \
 ((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT))
-#define __pmd_page(pmd) \
-((unsigned long) __va (pmd_val (pmd) & PAGE_MASK))
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
+}
 
 static inline int pte_none (pte_t pte) { return !pte_val (pte); }
 static inline int pte_present (pte_t pte) { return pte_val (pte) & SUN3_PAGE_VALID; }
@@ -132,7 +130,7 @@
 ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; })
 
 #define pte_page(pte)		virt_to_page(__pte_page(pte))
-#define pmd_page(pmd)		virt_to_page(__pmd_page(pmd))
+#define pmd_page(pmd)		virt_to_page(pmd_page_vaddr(pmd))
 
 
 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
@@ -145,16 +143,9 @@
 #define pmd_present(pmd) (!pmd_none2(&(pmd)))
 static inline void pmd_clear (pmd_t *pmdp) { pmd_val (*pmdp) = 0; }
 
-static inline int pgd_none (pgd_t pgd) { return 0; }
-static inline int pgd_bad (pgd_t pgd) { return 0; }
-static inline int pgd_present (pgd_t pgd) { return 1; }
-static inline void pgd_clear (pgd_t *pgdp) {}
-
 
 #define pte_ERROR(e) \
 	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#define pmd_ERROR(e) \
-	pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
 	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
@@ -167,7 +158,6 @@
 static inline int pte_write(pte_t pte)		{ return pte_val(pte) & SUN3_PAGE_WRITEABLE; }
 static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & SUN3_PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte)		{ return pte_val(pte) & SUN3_PAGE_ACCESSED; }
-static inline int pte_special(pte_t pte)	{ return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)	{ pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
 static inline pte_t pte_mkclean(pte_t pte)	{ pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
@@ -180,32 +170,10 @@
 //static inline pte_t pte_mkcache(pte_t pte)	{ pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
 // until then, use:
 static inline pte_t pte_mkcache(pte_t pte)	{ return pte; }
-static inline pte_t pte_mkspecial(pte_t pte)	{ return pte; }
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
 
-/* Find an entry in a pagetable directory. */
-#define pgd_index(address)     ((address) >> PGDIR_SHIFT)
-
-#define pgd_offset(mm, address) \
-((mm)->pgd + pgd_index(address))
-
-/* Find an entry in a kernel pagetable directory. */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
-
-/* Find an entry in the second-level pagetable. */
-static inline pmd_t *pmd_offset (pgd_t *pgd, unsigned long address)
-{
-	return (pmd_t *) pgd;
-}
-
-/* Find an entry in the third-level pagetable. */
-#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
-#define pte_offset_kernel(pmd, address) ((pte_t *) __pmd_page(*pmd) + pte_index(address))
-#define pte_offset_map(pmd, address) ((pte_t *)page_address(pmd_page(*pmd)) + pte_index(address))
-#define pte_unmap(pte) do { } while (0)
-
 /* Macros to (de)construct the fake PTEs representing swap pages. */
 #define __swp_type(x)		((x).val & 0x7F)
 #define __swp_offset(x)		(((x).val) >> 7)
diff --git a/arch/m68k/include/asm/sun3xflop.h b/arch/m68k/include/asm/sun3xflop.h
index ef04c43..93f2a84 100644
--- a/arch/m68k/include/asm/sun3xflop.h
+++ b/arch/m68k/include/asm/sun3xflop.h
@@ -10,8 +10,8 @@
 #ifndef __ASM_SUN3X_FLOPPY_H
 #define __ASM_SUN3X_FLOPPY_H
 
+#include <linux/pgtable.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/sun3x.h>
 
diff --git a/arch/m68k/include/asm/thread_info.h b/arch/m68k/include/asm/thread_info.h
index 015f1ca..3689c67 100644
--- a/arch/m68k/include/asm/thread_info.h
+++ b/arch/m68k/include/asm/thread_info.h
@@ -68,4 +68,12 @@
 #define TIF_MEMDIE		16	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	18	/* restore signal mask in do_signal */
 
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_DELAYED_TRACE	(1 << TIF_DELAYED_TRACE)
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_MEMDIE		(1 << TIF_MEMDIE)
+#define _TIF_RESTORE_SIGMASK	(1 << TIF_RESTORE_SIGMASK)
+
 #endif	/* _ASM_M68K_THREAD_INFO_H */
diff --git a/arch/m68k/include/asm/tlbflush.h b/arch/m68k/include/asm/tlbflush.h
index 191e75a..5337bc2 100644
--- a/arch/m68k/include/asm/tlbflush.h
+++ b/arch/m68k/include/asm/tlbflush.h
@@ -85,10 +85,10 @@
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	if (vma->vm_mm == current->active_mm) {
-		mm_segment_t old_fs = get_fs();
-		set_fs(USER_DS);
+		mm_segment_t old_fs = force_uaccess_begin();
+
 		__flush_tlb_one(addr);
-		set_fs(old_fs);
+		force_uaccess_end(old_fs);
 	}
 }
 
diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h
index e896466..f98208c 100644
--- a/arch/m68k/include/asm/uaccess.h
+++ b/arch/m68k/include/asm/uaccess.h
@@ -1,7 +1,397 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifdef __uClinux__
-#include <asm/uaccess_no.h>
-#else
-#include <asm/uaccess_mm.h>
-#endif
+#ifndef __M68K_UACCESS_H
+#define __M68K_UACCESS_H
+
+#ifdef CONFIG_MMU
+
+/*
+ * User space memory access functions
+ */
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <asm/segment.h>
 #include <asm/extable.h>
+
+/* We let the MMU do all checking */
+static inline int access_ok(const void __user *addr,
+			    unsigned long size)
+{
+	return 1;
+}
+
+/*
+ * Not all varients of the 68k family support the notion of address spaces.
+ * The traditional 680x0 parts do, and they use the sfc/dfc registers and
+ * the "moves" instruction to access user space from kernel space. Other
+ * family members like ColdFire don't support this, and only have a single
+ * address space, and use the usual "move" instruction for user space access.
+ *
+ * Outside of this difference the user space access functions are the same.
+ * So lets keep the code simple and just define in what we need to use.
+ */
+#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
+#define	MOVES	"moves"
+#else
+#define	MOVES	"move"
+#endif
+
+extern int __put_user_bad(void);
+extern int __get_user_bad(void);
+
+#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
+asm volatile ("\n"					\
+	"1:	"MOVES"."#bwl"	%2,%1\n"		\
+	"2:\n"						\
+	"	.section .fixup,\"ax\"\n"		\
+	"	.even\n"				\
+	"10:	moveq.l	%3,%0\n"			\
+	"	jra 2b\n"				\
+	"	.previous\n"				\
+	"\n"						\
+	"	.section __ex_table,\"a\"\n"		\
+	"	.align	4\n"				\
+	"	.long	1b,10b\n"			\
+	"	.long	2b,10b\n"			\
+	"	.previous"				\
+	: "+d" (res), "=m" (*(ptr))			\
+	: #reg (x), "i" (err))
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ */
+
+#define __put_user(x, ptr)						\
+({									\
+	typeof(*(ptr)) __pu_val = (x);					\
+	int __pu_err = 0;						\
+	__chk_user_ptr(ptr);						\
+	switch (sizeof (*(ptr))) {					\
+	case 1:								\
+		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
+		break;							\
+	case 2:								\
+		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
+		break;							\
+	case 4:								\
+		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
+		break;							\
+	case 8:								\
+ 	    {								\
+ 		const void __user *__pu_ptr = (ptr);			\
+		asm volatile ("\n"					\
+			"1:	"MOVES".l	%2,(%1)+\n"		\
+			"2:	"MOVES".l	%R2,(%1)\n"		\
+			"3:\n"						\
+			"	.section .fixup,\"ax\"\n"		\
+			"	.even\n"				\
+			"10:	movel %3,%0\n"				\
+			"	jra 3b\n"				\
+			"	.previous\n"				\
+			"\n"						\
+			"	.section __ex_table,\"a\"\n"		\
+			"	.align 4\n"				\
+			"	.long 1b,10b\n"				\
+			"	.long 2b,10b\n"				\
+			"	.long 3b,10b\n"				\
+			"	.previous"				\
+			: "+d" (__pu_err), "+a" (__pu_ptr)		\
+			: "r" (__pu_val), "i" (-EFAULT)			\
+			: "memory");					\
+		break;							\
+	    }								\
+	default:							\
+		__pu_err = __put_user_bad();				\
+		break;							\
+	}								\
+	__pu_err;							\
+})
+#define put_user(x, ptr)	__put_user(x, ptr)
+
+
+#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({		\
+	type __gu_val;							\
+	asm volatile ("\n"						\
+		"1:	"MOVES"."#bwl"	%2,%1\n"			\
+		"2:\n"							\
+		"	.section .fixup,\"ax\"\n"			\
+		"	.even\n"					\
+		"10:	move.l	%3,%0\n"				\
+		"	sub.l	%1,%1\n"				\
+		"	jra	2b\n"					\
+		"	.previous\n"					\
+		"\n"							\
+		"	.section __ex_table,\"a\"\n"			\
+		"	.align	4\n"					\
+		"	.long	1b,10b\n"				\
+		"	.previous"					\
+		: "+d" (res), "=&" #reg (__gu_val)			\
+		: "m" (*(ptr)), "i" (err));				\
+	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
+})
+
+#define __get_user(x, ptr)						\
+({									\
+	int __gu_err = 0;						\
+	__chk_user_ptr(ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
+		break;							\
+	case 2:								\
+		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
+		break;							\
+	case 4:								\
+		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
+		break;							\
+	case 8: {							\
+		const void __user *__gu_ptr = (ptr);			\
+		union {							\
+			u64 l;						\
+			__typeof__(*(ptr)) t;				\
+		} __gu_val;						\
+		asm volatile ("\n"					\
+			"1:	"MOVES".l	(%2)+,%1\n"		\
+			"2:	"MOVES".l	(%2),%R1\n"		\
+			"3:\n"						\
+			"	.section .fixup,\"ax\"\n"		\
+			"	.even\n"				\
+			"10:	move.l	%3,%0\n"			\
+			"	sub.l	%1,%1\n"			\
+			"	sub.l	%R1,%R1\n"			\
+			"	jra	3b\n"				\
+			"	.previous\n"				\
+			"\n"						\
+			"	.section __ex_table,\"a\"\n"		\
+			"	.align	4\n"				\
+			"	.long	1b,10b\n"			\
+			"	.long	2b,10b\n"			\
+			"	.previous"				\
+			: "+d" (__gu_err), "=&r" (__gu_val.l),		\
+			  "+a" (__gu_ptr)				\
+			: "i" (-EFAULT)					\
+			: "memory");					\
+		(x) = __gu_val.t;					\
+		break;							\
+	}								\
+	default:							\
+		__gu_err = __get_user_bad();				\
+		break;							\
+	}								\
+	__gu_err;							\
+})
+#define get_user(x, ptr) __get_user(x, ptr)
+
+unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
+unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
+
+#define __suffix0
+#define __suffix1 b
+#define __suffix2 w
+#define __suffix4 l
+
+#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
+	asm volatile ("\n"						\
+		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
+		"	move."#s1"	%3,(%1)+\n"			\
+		"	.ifnc	\""#s2"\",\"\"\n"			\
+		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
+		"	move."#s2"	%3,(%1)+\n"			\
+		"	.ifnc	\""#s3"\",\"\"\n"			\
+		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
+		"	move."#s3"	%3,(%1)+\n"			\
+		"	.endif\n"					\
+		"	.endif\n"					\
+		"4:\n"							\
+		"	.section __ex_table,\"a\"\n"			\
+		"	.align	4\n"					\
+		"	.long	1b,10f\n"				\
+		"	.ifnc	\""#s2"\",\"\"\n"			\
+		"	.long	2b,20f\n"				\
+		"	.ifnc	\""#s3"\",\"\"\n"			\
+		"	.long	3b,30f\n"				\
+		"	.endif\n"					\
+		"	.endif\n"					\
+		"	.previous\n"					\
+		"\n"							\
+		"	.section .fixup,\"ax\"\n"			\
+		"	.even\n"					\
+		"10:	addq.l #"#n1",%0\n"				\
+		"	.ifnc	\""#s2"\",\"\"\n"			\
+		"20:	addq.l #"#n2",%0\n"				\
+		"	.ifnc	\""#s3"\",\"\"\n"			\
+		"30:	addq.l #"#n3",%0\n"				\
+		"	.endif\n"					\
+		"	.endif\n"					\
+		"	jra	4b\n"					\
+		"	.previous\n"					\
+		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
+		: : "memory")
+
+#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
+	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
+#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
+	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
+					__suffix##n1, __suffix##n2, __suffix##n3)
+
+static __always_inline unsigned long
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	unsigned long res = 0, tmp;
+
+	switch (n) {
+	case 1:
+		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
+		break;
+	case 2:
+		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
+		break;
+	case 3:
+		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
+		break;
+	case 4:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
+		break;
+	case 5:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
+		break;
+	case 6:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
+		break;
+	case 7:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
+		break;
+	case 8:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
+		break;
+	case 9:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
+		break;
+	case 10:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
+		break;
+	case 12:
+		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
+		break;
+	default:
+		/* we limit the inlined version to 3 moves */
+		return __generic_copy_from_user(to, from, n);
+	}
+
+	return res;
+}
+
+#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
+	asm volatile ("\n"						\
+		"	move."#s1"	(%2)+,%3\n"			\
+		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
+		"12:	move."#s2"	(%2)+,%3\n"			\
+		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
+		"22:\n"							\
+		"	.ifnc	\""#s3"\",\"\"\n"			\
+		"	move."#s3"	(%2)+,%3\n"			\
+		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
+		"32:\n"							\
+		"	.endif\n"					\
+		"4:\n"							\
+		"\n"							\
+		"	.section __ex_table,\"a\"\n"			\
+		"	.align	4\n"					\
+		"	.long	11b,5f\n"				\
+		"	.long	12b,5f\n"				\
+		"	.long	21b,5f\n"				\
+		"	.long	22b,5f\n"				\
+		"	.ifnc	\""#s3"\",\"\"\n"			\
+		"	.long	31b,5f\n"				\
+		"	.long	32b,5f\n"				\
+		"	.endif\n"					\
+		"	.previous\n"					\
+		"\n"							\
+		"	.section .fixup,\"ax\"\n"			\
+		"	.even\n"					\
+		"5:	moveq.l	#"#n",%0\n"				\
+		"	jra	4b\n"					\
+		"	.previous\n"					\
+		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
+		: : "memory")
+
+static __always_inline unsigned long
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	unsigned long res = 0, tmp;
+
+	switch (n) {
+	case 1:
+		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
+		break;
+	case 2:
+		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
+		break;
+	case 3:
+		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
+		break;
+	case 4:
+		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
+		break;
+	case 5:
+		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
+		break;
+	case 6:
+		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
+		break;
+	case 7:
+		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
+		break;
+	case 8:
+		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
+		break;
+	case 9:
+		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
+		break;
+	case 10:
+		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
+		break;
+	case 12:
+		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
+		break;
+	default:
+		/* limit the inlined version to 3 moves */
+		return __generic_copy_to_user(to, from, n);
+	}
+
+	return res;
+}
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	if (__builtin_constant_p(n))
+		return __constant_copy_from_user(to, from, n);
+	return __generic_copy_from_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n))
+		return __constant_copy_to_user(to, from, n);
+	return __generic_copy_to_user(to, from, n);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+#define user_addr_max() \
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
+unsigned long __clear_user(void __user *to, unsigned long n);
+
+#define clear_user	__clear_user
+
+#else /* !CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif
+
+#endif /* _M68K_UACCESS_H */
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
deleted file mode 100644
index 7e85de9..0000000
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ /dev/null
@@ -1,390 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __M68K_UACCESS_H
-#define __M68K_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/compiler.h>
-#include <linux/types.h>
-#include <asm/segment.h>
-
-/* We let the MMU do all checking */
-static inline int access_ok(const void __user *addr,
-			    unsigned long size)
-{
-	return 1;
-}
-
-/*
- * Not all varients of the 68k family support the notion of address spaces.
- * The traditional 680x0 parts do, and they use the sfc/dfc registers and
- * the "moves" instruction to access user space from kernel space. Other
- * family members like ColdFire don't support this, and only have a single
- * address space, and use the usual "move" instruction for user space access.
- *
- * Outside of this difference the user space access functions are the same.
- * So lets keep the code simple and just define in what we need to use.
- */
-#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
-#define	MOVES	"moves"
-#else
-#define	MOVES	"move"
-#endif
-
-extern int __put_user_bad(void);
-extern int __get_user_bad(void);
-
-#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
-asm volatile ("\n"					\
-	"1:	"MOVES"."#bwl"	%2,%1\n"		\
-	"2:\n"						\
-	"	.section .fixup,\"ax\"\n"		\
-	"	.even\n"				\
-	"10:	moveq.l	%3,%0\n"			\
-	"	jra 2b\n"				\
-	"	.previous\n"				\
-	"\n"						\
-	"	.section __ex_table,\"a\"\n"		\
-	"	.align	4\n"				\
-	"	.long	1b,10b\n"			\
-	"	.long	2b,10b\n"			\
-	"	.previous"				\
-	: "+d" (res), "=m" (*(ptr))			\
-	: #reg (x), "i" (err))
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- */
-
-#define __put_user(x, ptr)						\
-({									\
-	typeof(*(ptr)) __pu_val = (x);					\
-	int __pu_err = 0;						\
-	__chk_user_ptr(ptr);						\
-	switch (sizeof (*(ptr))) {					\
-	case 1:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
-		break;							\
-	case 2:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, w, r, -EFAULT);	\
-		break;							\
-	case 4:								\
-		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
-		break;							\
-	case 8:								\
- 	    {								\
- 		const void __user *__pu_ptr = (ptr);			\
-		asm volatile ("\n"					\
-			"1:	"MOVES".l	%2,(%1)+\n"		\
-			"2:	"MOVES".l	%R2,(%1)\n"		\
-			"3:\n"						\
-			"	.section .fixup,\"ax\"\n"		\
-			"	.even\n"				\
-			"10:	movel %3,%0\n"				\
-			"	jra 3b\n"				\
-			"	.previous\n"				\
-			"\n"						\
-			"	.section __ex_table,\"a\"\n"		\
-			"	.align 4\n"				\
-			"	.long 1b,10b\n"				\
-			"	.long 2b,10b\n"				\
-			"	.long 3b,10b\n"				\
-			"	.previous"				\
-			: "+d" (__pu_err), "+a" (__pu_ptr)		\
-			: "r" (__pu_val), "i" (-EFAULT)			\
-			: "memory");					\
-		break;							\
-	    }								\
-	default:							\
-		__pu_err = __put_user_bad();				\
-		break;							\
-	}								\
-	__pu_err;							\
-})
-#define put_user(x, ptr)	__put_user(x, ptr)
-
-
-#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({		\
-	type __gu_val;							\
-	asm volatile ("\n"						\
-		"1:	"MOVES"."#bwl"	%2,%1\n"			\
-		"2:\n"							\
-		"	.section .fixup,\"ax\"\n"			\
-		"	.even\n"					\
-		"10:	move.l	%3,%0\n"				\
-		"	sub.l	%1,%1\n"				\
-		"	jra	2b\n"					\
-		"	.previous\n"					\
-		"\n"							\
-		"	.section __ex_table,\"a\"\n"			\
-		"	.align	4\n"					\
-		"	.long	1b,10b\n"				\
-		"	.previous"					\
-		: "+d" (res), "=&" #reg (__gu_val)			\
-		: "m" (*(ptr)), "i" (err));				\
-	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
-})
-
-#define __get_user(x, ptr)						\
-({									\
-	int __gu_err = 0;						\
-	__chk_user_ptr(ptr);						\
-	switch (sizeof(*(ptr))) {					\
-	case 1:								\
-		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
-		break;							\
-	case 2:								\
-		__get_user_asm(__gu_err, x, ptr, u16, w, r, -EFAULT);	\
-		break;							\
-	case 4:								\
-		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
-		break;							\
-	case 8: {							\
-		const void *__gu_ptr = (ptr);				\
-		union {							\
-			u64 l;						\
-			__typeof__(*(ptr)) t;				\
-		} __gu_val;						\
-		asm volatile ("\n"					\
-			"1:	"MOVES".l	(%2)+,%1\n"		\
-			"2:	"MOVES".l	(%2),%R1\n"		\
-			"3:\n"						\
-			"	.section .fixup,\"ax\"\n"		\
-			"	.even\n"				\
-			"10:	move.l	%3,%0\n"			\
-			"	sub.l	%1,%1\n"			\
-			"	sub.l	%R1,%R1\n"			\
-			"	jra	3b\n"				\
-			"	.previous\n"				\
-			"\n"						\
-			"	.section __ex_table,\"a\"\n"		\
-			"	.align	4\n"				\
-			"	.long	1b,10b\n"			\
-			"	.long	2b,10b\n"			\
-			"	.previous"				\
-			: "+d" (__gu_err), "=&r" (__gu_val.l),		\
-			  "+a" (__gu_ptr)				\
-			: "i" (-EFAULT)					\
-			: "memory");					\
-		(x) = __gu_val.t;					\
-		break;							\
-	}								\
-	default:							\
-		__gu_err = __get_user_bad();				\
-		break;							\
-	}								\
-	__gu_err;							\
-})
-#define get_user(x, ptr) __get_user(x, ptr)
-
-unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
-unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);
-
-#define __suffix0
-#define __suffix1 b
-#define __suffix2 w
-#define __suffix4 l
-
-#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
-	asm volatile ("\n"						\
-		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
-		"	move."#s1"	%3,(%1)+\n"			\
-		"	.ifnc	\""#s2"\",\"\"\n"			\
-		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
-		"	move."#s2"	%3,(%1)+\n"			\
-		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
-		"	move."#s3"	%3,(%1)+\n"			\
-		"	.endif\n"					\
-		"	.endif\n"					\
-		"4:\n"							\
-		"	.section __ex_table,\"a\"\n"			\
-		"	.align	4\n"					\
-		"	.long	1b,10f\n"				\
-		"	.ifnc	\""#s2"\",\"\"\n"			\
-		"	.long	2b,20f\n"				\
-		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"	.long	3b,30f\n"				\
-		"	.endif\n"					\
-		"	.endif\n"					\
-		"	.previous\n"					\
-		"\n"							\
-		"	.section .fixup,\"ax\"\n"			\
-		"	.even\n"					\
-		"10:	addq.l #"#n1",%0\n"				\
-		"	.ifnc	\""#s2"\",\"\"\n"			\
-		"20:	addq.l #"#n2",%0\n"				\
-		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"30:	addq.l #"#n3",%0\n"				\
-		"	.endif\n"					\
-		"	.endif\n"					\
-		"	jra	4b\n"					\
-		"	.previous\n"					\
-		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
-		: : "memory")
-
-#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
-	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
-#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
-	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
-					__suffix##n1, __suffix##n2, __suffix##n3)
-
-static __always_inline unsigned long
-__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	unsigned long res = 0, tmp;
-
-	switch (n) {
-	case 1:
-		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
-		break;
-	case 2:
-		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
-		break;
-	case 3:
-		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
-		break;
-	case 4:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
-		break;
-	case 5:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
-		break;
-	case 6:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
-		break;
-	case 7:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
-		break;
-	case 8:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
-		break;
-	case 9:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
-		break;
-	case 10:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
-		break;
-	case 12:
-		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
-		break;
-	default:
-		/* we limit the inlined version to 3 moves */
-		return __generic_copy_from_user(to, from, n);
-	}
-
-	return res;
-}
-
-#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
-	asm volatile ("\n"						\
-		"	move."#s1"	(%2)+,%3\n"			\
-		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
-		"12:	move."#s2"	(%2)+,%3\n"			\
-		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
-		"22:\n"							\
-		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"	move."#s3"	(%2)+,%3\n"			\
-		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
-		"32:\n"							\
-		"	.endif\n"					\
-		"4:\n"							\
-		"\n"							\
-		"	.section __ex_table,\"a\"\n"			\
-		"	.align	4\n"					\
-		"	.long	11b,5f\n"				\
-		"	.long	12b,5f\n"				\
-		"	.long	21b,5f\n"				\
-		"	.long	22b,5f\n"				\
-		"	.ifnc	\""#s3"\",\"\"\n"			\
-		"	.long	31b,5f\n"				\
-		"	.long	32b,5f\n"				\
-		"	.endif\n"					\
-		"	.previous\n"					\
-		"\n"							\
-		"	.section .fixup,\"ax\"\n"			\
-		"	.even\n"					\
-		"5:	moveq.l	#"#n",%0\n"				\
-		"	jra	4b\n"					\
-		"	.previous\n"					\
-		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
-		: : "memory")
-
-static __always_inline unsigned long
-__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	unsigned long res = 0, tmp;
-
-	switch (n) {
-	case 1:
-		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
-		break;
-	case 2:
-		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, r, 2);
-		break;
-	case 3:
-		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
-		break;
-	case 4:
-		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
-		break;
-	case 5:
-		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
-		break;
-	case 6:
-		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
-		break;
-	case 7:
-		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
-		break;
-	case 8:
-		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
-		break;
-	case 9:
-		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
-		break;
-	case 10:
-		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
-		break;
-	case 12:
-		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
-		break;
-	default:
-		/* limit the inlined version to 3 moves */
-		return __generic_copy_to_user(to, from, n);
-	}
-
-	return res;
-}
-
-static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	if (__builtin_constant_p(n))
-		return __constant_copy_from_user(to, from, n);
-	return __generic_copy_from_user(to, from, n);
-}
-
-static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	if (__builtin_constant_p(n))
-		return __constant_copy_to_user(to, from, n);
-	return __generic_copy_to_user(to, from, n);
-}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-#define user_addr_max() \
-	(uaccess_kernel() ? ~0UL : TASK_SIZE)
-
-extern long strncpy_from_user(char *dst, const char __user *src, long count);
-extern __must_check long strnlen_user(const char __user *str, long n);
-
-unsigned long __clear_user(void __user *to, unsigned long n);
-
-#define clear_user	__clear_user
-
-#endif /* _M68K_UACCESS_H */
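
Every asm block in the file removed above follows one exception-table
pattern: each ".long 1b,10b" pair records the address of a MOVES instruction
that may fault alongside the address of its fixup code, and on a fault the
kernel searches that table and resumes at the fixup (which loads -EFAULT
into the result register and jumps back past the access) instead of oopsing.
m68k takes the asm-generic <asm/extable.h> entry layout, which each pair
fills in; a sketch of that structure:

/* Shape of one __ex_table entry (as in asm-generic/extable.h): the fault
 * handler looks up the faulting PC via search_exception_tables() and, on a
 * hit, continues execution at ->fixup. */
struct exception_table_entry {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to resume at after a fault */
};
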
diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h
deleted file mode 100644
index 0134008..0000000
--- a/arch/m68k/include/asm/uaccess_no.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __M68KNOMMU_UACCESS_H
-#define __M68KNOMMU_UACCESS_H
-
-/*
- * User space memory access functions
- */
-#include <linux/mm.h>
-#include <linux/string.h>
-
-#include <asm/segment.h>
-
-#define access_ok(addr,size)	_access_ok((unsigned long)(addr),(size))
-
-/*
- * It is not enough to just have access_ok check for a real RAM address.
- * This would disallow the case of code/ro-data running XIP in flash/rom.
- * Ideally we would check the possible flash ranges too, but that is
- * currently not so easy.
- */
-static inline int _access_ok(unsigned long addr, unsigned long size)
-{
-	return 1;
-}
-
-/*
- * These are the main single-value transfer routines.  They automatically
- * use the right size if we just have the right pointer type.
- */
-
-#define put_user(x, ptr)				\
-({							\
-    int __pu_err = 0;					\
-    typeof(*(ptr)) __pu_val = (x);			\
-    switch (sizeof (*(ptr))) {				\
-    case 1:						\
-	__put_user_asm(__pu_err, __pu_val, ptr, b);	\
-	break;						\
-    case 2:						\
-	__put_user_asm(__pu_err, __pu_val, ptr, w);	\
-	break;						\
-    case 4:						\
-	__put_user_asm(__pu_err, __pu_val, ptr, l);	\
-	break;						\
-    case 8:						\
-	memcpy(ptr, &__pu_val, sizeof (*(ptr))); \
-	break;						\
-    default:						\
-	__pu_err = __put_user_bad();			\
-	break;						\
-    }							\
-    __pu_err;						\
-})
-#define __put_user(x, ptr) put_user(x, ptr)
-
-extern int __put_user_bad(void);
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-
-#define __ptr(x) ((unsigned long *)(x))
-
-#define __put_user_asm(err,x,ptr,bwl)				\
-	__asm__ ("move" #bwl " %0,%1"				\
-		: /* no outputs */						\
-		:"d" (x),"m" (*__ptr(ptr)) : "memory")
-
-#define get_user(x, ptr)					\
-({								\
-    int __gu_err = 0;						\
-    typeof(x) __gu_val = 0;					\
-    switch (sizeof(*(ptr))) {					\
-    case 1:							\
-	__get_user_asm(__gu_err, __gu_val, ptr, b, "=d");	\
-	break;							\
-    case 2:							\
-	__get_user_asm(__gu_err, __gu_val, ptr, w, "=r");	\
-	break;							\
-    case 4:							\
-	__get_user_asm(__gu_err, __gu_val, ptr, l, "=r");	\
-	break;							\
-    case 8:							\
-	memcpy((void *) &__gu_val, ptr, sizeof (*(ptr)));	\
-	break;							\
-    default:							\
-	__gu_val = 0;						\
-	__gu_err = __get_user_bad();				\
-	break;							\
-    }								\
-    (x) = (typeof(*(ptr))) __gu_val;				\
-    __gu_err;							\
-})
-#define __get_user(x, ptr) get_user(x, ptr)
-
-extern int __get_user_bad(void);
-
-#define __get_user_asm(err,x,ptr,bwl,reg)			\
-	__asm__ ("move" #bwl " %1,%0"				\
-		 : "=d" (x)					\
-		 : "m" (*__ptr(ptr)))
-
-static inline unsigned long
-raw_copy_from_user(void *to, const void __user *from, unsigned long n)
-{
-	memcpy(to, (__force const void *)from, n);
-	return 0;
-}
-
-static inline unsigned long
-raw_copy_to_user(void __user *to, const void *from, unsigned long n)
-{
-	memcpy((__force void *)to, from, n);
-	return 0;
-}
-#define INLINE_COPY_FROM_USER
-#define INLINE_COPY_TO_USER
-
-/*
- * Copy a null terminated string from userspace.
- */
-
-static inline long
-strncpy_from_user(char *dst, const char *src, long count)
-{
-	char *tmp;
-	strncpy(dst, src, count);
-	for (tmp = dst; *tmp && count > 0; tmp++, count--)
-		;
-	return(tmp - dst); /* DAVIDM should we count a NUL ?  check getname */
-}
-
-/*
- * Return the size of a string (including the ending 0)
- *
- * Return 0 on exception, a value greater than N if too long
- */
-static inline long strnlen_user(const char *src, long n)
-{
-	return(strlen(src) + 1); /* DAVIDM make safer */
-}
-
-/*
- * Zero Userspace
- */
-
-static inline unsigned long
-__clear_user(void *to, unsigned long n)
-{
-	memset(to, 0, n);
-	return 0;
-}
-
-#define	clear_user(to,n)	__clear_user(to,n)
-
-#endif /* _M68KNOMMU_UACCESS_H */
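
The nommu variant deleted here never needed real checking: without an MMU
there is a single flat address space, so raw_copy_{from,to}_user() reduce to
memcpy() and _access_ok() unconditionally succeeds, which also keeps XIP
code running from flash/ROM working. Its strncpy_from_user() return
convention is easy to misread, so here is a user-space re-creation (purely
illustrative, not kernel code) showing that the count it returns excludes
the terminating NUL:

#include <assert.h>
#include <string.h>

/* Same logic as the deleted helper: copy up to count bytes, then count the
 * non-NUL bytes that actually landed in dst. */
static long demo_strncpy_from_user(char *dst, const char *src, long count)
{
	char *tmp;

	strncpy(dst, src, count);
	for (tmp = dst; *tmp && count > 0; tmp++, count--)
		;
	return tmp - dst;
}

int main(void)
{
	char buf[16];

	assert(demo_strncpy_from_user(buf, "abc", sizeof(buf)) == 3);
	return 0;
}
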
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 2e0047c..4ae5241 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -30,5 +30,6 @@
 #define __ARCH_WANT_SYS_SIGPROCMASK
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+#define __ARCH_WANT_SYS_CLONE3
 
 #endif /* _ASM_M68K_UNISTD_H_ */
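
Defining __ARCH_WANT_SYS_CLONE3 opts m68k into the generic clone3(2) entry
point (syscall 435 in the unified numbering). glibc ships no wrapper for it,
so callers go through syscall(2) with the UAPI struct clone_args; a minimal
fork()-equivalent sketch, assuming a kernel with clone3 wired up:

#define _GNU_SOURCE
#include <linux/sched.h>	/* UAPI struct clone_args (v5.3+) */
#include <signal.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct clone_args args;
	long pid;

	memset(&args, 0, sizeof(args));
	args.exit_signal = SIGCHLD;	/* deliver SIGCHLD like fork() */

	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid == 0)
		_exit(0);		/* child: exit immediately */
	if (pid > 0)
		waitpid(pid, NULL, 0);	/* parent: reap the child */
	return pid < 0;			/* nonzero if clone3 failed */
}
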
diff --git a/arch/m68k/include/asm/vmalloc.h b/arch/m68k/include/asm/vmalloc.h
new file mode 100644
index 0000000..bc1dca6
--- /dev/null
+++ b/arch/m68k/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_M68K_VMALLOC_H
+#define _ASM_M68K_VMALLOC_H
+
+#endif /* _ASM_M68K_VMALLOC_H */
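
The new <asm/vmalloc.h> is intentionally empty: <linux/vmalloc.h> includes
the per-architecture header unconditionally, so every architecture must
provide one even when, as here, it has no vmalloc hooks to override.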