v4.19.13 snapshot.
diff --git a/arch/m68k/kernel/.gitignore b/arch/m68k/kernel/.gitignore
new file mode 100644
index 0000000..c5f676c
--- /dev/null
+++ b/arch/m68k/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
new file mode 100644
index 0000000..dbac7f8
--- /dev/null
+++ b/arch/m68k/kernel/Makefile
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+extra-$(CONFIG_AMIGA)	:= head.o
+extra-$(CONFIG_ATARI)	:= head.o
+extra-$(CONFIG_MAC)	:= head.o
+extra-$(CONFIG_APOLLO)	:= head.o
+extra-$(CONFIG_VME)	:= head.o
+extra-$(CONFIG_HP300)	:= head.o
+extra-$(CONFIG_Q40)	:= head.o
+extra-$(CONFIG_SUN3X)	:= head.o
+extra-$(CONFIG_SUN3)	:= sun3-head.o
+extra-y			+= vmlinux.lds
+
+obj-y	:= entry.o irq.o module.o process.o ptrace.o
+obj-y	+= setup.o signal.o sys_m68k.o syscalltable.o time.o traps.o
+
+obj-$(CONFIG_MMU_MOTOROLA) += ints.o vectors.o
+obj-$(CONFIG_MMU_SUN3) += ints.o vectors.o
+obj-$(CONFIG_PCI) += pcibios.o
+
+obj-$(CONFIG_HAS_DMA)	+= dma.o
+
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_BOOTINFO_PROC)	+= bootinfo_proc.o
+obj-$(CONFIG_UBOOT)		+= uboot.o
+
+obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c
new file mode 100644
index 0000000..ccea355
--- /dev/null
+++ b/arch/m68k/kernel/asm-offsets.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This program is used to generate definitions needed by
+ * assembly language modules.
+ *
+ * We use the technique used in the OSF Mach kernel code:
+ * generate asm statements containing #defines,
+ * compile this file to assembler, and then extract the
+ * #defines from the assembly-language output.
+ */
+
+#define ASM_OFFSETS_C
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/kbuild.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/amigahw.h>
+#include <linux/font.h>
+
+int main(void)
+{
+	/* offsets into the task struct */
+	DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+	DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+	DEFINE(TASK_STACK, offsetof(struct task_struct, stack));
+
+	/* offsets into the thread struct */
+	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+	DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
+	DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
+	DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
+	DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
+	DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
+
+	/* offsets into the thread_info struct */
+	DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
+
+	/* offsets into the pt_regs */
+	DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
+	DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
+	DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
+	DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
+	DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
+	DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
+	DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
+	DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
+	DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
+	DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
+	DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
+	DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
+
+	/* bitfields are a bit difficult */
+#ifdef CONFIG_COLDFIRE
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2);
+#else
+	DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
+#endif
+
+	/* offsets into the irq_cpustat_t struct */
+	DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
+
+	/* signal defines */
+	DEFINE(LSIGSEGV, SIGSEGV);
+	DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
+	DEFINE(LSIGTRAP, SIGTRAP);
+	DEFINE(LTRAP_TRACE, TRAP_TRACE);
+
+#ifdef CONFIG_MMU
+	/* offsets into the bi_record struct */
+	DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
+	DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
+	DEFINE(BIR_DATA, offsetof(struct bi_record, data));
+
+	/* offsets into the font_desc struct */
+	DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
+	DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
+	DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
+	DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
+	DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
+	DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
+
+	/* offsets into the custom struct */
+	DEFINE(CUSTOMBASE, &amiga_custom);
+	DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
+	DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
+	DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
+	DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
+	DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
+	DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
+	DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
+	DEFINE(CIAABASE, &ciaa);
+	DEFINE(CIABBASE, &ciab);
+	DEFINE(C_PRA, offsetof(struct CIA, pra));
+	DEFINE(ZTWOBASE, zTwoBase);
+
+	/* enum m68k_fixup_type */
+	DEFINE(M68K_FIXUP_MEMOFFSET, m68k_fixup_memoffset);
+#endif
+
+	return 0;
+}
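How these constants reach the assembly sources: DEFINE() (from <linux/kbuild.h>) emits an inline-asm marker string carrying the value of each expression; Kbuild compiles this file to assembler only (it is never linked into the kernel) and a sed script scrapes the "->" markers into include/generated/asm-offsets.h, which entry.S and head.S pull in via <asm/asm-offsets.h>. A minimal self-contained sketch of the same trick, using illustrative names rather than the kernel's exact macro text:

/*
 * offsets_sketch.c - build with "cc -S offsets_sketch.c" and grep the
 * generated .s file for "->".  Don't link or run it: the .ascii bytes
 * land in the middle of the instruction stream, just as in asm-offsets.c.
 */
#include <stddef.h>

struct thread_sketch {
	unsigned long ksp;
	unsigned long usp;
};

#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

int main(void)
{
	DEFINE(THREAD_KSP, offsetof(struct thread_sketch, ksp));
	DEFINE(THREAD_USP, offsetof(struct thread_sketch, usp));
	return 0;
}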
diff --git a/arch/m68k/kernel/bootinfo_proc.c b/arch/m68k/kernel/bootinfo_proc.c
new file mode 100644
index 0000000..3b9cab8
--- /dev/null
+++ b/arch/m68k/kernel/bootinfo_proc.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on arch/arm/kernel/atags_proc.c
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/printk.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <asm/bootinfo.h>
+#include <asm/byteorder.h>
+
+
+static char bootinfo_tmp[1536] __initdata;
+
+static void *bootinfo_copy;
+static size_t bootinfo_size;
+
+static ssize_t bootinfo_read(struct file *file, char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	return simple_read_from_buffer(buf, count, ppos, bootinfo_copy,
+				       bootinfo_size);
+}
+
+static const struct file_operations bootinfo_fops = {
+	.read = bootinfo_read,
+	.llseek = default_llseek,
+};
+
+void __init save_bootinfo(const struct bi_record *bi)
+{
+	const void *start = bi;
+	size_t size = sizeof(bi->tag);
+
+	while (be16_to_cpu(bi->tag) != BI_LAST) {
+		uint16_t n = be16_to_cpu(bi->size);
+		size += n;
+		bi = (struct bi_record *)((unsigned long)bi + n);
+	}
+
+	if (size > sizeof(bootinfo_tmp)) {
+		pr_err("Cannot save %zu bytes of bootinfo\n", size);
+		return;
+	}
+
+	pr_info("Saving %zu bytes of bootinfo\n", size);
+	memcpy(bootinfo_tmp, start, size);
+	bootinfo_size = size;
+}
+
+static int __init init_bootinfo_procfs(void)
+{
+	/*
+	 * This cannot go into save_bootinfo() because kmalloc and proc don't
+	 * work yet when it is called.
+	 */
+	struct proc_dir_entry *pde;
+
+	if (!bootinfo_size)
+		return -EINVAL;
+
+	bootinfo_copy = kmemdup(bootinfo_tmp, bootinfo_size, GFP_KERNEL);
+	if (!bootinfo_copy)
+		return -ENOMEM;
+
+	pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_fops, NULL);
+	if (!pde) {
+		kfree(bootinfo_copy);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+arch_initcall(init_bootinfo_procfs);
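The buffer exposed above keeps the bootloader's record format unchanged: a big-endian 16-bit tag, a big-endian 16-bit size that covers the whole record including its header, then the payload, terminated by a BI_LAST tag (save_bootinfo() counts that final 2-byte tag into bootinfo_size). A small userspace walker over /proc/bootinfo, sketched under the assumption that BI_LAST is 0x0000 as in the m68k uapi bootinfo header:

/* biwalk.c - list the bootinfo records exported in /proc/bootinfo (sketch) */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	static unsigned char buf[1536];	/* mirrors bootinfo_tmp[] above */
	FILE *f = fopen("/proc/bootinfo", "rb");
	size_t len, off = 0;

	if (!f)
		return 1;
	len = fread(buf, 1, sizeof(buf), f);
	fclose(f);

	while (off + 4 <= len) {
		uint16_t tag, size;

		memcpy(&tag, buf + off, 2);
		memcpy(&size, buf + off + 2, 2);
		tag = be16toh(tag);
		size = be16toh(size);
		if (tag == 0x0000)	/* assumed BI_LAST */
			break;
		printf("tag 0x%04x, %u bytes\n", (unsigned)tag, (unsigned)size);
		if (size < 4)		/* malformed record, stop */
			break;
		off += size;
	}
	return 0;
}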
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
new file mode 100644
index 0000000..e99993c
--- /dev/null
+++ b/arch/m68k/kernel/dma.c
@@ -0,0 +1,125 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#undef DEBUG
+
+#include <linux/dma-noncoherent.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+
+#include <asm/pgalloc.h>
+
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t flag, unsigned long attrs)
+{
+	struct page *page, **map;
+	pgprot_t pgprot;
+	void *addr;
+	int i, order;
+
+	pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	page = alloc_pages(flag, order);
+	if (!page)
+		return NULL;
+
+	*handle = page_to_phys(page);
+	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
+	if (!map) {
+		__free_pages(page, order);
+		return NULL;
+	}
+	split_page(page, order);
+
+	order = 1 << order;
+	size >>= PAGE_SHIFT;
+	map[0] = page;
+	for (i = 1; i < size; i++)
+		map[i] = page + i;
+	for (; i < order; i++)
+		__free_page(page + i);
+	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+	if (CPU_IS_040_OR_060)
+		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
+	else
+		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
+	addr = vmap(map, size, VM_MAP, pgprot);
+	kfree(map);
+
+	return addr;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *addr,
+		dma_addr_t handle, unsigned long attrs)
+{
+	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
+	vfree(addr);
+}
+
+#else
+
+#include <asm/cacheflush.h>
+
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
+{
+	void *ret;
+
+	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
+		gfp |= GFP_DMA;
+	ret = (void *)__get_free_pages(gfp, get_order(size));
+
+	if (ret != NULL) {
+		memset(ret, 0, size);
+		*dma_handle = virt_to_phys(ret);
+	}
+	return ret;
+}
+
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
+		dma_addr_t dma_handle, unsigned long attrs)
+{
+	free_pages((unsigned long)vaddr, get_order(size));
+}
+
+#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
+
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t handle,
+		size_t size, enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+	case DMA_TO_DEVICE:
+		cache_push(handle, size);
+		break;
+	case DMA_FROM_DEVICE:
+		cache_clear(handle, size);
+		break;
+	default:
+		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
+				   dir);
+		break;
+	}
+}
+
+void arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+	if (pdev->dev.coherent_dma_mask == DMA_MASK_NONE &&
+	    pdev->dev.dma_mask == NULL) {
+		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	}
+}
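Drivers never call these hooks directly: they ask for a coherent buffer through the generic DMA API, and on m68k in this kernel the request bottoms out in arch_dma_alloc()/arch_dma_free() above (an uncached vmap() mapping on MMU systems, GFP_DMA pages elsewhere), while arch_setup_pdev_archdata() supplies platform devices with a default 32-bit mask when the board code set none. A hedged sketch of the usual driver-side pattern (device and buffer size are made up):

/* sketch: how a driver-side allocation reaches the code above */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* ends up in arch_dma_alloc(); the mapping is uncached on MMU parts */
	ring = dma_alloc_coherent(&pdev->dev, 4096, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... hand ring_dma to the device, access 'ring' from the CPU ... */

	dma_free_coherent(&pdev->dev, 4096, ring, ring_dma);
	return 0;
}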
diff --git a/arch/m68k/kernel/early_printk.c b/arch/m68k/kernel/early_printk.c
new file mode 100644
index 0000000..7d3fe08
--- /dev/null
+++ b/arch/m68k/kernel/early_printk.c
@@ -0,0 +1,67 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Finn Thain
+ */
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/setup.h>
+
+extern void mvme16x_cons_write(struct console *co,
+			       const char *str, unsigned count);
+
+asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);
+
+static void __ref debug_cons_write(struct console *c,
+				   const char *s, unsigned n)
+{
+#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
+      defined(CONFIG_COLDFIRE))
+	if (MACH_IS_MVME16x)
+		mvme16x_cons_write(c, s, n);
+	else
+		debug_cons_nputs(s, n);
+#endif
+}
+
+static struct console early_console_instance = {
+	.name  = "debug",
+	.write = debug_cons_write,
+	.flags = CON_PRINTBUFFER | CON_BOOT,
+	.index = -1
+};
+
+static int __init setup_early_printk(char *buf)
+{
+	if (early_console || buf)
+		return 0;
+
+	early_console = &early_console_instance;
+	register_console(early_console);
+
+	return 0;
+}
+early_param("earlyprintk", setup_early_printk);
+
+/*
+ * debug_cons_nputs() defined in arch/m68k/kernel/head.S cannot be called
+ * after init sections are discarded (for platforms that use it).
+ */
+#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
+      defined(CONFIG_COLDFIRE))
+
+static int __init unregister_early_console(void)
+{
+	if (!early_console || MACH_IS_MVME16x)
+		return 0;
+
+	return unregister_console(early_console);
+}
+late_initcall(unregister_early_console);
+
+#endif
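Usage note, derived from setup_early_printk() above: the boot console is enabled by the bare "earlyprintk" flag on the kernel command line. Because early_param() passes NULL only when no value is given and the handler bails out on any non-NULL argument, a variant such as earlyprintk=serial leaves the early console disabled on m68k. For example:

    console=ttyS0 earlyprintk            <- registers the "debug" boot console
    console=ttyS0 earlyprintk=serial     <- no early console (argument rejected)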
diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S
new file mode 100644
index 0000000..97cd3ea
--- /dev/null
+++ b/arch/m68k/kernel/entry.S
@@ -0,0 +1,429 @@
+/* -*- mode: asm -*-
+ *
+ *  linux/arch/m68k/kernel/entry.S
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file README.legal in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ */
+
+/*
+ * entry.S  contains the system-call and fault low-level handling routines.
+ * This also contains the timer-interrupt handler, as well as all interrupts
+ * and faults that can result in a task-switch.
+ *
+ * NOTE: This code handles signal-recognition, which happens every time
+ * after a timer-interrupt and after each system call.
+ *
+ */
+
+/*
+ * 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
+ *               all pointers that used to be 'current' are now entry
+ *               number 0 in the 'current_set' list.
+ *
+ *  6/05/00 RZ:	 added writeback completion after return from sighandler
+ *		 for 68040
+ */
+
+#include <linux/linkage.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/traps.h>
+#include <asm/unistd.h>
+#include <asm/asm-offsets.h>
+#include <asm/entry.h>
+
+.globl system_call, buserr, trap, resume
+.globl sys_call_table
+.globl __sys_fork, __sys_clone, __sys_vfork
+.globl bad_interrupt
+.globl auto_irqhandler_fixup
+.globl user_irqvec_fixup
+
+.text
+ENTRY(__sys_fork)
+	SAVE_SWITCH_STACK
+	jbsr	sys_fork
+	lea     %sp@(24),%sp
+	rts
+
+ENTRY(__sys_clone)
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	jbsr	m68k_clone
+	lea     %sp@(28),%sp
+	rts
+
+ENTRY(__sys_vfork)
+	SAVE_SWITCH_STACK
+	jbsr	sys_vfork
+	lea     %sp@(24),%sp
+	rts
+
+ENTRY(sys_sigreturn)
+	SAVE_SWITCH_STACK
+	movel	%sp,%sp@-		  | switch_stack pointer
+	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+	jbsr	do_sigreturn
+	addql	#8,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(sys_rt_sigreturn)
+	SAVE_SWITCH_STACK
+	movel	%sp,%sp@-		  | switch_stack pointer
+	pea	%sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer
+	jbsr	do_rt_sigreturn
+	addql	#8,%sp
+	RESTORE_SWITCH_STACK
+	rts
+
+ENTRY(buserr)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	jbsr	buserr_c
+	addql	#4,%sp
+	jra	ret_from_exception
+
+ENTRY(trap)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@-		| stack frame pointer argument
+	jbsr	trap_c
+	addql	#4,%sp
+	jra	ret_from_exception
+
+	| After a fork we jump here directly from resume,
+	| so that %d1 contains the previous task
+	| schedule_tail now used regardless of CONFIG_SMP
+ENTRY(ret_from_fork)
+	movel	%d1,%sp@-
+	jsr	schedule_tail
+	addql	#4,%sp
+	jra	ret_from_exception
+
+ENTRY(ret_from_kernel_thread)
+	| a3 contains the kernel thread payload, d7 - its argument
+	movel	%d1,%sp@-
+	jsr	schedule_tail
+	movel	%d7,(%sp)
+	jsr	%a3@
+	addql	#4,%sp
+	jra	ret_from_exception
+
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
+
+#ifdef TRAP_DBG_INTERRUPT
+
+.globl dbginterrupt
+ENTRY(dbginterrupt)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+	movel	%sp,%sp@- 		/* stack frame pointer argument */
+	jsr	dbginterrupt_c
+	addql	#4,%sp
+	jra	ret_from_exception
+#endif
+
+ENTRY(reschedule)
+	/* save top of frame */
+	pea	%sp@
+	jbsr	set_esp0
+	addql	#4,%sp
+	pea	ret_from_exception
+	jmp	schedule
+
+ENTRY(ret_from_user_signal)
+	moveq #__NR_sigreturn,%d0
+	trap #0
+
+ENTRY(ret_from_user_rt_signal)
+	movel #__NR_rt_sigreturn,%d0
+	trap #0
+
+#else
+
+do_trace_entry:
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
+	subql	#4,%sp
+	SAVE_SWITCH_STACK
+	jbsr	syscall_trace
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+	movel	%sp@(PT_OFF_ORIG_D0),%d0
+	cmpl	#NR_syscalls,%d0
+	jcs	syscall
+badsys:
+	movel	#-ENOSYS,%sp@(PT_OFF_D0)
+	jra	ret_from_syscall
+
+do_trace_exit:
+	subql	#4,%sp
+	SAVE_SWITCH_STACK
+	jbsr	syscall_trace
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+	jra	.Lret_from_exception
+
+ENTRY(ret_from_signal)
+	movel	%curptr@(TASK_STACK),%a1
+	tstb	%a1@(TINFO_FLAGS+2)
+	jge	1f
+	jbsr	syscall_trace
+1:	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+/* on 68040 complete pending writebacks if any */
+#ifdef CONFIG_M68040
+	bfextu	%sp@(PT_OFF_FORMATVEC){#0,#4},%d0
+	subql	#7,%d0				| bus error frame ?
+	jbne	1f
+	movel	%sp,%sp@-
+	jbsr	berr_040cleanup
+	addql	#4,%sp
+1:
+#endif
+	jra	.Lret_from_exception
+
+ENTRY(system_call)
+	SAVE_ALL_SYS
+
+	GET_CURRENT(%d1)
+	movel	%d1,%a1
+
+	| save top of frame
+	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+
+	| syscall trace?
+	tstb	%a1@(TINFO_FLAGS+2)
+	jmi	do_trace_entry
+	cmpl	#NR_syscalls,%d0
+	jcc	badsys
+syscall:
+	jbsr	@(sys_call_table,%d0:l:4)@(0)
+	movel	%d0,%sp@(PT_OFF_D0)	| save the return value
+ret_from_syscall:
+	|oriw	#0x0700,%sr
+	movel	%curptr@(TASK_STACK),%a1
+	movew	%a1@(TINFO_FLAGS+2),%d0
+	jne	syscall_exit_work
+1:	RESTORE_ALL
+
+syscall_exit_work:
+	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
+	bnes	1b			| if so, skip resched, signals
+	lslw	#1,%d0
+	jcs	do_trace_exit
+	jmi	do_delayed_trace
+	lslw	#8,%d0
+	jne	do_signal_return
+	pea	resume_userspace
+	jra	schedule
+
+
+ENTRY(ret_from_exception)
+.Lret_from_exception:
+	btst	#5,%sp@(PT_OFF_SR)	| check if returning to kernel
+	bnes	1f			| if so, skip resched, signals
+	| only allow interrupts when we are really the last one on the
+	| kernel stack, otherwise stack overflow can occur during
+	| heavy interrupt load
+	andw	#ALLOWINT,%sr
+
+resume_userspace:
+	movel	%curptr@(TASK_STACK),%a1
+	moveb	%a1@(TINFO_FLAGS+3),%d0
+	jne	exit_work
+1:	RESTORE_ALL
+
+exit_work:
+	| save top of frame
+	movel	%sp,%curptr@(TASK_THREAD+THREAD_ESP0)
+	lslb	#1,%d0
+	jne	do_signal_return
+	pea	resume_userspace
+	jra	schedule
+
+
+do_signal_return:
+	|andw	#ALLOWINT,%sr
+	subql	#4,%sp			| dummy return address
+	SAVE_SWITCH_STACK
+	pea	%sp@(SWITCH_STACK_SIZE)
+	bsrl	do_notify_resume
+	addql	#4,%sp
+	RESTORE_SWITCH_STACK
+	addql	#4,%sp
+	jbra	resume_userspace
+
+do_delayed_trace:
+	bclr	#7,%sp@(PT_OFF_SR)	| clear trace bit in SR
+	pea	1			| send SIGTRAP
+	movel	%curptr,%sp@-
+	pea	LSIGTRAP
+	jbsr	send_sig
+	addql	#8,%sp
+	addql	#4,%sp
+	jbra	resume_userspace
+
+
+/* This is the main interrupt handler for autovector interrupts */
+
+ENTRY(auto_inthandler)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+					|  put exception # in d0
+	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
+	subw	#VEC_SPUR,%d0
+
+	movel	%sp,%sp@-
+	movel	%d0,%sp@-		|  put vector # on stack
+auto_irqhandler_fixup = . + 2
+	jsr	do_IRQ			|  process the IRQ
+	addql	#8,%sp			|  pop parameters off stack
+	jra	ret_from_exception
+
+/* Handler for user defined interrupt vectors */
+
+ENTRY(user_inthandler)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+					|  put exception # in d0
+	bfextu	%sp@(PT_OFF_FORMATVEC){#4,#10},%d0
+user_irqvec_fixup = . + 2
+	subw	#VEC_USER,%d0
+
+	movel	%sp,%sp@-
+	movel	%d0,%sp@-		|  put vector # on stack
+	jsr	do_IRQ			|  process the IRQ
+	addql	#8,%sp			|  pop parameters off stack
+	jra	ret_from_exception
+
+/* Handler for uninitialized and spurious interrupts */
+
+ENTRY(bad_inthandler)
+	SAVE_ALL_INT
+	GET_CURRENT(%d0)
+
+	movel	%sp,%sp@-
+	jsr	handle_badint
+	addql	#4,%sp
+	jra	ret_from_exception
+
+resume:
+	/*
+	 * Beware - when entering resume, prev (the current task) is
+	 * in a0, next (the new task) is in a1, so don't change these
+	 * registers until their contents are no longer needed.
+	 */
+
+	/* save sr */
+	movew	%sr,%a0@(TASK_THREAD+THREAD_SR)
+
+	/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
+	movec	%sfc,%d0
+	movew	%d0,%a0@(TASK_THREAD+THREAD_FS)
+
+	/* save usp */
+	/* it is better to use a movel here instead of a movew 8*) */
+	movec	%usp,%d0
+	movel	%d0,%a0@(TASK_THREAD+THREAD_USP)
+
+	/* save non-scratch registers on stack */
+	SAVE_SWITCH_STACK
+
+	/* save current kernel stack pointer */
+	movel	%sp,%a0@(TASK_THREAD+THREAD_KSP)
+
+	/* save floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+	tstl	m68k_fputype
+	jeq	3f
+#endif
+	fsave	%a0@(TASK_THREAD+THREAD_FPSTATE)
+
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+	btst	#3,m68k_cputype+3
+	beqs	1f
+#endif
+	/* The 060 FPU keeps status in bits 15-8 of the first longword */
+	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE+2)
+	jeq	3f
+#if !defined(CPU_M68060_ONLY)
+	jra	2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1:	tstb	%a0@(TASK_THREAD+THREAD_FPSTATE)
+	jeq	3f
+#endif
+2:	fmovemx	%fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
+	fmoveml	%fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
+3:
+#endif	/* CONFIG_M68KFPU_EMU_ONLY */
+	/* Return previous task in %d1 */
+	movel	%curptr,%d1
+
+	/* switch to new task (a1 contains new task) */
+	movel	%a1,%curptr
+
+	/* restore floating point context */
+#ifndef CONFIG_M68KFPU_EMU_ONLY
+#ifdef CONFIG_M68KFPU_EMU
+	tstl	m68k_fputype
+	jeq	4f
+#endif
+#if defined(CONFIG_M68060)
+#if !defined(CPU_M68060_ONLY)
+	btst	#3,m68k_cputype+3
+	beqs	1f
+#endif
+	/* The 060 FPU keeps status in bits 15-8 of the first longword */
+	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE+2)
+	jeq	3f
+#if !defined(CPU_M68060_ONLY)
+	jra	2f
+#endif
+#endif /* CONFIG_M68060 */
+#if !defined(CPU_M68060_ONLY)
+1:	tstb	%a1@(TASK_THREAD+THREAD_FPSTATE)
+	jeq	3f
+#endif
+2:	fmovemx	%a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
+	fmoveml	%a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
+3:	frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
+4:
+#endif	/* CONFIG_M68KFPU_EMU_ONLY */
+
+	/* restore the kernel stack pointer */
+	movel	%a1@(TASK_THREAD+THREAD_KSP),%sp
+
+	/* restore non-scratch registers */
+	RESTORE_SWITCH_STACK
+
+	/* restore user stack pointer */
+	movel	%a1@(TASK_THREAD+THREAD_USP),%a0
+	movel	%a0,%usp
+
+	/* restore fs (sfc,%dfc) */
+	movew	%a1@(TASK_THREAD+THREAD_FS),%a0
+	movec	%a0,%sfc
+	movec	%a0,%dfc
+
+	/* restore status register */
+	movew	%a1@(TASK_THREAD+THREAD_SR),%sr
+
+	rts
+
+#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
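For readers not fluent in 68k assembly, the core of system_call above is the pair "cmpl #NR_syscalls,%d0 / jcc badsys" followed by "jbsr @(sys_call_table,%d0:l:4)@(0)": an unsigned bounds check, then a memory-indirect call through sys_call_table scaled by 4 bytes per entry, with the return value written back to the saved d0 slot of pt_regs. In rough C, glossing over how SAVE_ALL_SYS spills the argument registers to the stack, the dispatch is:

/* illustrative C equivalent of the syscall dispatch in entry.S */
#include <linux/errno.h>
#include <asm/unistd.h>

typedef long (*syscall_fn)(unsigned long, unsigned long, unsigned long,
			   unsigned long, unsigned long);
extern syscall_fn sys_call_table[];

static long dispatch(unsigned long nr, unsigned long a, unsigned long b,
		     unsigned long c, unsigned long d, unsigned long e)
{
	if (nr >= NR_syscalls)		/* "cmpl #NR_syscalls" / badsys path */
		return -ENOSYS;		/* badsys stores -ENOSYS in PT_OFF_D0 */
	return sys_call_table[nr](a, b, c, d, e);
}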
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
new file mode 100644
index 0000000..a547884
--- /dev/null
+++ b/arch/m68k/kernel/head.S
@@ -0,0 +1,3867 @@
+/* -*- mode: asm -*-
+**
+** head.S -- This file contains the initial boot code for the
+**	     Linux/68k kernel.
+**
+** Copyright 1993 by Hamish Macdonald
+**
+** 68040 fixes by Michael Rausch
+** 68060 fixes by Roman Hodek
+** MMU cleanup by Randy Thelen
+** Final MMU cleanup by Roman Zippel
+**
+** Atari support by Andreas Schwab, using ideas of Robert de Vries
+** and Bjoern Brauel
+** VME Support by Richard Hirst
+**
+** 94/11/14 Andreas Schwab: put kernel at PAGESIZE
+** 94/11/18 Andreas Schwab: remove identity mapping of STRAM for Atari
+** ++ Bjoern & Roman: ATARI-68040 support for the Medusa
+** 95/11/18 Richard Hirst: Added MVME166 support
+** 96/04/26 Guenther Kelleter: fixed identity mapping for Falcon with
+**			      Magnum- and FX-alternate ram
+** 98/04/25 Phil Blundell: added HP300 support
+** 1998/08/30 David Kilzer: Added support for font_desc structures
+**            for linux-2.1.115
+** 1999/02/11  Richard Zidlicky: added Q40 support (initial version 99/01/01)
+** 2004/05/13 Kars de Jong: Finalised HP300 support
+**
+** This file is subject to the terms and conditions of the GNU General Public
+** License. See the file README.legal in the main directory of this archive
+** for more details.
+**
+*/
+
+/*
+ * Linux startup code.
+ *
+ * At this point, the boot loader has:
+ * Disabled interrupts
+ * Disabled caches
+ * Put us in supervisor state.
+ *
+ * The kernel setup code takes the following steps:
+ * .  Raise interrupt level
+ * .  Set up initial kernel memory mapping.
+ *    .  This sets up a mapping of the 4M of memory the kernel is located in.
+ *    .  It also does a mapping of any initial machine specific areas.
+ * .  Enable the MMU
+ * .  Enable cache memories
+ * .  Jump to kernel startup
+ *
+ * Much of the file restructuring was to accomplish:
+ * 1) Remove register dependency through-out the file.
+ * 2) Increase use of subroutines to perform functions
+ * 3) Increase readability of the code
+ *
+ * Of course, readability is a subjective issue, so it will never be
+ * argued that that goal was accomplished.  It was merely a goal.
+ * A key way to help make code more readable is to give good
+ * documentation.  So, the first thing you will find is exhaustive
+ * write-ups on the structure of the file, and the features of the
+ * functional subroutines.
+ *
+ * General Structure:
+ * ------------------
+ *	Without a doubt the single largest chunk of head.S is spent
+ * mapping the kernel and I/O physical space into the logical range
+ * for the kernel.
+ *	There are new subroutines and data structures to make MMU
+ * support cleaner and easier to understand.
+ *	First, you will find a routine called "mmu_map" which maps
+ * a logical to a physical region for some length given a cache
+ * type on behalf of the caller.  This routine makes writing the
+ * actual per-machine specific code very simple.
+ *	A central part of the code, but not a subroutine in itself,
+ * is the mmu_init code which is broken down into mapping the kernel
+ * (the same for all machines) and mapping machine-specific I/O
+ * regions.
+ *	Also, there will be a description of engaging the MMU and
+ * caches.
+ *	You will notice that there is a chunk of code which
+ * can emit the entire MMU mapping of the machine.  This is present
+ * only in debug modes and can be very helpful.
+ *	Further, there is a new console driver in head.S that is
+ * also only engaged in debug mode.  Currently, it's only supported
+ * on the Macintosh class of machines.  However, it is hoped that
+ * others will plug-in support for specific machines.
+ *
+ * ######################################################################
+ *
+ * mmu_map
+ * -------
+ *	mmu_map was written for two key reasons.  First, it was clear
+ * that it was very difficult to read the previous code for mapping
+ * regions of memory.  Second, the Macintosh required such extensive
+ * memory allocations that it didn't make sense to propagate the
+ * existing code any further.
+ *	mmu_map requires some parameters:
+ *
+ *	mmu_map (logical, physical, length, cache_type)
+ *
+ *	While this essentially describes the function in the abstract, you'll
+ * find a more in-depth description of the parameters at the implementation site.
+ *
+ * mmu_get_root_table_entry
+ * ------------------------
+ * mmu_get_ptr_table_entry
+ * -----------------------
+ * mmu_get_page_table_entry
+ * ------------------------
+ *
+ *	These routines are used by other mmu routines to get a pointer into
+ * a table; if necessary, a new table is allocated. They work basically
+ * like pmd_alloc() and pte_alloc() in <asm/pgtable.h>. The root table of
+ * course only needs to be allocated once, in mmu_get_root_table_entry, so
+ * some mmu specific initialization is also done there. The second page
+ * at the start of the kernel (the first page is unmapped later) is used for
+ * the kernel_pg_dir. It must be at a position known at link time (as it's used
+ * to initialize the init task struct), and since it needs special cache
+ * settings, it's easiest to use this page; the rest of the page is used
+ * for further pointer tables.
+ * mmu_get_page_table_entry always allocates a whole page for page tables;
+ * this means 1024 pages, and so 4MB of memory, can be mapped. It doesn't make
+ * sense to manage page tables in smaller pieces as nearly all mappings have
+ * that size.
+ *
+ * ######################################################################
+ *
+ *
+ * ######################################################################
+ *
+ * mmu_engage
+ * ----------
+ *	Thanks to a small helper routine, enabling the mmu got quite simple
+ * and there is only one way left. mmu_engage makes a completely new mapping
+ * that includes only the absolute minimum needed to jump to the final
+ * position and to restore the original mapping.
+ * As this code doesn't need a transparent translation register anymore, this
+ * means all registers are free to be used by machines that need them for
+ * other purposes.
+ *
+ * ######################################################################
+ *
+ * mmu_print
+ * ---------
+ *	This algorithm will print out the page tables of the system as
+ * appropriate for an 030 or an 040.  This is useful for debugging purposes
+ * and as such is enclosed in #ifdef MMU_PRINT/#endif clauses.
+ *
+ * ######################################################################
+ *
+ * console_init
+ * ------------
+ *	The console is also able to be turned off.  The console in head.S
+ * is specifically for debugging and can be very useful.  It is surrounded by
+ * #ifdef / #endif clauses so it doesn't have to ship in known-good
+ * kernels.  Its basic algorithm is to determine the size of the screen
+ * (in height/width and bit depth) and then use that information for
+ * displaying an 8x8 font or an 8x16 (widthxheight).  I prefer the 8x8 for
+ * debugging so I can see more good data.  But it was trivial to add support
+ * for both fonts, so I included it.
+ *	Also, the algorithm for plotting pixels is abstracted so that in
+ * theory other platforms could add support for different kinds of frame
+ * buffers.  This could be very useful.
+ *
+ * console_put_penguin
+ * -------------------
+ *	An important part of any Linux bring up is the penguin and there's
+ * nothing like getting the Penguin on the screen!  This algorithm will work
+ * on any machine for which there is a console_plot_pixel.
+ *
+ * console_scroll
+ * --------------
+ *	My hope is that the scroll algorithm does the right thing on the
+ * various platforms, but it wouldn't be hard to add the test conditions
+ * and new code if it doesn't.
+ *
+ * console_putc
+ * -------------
+ *
+ * ######################################################################
+ *
+ *	Register usage has been greatly simplified within head.S. Every
+ * subroutine saves and restores all registers that it modifies (except, of
+ * course, any register it returns a value in). So the only register that
+ * needs to be initialized is the stack pointer.
+ * All other init code and data is now placed in the init section, so it will
+ * be automatically freed at the end of the kernel initialization.
+ *
+ * ######################################################################
+ *
+ * options
+ * -------
+ *	There are many options available in a build of this file.  I've
+ * taken the time to describe them here to save you the time of searching
+ * for them and trying to understand what they mean.
+ *
+ * CONFIG_xxx:	These are the obvious machine configuration defines created
+ * during configuration.  These are defined in autoconf.h.
+ *
+ * CONSOLE_DEBUG:  Only supports a Mac frame buffer but could easily be
+ * extended to support other platforms.
+ *
+ * TEST_MMU:	This is a test harness for running on any given machine but
+ * getting an MMU dump for another class of machine.  The classes of machines
+ * that can be tested are any of the makes (Atari, Amiga, Mac, VME, etc.)
+ * and any of the models (030, 040, 060, etc.).
+ *
+ *	NOTE:	TEST_MMU is NOT permanent!  It is scheduled to be removed
+ *		When head.S boots on Atari, Amiga, Macintosh, and VME
+ *		machines.  At that point the underlying logic will be
+ *		believed to be solid enough to be trusted, and TEST_MMU
+ *		can be dropped.  Do note that that will clean up the
+ *		head.S code significantly as large blocks of #if/#else
+ *		clauses can be removed.
+ *
+ * MMU_NOCACHE_KERNEL:	On the Macintosh platform there was an inquiry into
+ * determining why devices don't appear to work.  A test case was to remove
+ * the cacheability of the kernel bits.
+ *
+ * MMU_PRINT:	There is a routine built into head.S that can display the
+ * MMU data structures.  It outputs its result through the serial_putc
+ * interface.  So wherever that winds up driving data, that's where the
+ * mmu struct will appear.
+ *
+ * SERIAL_DEBUG:	There are a series of putc() macro statements
+ * scattered throughout the code to report progress and status to the
+ * person sitting at the console.  This constant determines whether those
+ * are used.
+ *
+ * DEBUG:	This is the standard DEBUG flag that can be set for building
+ *		the kernel.  It has the effect of adding additional tests into
+ *		the code.
+ *
+ * FONT_6x11:
+ * FONT_8x8:
+ * FONT_8x16:
+ *		In theory these could be determined at run time or handed
+ *		over by the booter.  But, let's be real, it's a fine hard
+ *		coded value.  (But, you will notice the code is run-time
+ *		flexible!)  A pointer to the font's struct font_desc
+ *		is kept locally in Lconsole_font.  It is used to determine
+ *		font size information dynamically.
+ *
+ * Atari constants:
+ * USE_PRINTER:	Use the printer port for serial debug.
+ * USE_SCC_B:	Use the SCC port A (Serial2) for serial debug.
+ * USE_SCC_A:	Use the SCC port B (Modem2) for serial debug.
+ * USE_MFP:	Use the ST-MFP port (Modem1) for serial debug.
+ *
+ * Macintosh constants:
+ * MAC_USE_SCC_A: Use SCC port A (modem) for serial debug.
+ * MAC_USE_SCC_B: Use SCC port B (printer) for serial debug.
+ */
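+
+/*
+ * Illustrative note (not kernel API): with 4K pages the virtual address is
+ * split 7/7/6 between the three table levels that mmu_map and the
+ * mmu_get_*_table_entry routines manage; see the ROOT/PTR/PAGE_INDEX_SHIFT
+ * and *_TABLE_SIZE constants below.  In rough C:
+ *
+ *	root_idx = vaddr >> 25;		   128 root table entries
+ *	ptr_idx  = (vaddr >> 18) & 127;	   128 pointer table entries
+ *	page_idx = (vaddr >> 12) & 63;	    64 page table entries
+ *
+ * root_table[root_idx] selects a pointer table, ptr_table[ptr_idx] selects
+ * a page table, and page_table[page_idx] is the 4K page descriptor that
+ * carries the cache bits passed to mmu_map.
+ */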
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/bootinfo.h>
+#include <asm/bootinfo-amiga.h>
+#include <asm/bootinfo-atari.h>
+#include <asm/bootinfo-hp300.h>
+#include <asm/bootinfo-mac.h>
+#include <asm/bootinfo-q40.h>
+#include <asm/bootinfo-vme.h>
+#include <asm/setup.h>
+#include <asm/entry.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#ifdef CONFIG_MAC
+#  include <asm/machw.h>
+#endif
+
+#ifdef CONFIG_EARLY_PRINTK
+#  define SERIAL_DEBUG
+#  if defined(CONFIG_MAC) && defined(CONFIG_FONT_SUPPORT)
+#    define CONSOLE_DEBUG
+#  endif
+#endif
+
+#undef MMU_PRINT
+#undef MMU_NOCACHE_KERNEL
+#undef DEBUG
+
+/*
+ * For the head.S console, there are three supported fonts, 6x11, 8x16 and 8x8.
+ * The 8x8 font is harder to read but fits more on the screen.
+ */
+#define FONT_8x8	/* default */
+/* #define FONT_8x16 */	/* 2nd choice */
+/* #define FONT_6x11 */	/* 3rd choice */
+
+.globl kernel_pg_dir
+.globl availmem
+.globl m68k_init_mapped_size
+.globl m68k_pgtable_cachemode
+.globl m68k_supervisor_cachemode
+#ifdef CONFIG_MVME16x
+.globl mvme_bdid
+#endif
+#ifdef CONFIG_Q40
+.globl q40_mem_cptr
+#endif
+
+CPUTYPE_040	= 1	/* indicates an 040 */
+CPUTYPE_060	= 2	/* indicates an 060 */
+CPUTYPE_0460	= 3	/* if either above are set, this is set */
+CPUTYPE_020	= 4	/* indicates an 020 */
+
+/* Translation control register */
+TC_ENABLE = 0x8000
+TC_PAGE8K = 0x4000
+TC_PAGE4K = 0x0000
+
+/* Transparent translation registers */
+TTR_ENABLE	= 0x8000	/* enable transparent translation */
+TTR_ANYMODE	= 0x4000	/* user and kernel mode access */
+TTR_KERNELMODE	= 0x2000	/* only kernel mode access */
+TTR_USERMODE	= 0x0000	/* only user mode access */
+TTR_CI		= 0x0400	/* inhibit cache */
+TTR_RW		= 0x0200	/* read/write mode */
+TTR_RWM		= 0x0100	/* read/write mask */
+TTR_FCB2	= 0x0040	/* function code base bit 2 */
+TTR_FCB1	= 0x0020	/* function code base bit 1 */
+TTR_FCB0	= 0x0010	/* function code base bit 0 */
+TTR_FCM2	= 0x0004	/* function code mask bit 2 */
+TTR_FCM1	= 0x0002	/* function code mask bit 1 */
+TTR_FCM0	= 0x0001	/* function code mask bit 0 */
+
+/* Cache Control registers */
+CC6_ENABLE_D	= 0x80000000	/* enable data cache (680[46]0) */
+CC6_FREEZE_D	= 0x40000000	/* freeze data cache (68060) */
+CC6_ENABLE_SB	= 0x20000000	/* enable store buffer (68060) */
+CC6_PUSH_DPI	= 0x10000000	/* disable CPUSH invalidation (68060) */
+CC6_HALF_D	= 0x08000000	/* half-cache mode for data cache (68060) */
+CC6_ENABLE_B	= 0x00800000	/* enable branch cache (68060) */
+CC6_CLRA_B	= 0x00400000	/* clear all entries in branch cache (68060) */
+CC6_CLRU_B	= 0x00200000	/* clear user entries in branch cache (68060) */
+CC6_ENABLE_I	= 0x00008000	/* enable instruction cache (680[46]0) */
+CC6_FREEZE_I	= 0x00004000	/* freeze instruction cache (68060) */
+CC6_HALF_I	= 0x00002000	/* half-cache mode for instruction cache (68060) */
+CC3_ALLOC_WRITE	= 0x00002000	/* write allocate mode(68030) */
+CC3_ENABLE_DB	= 0x00001000	/* enable data burst (68030) */
+CC3_CLR_D	= 0x00000800	/* clear data cache (68030) */
+CC3_CLRE_D	= 0x00000400	/* clear entry in data cache (68030) */
+CC3_FREEZE_D	= 0x00000200	/* freeze data cache (68030) */
+CC3_ENABLE_D	= 0x00000100	/* enable data cache (68030) */
+CC3_ENABLE_IB	= 0x00000010	/* enable instruction burst (68030) */
+CC3_CLR_I	= 0x00000008	/* clear instruction cache (68030) */
+CC3_CLRE_I	= 0x00000004	/* clear entry in instruction cache (68030) */
+CC3_FREEZE_I	= 0x00000002	/* freeze instruction cache (68030) */
+CC3_ENABLE_I	= 0x00000001	/* enable instruction cache (68030) */
+
+/* Miscellaneous definitions */
+PAGESIZE	= 4096
+PAGESHIFT	= 12
+
+ROOT_TABLE_SIZE	= 128
+PTR_TABLE_SIZE	= 128
+PAGE_TABLE_SIZE	= 64
+ROOT_INDEX_SHIFT = 25
+PTR_INDEX_SHIFT  = 18
+PAGE_INDEX_SHIFT = 12
+
+#ifdef DEBUG
+/* When debugging use readable names for labels */
+#ifdef __STDC__
+#define L(name) .head.S.##name
+#else
+#define L(name) .head.S./**/name
+#endif
+#else
+#ifdef __STDC__
+#define L(name) .L##name
+#else
+#define L(name) .L/**/name
+#endif
+#endif
+
+/* The __INITDATA stuff is a no-op when ftrace or kgdb are turned on */
+#ifndef __INITDATA
+#define __INITDATA	.data
+#define __FINIT		.previous
+#endif
+
+/* Several macros to make the writing of subroutines easier:
+ * - func_start marks the beginning of the routine which setups the frame
+ *   register and saves the registers, it also defines another macro
+ *   to automatically restore the registers again.
+ * - func_return marks the end of the routine and simply calls the prepared
+ *   macro to restore registers and jump back to the caller.
+ * - func_define generates another macro to automatically put arguments
+ *   onto the stack call the subroutine and cleanup the stack again.
+ */
+
+/* Within subroutines these macros can be used to access the arguments
+ * on the stack. With STACK some allocated memory on the stack can be
+ * accessed and ARG0 points to the return address (used by mmu_engage).
+ */
+#define	STACK	%a6@(stackstart)
+#define ARG0	%a6@(4)
+#define ARG1	%a6@(8)
+#define ARG2	%a6@(12)
+#define ARG3	%a6@(16)
+#define ARG4	%a6@(20)
+
+.macro	func_start	name,saveregs,stack=0
+L(\name):
+	linkw	%a6,#-\stack
+	moveml	\saveregs,%sp@-
+.set	stackstart,-\stack
+
+.macro	func_return_\name
+	moveml	%sp@+,\saveregs
+	unlk	%a6
+	rts
+.endm
+.endm
+
+.macro	func_return	name
+	func_return_\name
+.endm
+
+.macro	func_call	name
+	jbsr	L(\name)
+.endm
+
+.macro	move_stack	nr,arg1,arg2,arg3,arg4
+.if	\nr
+	move_stack	"(\nr-1)",\arg2,\arg3,\arg4
+	movel	\arg1,%sp@-
+.endif
+.endm
+
+.macro	func_define	name,nr=0
+.macro	\name	arg1,arg2,arg3,arg4
+	move_stack	\nr,\arg1,\arg2,\arg3,\arg4
+	func_call	\name
+.if	\nr
+	lea	%sp@(\nr*4),%sp
+.endif
+.endm
+.endm
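+
+/* For example (an illustrative expansion, not literal assembler output):
+ * "func_define mmu_map,4" defines a macro "mmu_map" so that a call such as
+ *
+ *	mmu_map	#0x80000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+ *
+ * pushes the arguments right to left, calls the routine and pops them again:
+ *
+ *	movel	#_PAGE_NOCACHE_S,%sp@-
+ *	movel	#0x01000000,%sp@-
+ *	movel	#0,%sp@-
+ *	movel	#0x80000000,%sp@-
+ *	jbsr	L(mmu_map)
+ *	lea	%sp@(16),%sp
+ */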
+
+func_define	mmu_map,4
+func_define	mmu_map_tt,4
+func_define	mmu_fixup_page_mmu_cache,1
+func_define	mmu_temp_map,2
+func_define	mmu_engage
+func_define	mmu_get_root_table_entry,1
+func_define	mmu_get_ptr_table_entry,2
+func_define	mmu_get_page_table_entry,2
+func_define	mmu_print
+func_define	get_new_page
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+func_define	set_leds
+#endif
+
+.macro	mmu_map_eq	arg1,arg2,arg3
+	mmu_map	\arg1,\arg1,\arg2,\arg3
+.endm
+
+.macro	get_bi_record	record
+	pea	\record
+	func_call	get_bi_record
+	addql	#4,%sp
+.endm
+
+func_define	serial_putc,1
+func_define	console_putc,1
+
+func_define	console_init
+func_define	console_put_penguin
+func_define	console_plot_pixel,3
+func_define	console_scroll
+
+.macro	putc	ch
+#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
+	pea	\ch
+#endif
+#ifdef CONSOLE_DEBUG
+	func_call	console_putc
+#endif
+#ifdef SERIAL_DEBUG
+	func_call	serial_putc
+#endif
+#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
+	addql	#4,%sp
+#endif
+.endm
+
+.macro	dputc	ch
+#ifdef DEBUG
+	putc	\ch
+#endif
+.endm
+
+func_define	putn,1
+
+.macro	dputn	nr
+#ifdef DEBUG
+	putn	\nr
+#endif
+.endm
+
+.macro	puts		string
+#if defined(CONSOLE_DEBUG) || defined(SERIAL_DEBUG)
+	__INITDATA
+.Lstr\@:
+	.string	"\string"
+	__FINIT
+	pea	%pc@(.Lstr\@)
+	func_call	puts
+	addql	#4,%sp
+#endif
+.endm
+
+.macro	dputs	string
+#ifdef DEBUG
+	puts	"\string"
+#endif
+.endm
+
+#define is_not_amiga(lab) cmpl &MACH_AMIGA,%pc@(m68k_machtype); jne lab
+#define is_not_atari(lab) cmpl &MACH_ATARI,%pc@(m68k_machtype); jne lab
+#define is_not_mac(lab) cmpl &MACH_MAC,%pc@(m68k_machtype); jne lab
+#define is_not_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jne lab
+#define is_not_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jne lab
+#define is_not_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jne lab
+#define is_mvme147(lab) cmpl &MACH_MVME147,%pc@(m68k_machtype); jeq lab
+#define is_mvme16x(lab) cmpl &MACH_MVME16x,%pc@(m68k_machtype); jeq lab
+#define is_bvme6000(lab) cmpl &MACH_BVME6000,%pc@(m68k_machtype); jeq lab
+#define is_not_hp300(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); jne lab
+#define is_not_apollo(lab) cmpl &MACH_APOLLO,%pc@(m68k_machtype); jne lab
+#define is_not_q40(lab) cmpl &MACH_Q40,%pc@(m68k_machtype); jne lab
+#define is_not_sun3x(lab) cmpl &MACH_SUN3X,%pc@(m68k_machtype); jne lab
+
+#define hasnt_leds(lab) cmpl &MACH_HP300,%pc@(m68k_machtype); \
+			jeq 42f; \
+			cmpl &MACH_APOLLO,%pc@(m68k_machtype); \
+			jne lab ;\
+		42:\
+
+#define is_040_or_060(lab)	btst &CPUTYPE_0460,%pc@(L(cputype)+3); jne lab
+#define is_not_040_or_060(lab)	btst &CPUTYPE_0460,%pc@(L(cputype)+3); jeq lab
+#define is_040(lab)		btst &CPUTYPE_040,%pc@(L(cputype)+3); jne lab
+#define is_060(lab)		btst &CPUTYPE_060,%pc@(L(cputype)+3); jne lab
+#define is_not_060(lab)		btst &CPUTYPE_060,%pc@(L(cputype)+3); jeq lab
+#define is_020(lab)		btst &CPUTYPE_020,%pc@(L(cputype)+3); jne lab
+#define is_not_020(lab)		btst &CPUTYPE_020,%pc@(L(cputype)+3); jeq lab
+
+/* On the HP300 we use the on-board LEDs for debug output before
+   the console is running.  Writing a 1 bit turns the corresponding LED
+   _off_ - on the 340 bit 7 is towards the back panel of the machine.  */
+.macro	leds	mask
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+	hasnt_leds(.Lled\@)
+	pea	\mask
+	func_call	set_leds
+	addql	#4,%sp
+.Lled\@:
+#endif
+.endm
+
+__HEAD
+ENTRY(_stext)
+/*
+ * Version numbers of the bootinfo interface
+ * The area from _stext to _start will later be used as kernel pointer table
+ */
+	bras	1f	/* Jump over bootinfo version numbers */
+
+	.long	BOOTINFOV_MAGIC
+	.long	MACH_AMIGA, AMIGA_BOOTI_VERSION
+	.long	MACH_ATARI, ATARI_BOOTI_VERSION
+	.long	MACH_MVME147, MVME147_BOOTI_VERSION
+	.long	MACH_MVME16x, MVME16x_BOOTI_VERSION
+	.long	MACH_BVME6000, BVME6000_BOOTI_VERSION
+	.long	MACH_MAC, MAC_BOOTI_VERSION
+	.long	MACH_Q40, Q40_BOOTI_VERSION
+	.long	MACH_HP300, HP300_BOOTI_VERSION
+	.long	0
+1:	jra	__start
+
+.equ	kernel_pg_dir,_stext
+
+.equ	.,_stext+PAGESIZE
+
+ENTRY(_start)
+	jra	__start
+__INIT
+ENTRY(__start)
+/*
+ * Setup initial stack pointer
+ */
+	lea	%pc@(_stext),%sp
+
+/*
+ * Record the CPU and machine type.
+ */
+	get_bi_record	BI_MACHTYPE
+	lea	%pc@(m68k_machtype),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_FPUTYPE
+	lea	%pc@(m68k_fputype),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MMUTYPE
+	lea	%pc@(m68k_mmutype),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_CPUTYPE
+	lea	%pc@(m68k_cputype),%a1
+	movel	%a0@,%a1@
+
+	leds	0x1
+
+#ifdef CONFIG_MAC
+/*
+ * For Macintosh, we need to determine the display parameters early (at least
+ * while debugging it).
+ */
+
+	is_not_mac(L(test_notmac))
+
+	get_bi_record	BI_MAC_VADDR
+	lea	%pc@(L(mac_videobase)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_VDEPTH
+	lea	%pc@(L(mac_videodepth)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_VDIM
+	lea	%pc@(L(mac_dimensions)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_VROW
+	lea	%pc@(L(mac_rowbytes)),%a1
+	movel	%a0@,%a1@
+
+	get_bi_record	BI_MAC_SCCBASE
+	lea	%pc@(L(mac_sccbase)),%a1
+	movel	%a0@,%a1@
+
+L(test_notmac):
+#endif /* CONFIG_MAC */
+
+
+/*
+ * There are ultimately two pieces of information we want for all kinds of
+ * processors: CpuType and CacheBits.  The CPUTYPE was passed in from the
+ * booter and is converted here from a booter type definition to a separate bit
+ * number which allows for the standard is_0x0 macro tests.
+ */
+	movel	%pc@(m68k_cputype),%d0
+	/*
+	 * Assume it's an 030
+	 */
+	clrl	%d1
+
+	/*
+	 * Test the BootInfo cputype for 060
+	 */
+	btst	#CPUB_68060,%d0
+	jeq	1f
+	bset	#CPUTYPE_060,%d1
+	bset	#CPUTYPE_0460,%d1
+	jra	3f
+1:
+	/*
+	 * Test the BootInfo cputype for 040
+	 */
+	btst	#CPUB_68040,%d0
+	jeq	2f
+	bset	#CPUTYPE_040,%d1
+	bset	#CPUTYPE_0460,%d1
+	jra	3f
+2:
+	/*
+	 * Test the BootInfo cputype for 020
+	 */
+	btst	#CPUB_68020,%d0
+	jeq	3f
+	bset	#CPUTYPE_020,%d1
+	jra	3f
+3:
+	/*
+	 * Record the cpu type
+	 */
+	lea	%pc@(L(cputype)),%a0
+	movel	%d1,%a0@
+
+	/*
+	 * NOTE:
+	 *
+	 * Now the macros are valid:
+	 *	is_040_or_060
+	 *	is_not_040_or_060
+	 *	is_040
+	 *	is_060
+	 *	is_not_060
+	 */
+
+	/*
+	 * Determine the cache mode for pages holding MMU tables
+	 * and for supervisor mode, unused for '020 and '030
+	 */
+	clrl	%d0
+	clrl	%d1
+
+	is_not_040_or_060(L(save_cachetype))
+
+	/*
+	 * '040 or '060
+	 * d1 := cacheable write-through
+	 * NOTE: The 68040 manual strongly recommends non-cached for MMU tables,
+	 * but we have been using write-through since at least 2.0.29 so I
+	 * guess it is OK.
+	 */
+#ifdef CONFIG_060_WRITETHROUGH
+	/*
+	 * If this is a 68060 board using drivers with cache coherency
+	 * problems, then supervisor memory accesses need to be write-through
+	 * also; otherwise, we want copyback.
+	 */
+
+	is_not_060(1f)
+	movel	#_PAGE_CACHE040W,%d0
+	jra	L(save_cachetype)
+#endif /* CONFIG_060_WRITETHROUGH */
+1:
+	movew	#_PAGE_CACHE040,%d0
+
+	movel	#_PAGE_CACHE040W,%d1
+
+L(save_cachetype):
+	/* Save cache mode for supervisor mode and page tables
+	 */
+	lea	%pc@(m68k_supervisor_cachemode),%a0
+	movel	%d0,%a0@
+	lea	%pc@(m68k_pgtable_cachemode),%a0
+	movel	%d1,%a0@
+
+/*
+ * raise interrupt level
+ */
+	movew	#0x2700,%sr
+
+/*
+   If running on an Atari, determine the I/O base of the
+   serial port and test if we are running on a Medusa or Hades.
+   This test is necessary here, because on the Hades the serial
+   port is only accessible in the high I/O memory area.
+
+   The test whether it is a Medusa is done by writing to the byte at
+   phys. 0x0. This should result in a bus error on all other machines.
+
+   ...should, but doesn't. The Afterburner040 for the Falcon has the
+   same behaviour (0x0..0x7 are no ROM shadow). So we have to do
+   another test to distinguish Medusa and AB040. This is a
+   read attempt for 0x00ff82fe phys. that should bus error on a Falcon
+   (+AB040), but is in the range where the Medusa always asserts DTACK.
+
+   The test for the Hades is done by reading address 0xb0000000. This
+   should give a bus error on the Medusa.
+ */
+
+#ifdef CONFIG_ATARI
+	is_not_atari(L(notypetest))
+
+	/* get special machine type (Medusa/Hades/AB40) */
+	moveq	#0,%d3 /* default if tag doesn't exist */
+	get_bi_record	BI_ATARI_MCH_TYPE
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(atari_mch_type),%a0
+	movel	%d3,%a0@
+1:
+	/* On the Hades, the iobase must be set up before opening the
+	 * serial port. There are no I/O regs at 0x00ffxxxx at all. */
+	moveq	#0,%d0
+	cmpl	#ATARI_MACH_HADES,%d3
+	jbne	1f
+	movel	#0xff000000,%d0		/* Hades I/O base addr: 0xff000000 */
+1:	lea     %pc@(L(iobase)),%a0
+	movel   %d0,%a0@
+
+L(notypetest):
+#endif
+
+#ifdef CONFIG_VME
+	is_mvme147(L(getvmetype))
+	is_bvme6000(L(getvmetype))
+	is_not_mvme16x(L(gvtdone))
+
+	/* See if the loader has specified the BI_VME_TYPE tag.  Recent
+	 * versions of VMELILO and TFTPLILO do this.  We have to do this
+	 * early so we know how to handle console output.  If the tag
+	 * doesn't exist then we use the Bug for output on MVME16x.
+	 */
+L(getvmetype):
+	get_bi_record	BI_VME_TYPE
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(vme_brdtype),%a0
+	movel	%d3,%a0@
+1:
+#ifdef CONFIG_MVME16x
+	is_not_mvme16x(L(gvtdone))
+
+	/* Need to get the BRD_ID info to differentiate between 162, 167,
+	 * etc.  This is available as a BI_VME_BRDINFO tag with later
+	 * versions of VMELILO and TFTPLILO, otherwise we call the Bug.
+	 */
+	get_bi_record	BI_VME_BRDINFO
+	tstl	%d0
+	jpl	1f
+
+	/* Get pointer to board ID data from Bug */
+	movel	%d2,%sp@-
+	trap	#15
+	.word	0x70		/* trap 0x70 - .BRD_ID */
+	movel	%sp@+,%a0
+1:
+	lea	%pc@(mvme_bdid),%a1
+	/* Structure is 32 bytes long */
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+	movel	%a0@+,%a1@+
+#endif
+
+L(gvtdone):
+
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(L(nothp))
+
+	/* Get the address of the UART for serial debugging */
+	get_bi_record	BI_HP300_UART_ADDR
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(L(uartbase)),%a0
+	movel	%d3,%a0@
+	get_bi_record	BI_HP300_UART_SCODE
+	tstl	%d0
+	jbmi	1f
+	movel	%a0@,%d3
+	lea	%pc@(L(uart_scode)),%a0
+	movel	%d3,%a0@
+1:
+L(nothp):
+#endif
+
+/*
+ * Initialize serial port
+ */
+	jbsr	L(serial_init)
+
+/*
+ * Initialize console
+ */
+#ifdef CONFIG_MAC
+	is_not_mac(L(nocon))
+#  ifdef CONSOLE_DEBUG
+	console_init
+#    ifdef CONFIG_LOGO
+	console_put_penguin
+#    endif /* CONFIG_LOGO */
+#  endif /* CONSOLE_DEBUG */
+L(nocon):
+#endif /* CONFIG_MAC */
+
+
+	putc	'\n'
+	putc	'A'
+	leds	0x2
+	dputn	%pc@(L(cputype))
+	dputn	%pc@(m68k_supervisor_cachemode)
+	dputn	%pc@(m68k_pgtable_cachemode)
+	dputc	'\n'
+
+/*
+ * Save physical start address of kernel
+ */
+	lea	%pc@(L(phys_kernel_start)),%a0
+	lea	%pc@(_stext),%a1
+	subl	#_stext,%a1
+	addl	#PAGE_OFFSET,%a1
+	movel	%a1,%a0@
+
+	putc	'B'
+
+	leds	0x4
+
+/*
+ *	mmu_init
+ *
+ *	This block of code does what's necessary to map in the various kinds
+ *	of machines for execution of Linux.
+ *	First map the first 4, 8, or 16 MB of kernel code & data
+ */
+
+	get_bi_record BI_MEMCHUNK
+	movel	%a0@(4),%d0
+	movel	#16*1024*1024,%d1
+	cmpl	%d0,%d1
+	jls	1f
+	lsrl	#1,%d1
+	cmpl	%d0,%d1
+	jls	1f
+	lsrl	#1,%d1
+1:
+	lea	%pc@(m68k_init_mapped_size),%a0
+	movel	%d1,%a0@
+	mmu_map	#PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
+		%pc@(m68k_supervisor_cachemode)
+
+	putc	'C'
+
+#ifdef CONFIG_AMIGA
+
+L(mmu_init_amiga):
+
+	is_not_amiga(L(mmu_init_not_amiga))
+/*
+ * mmu_init_amiga
+ */
+
+	putc	'D'
+
+	is_not_040_or_060(1f)
+
+	/*
+	 * 040: Map the 16Meg range physical 0x0 up to logical 0x8000.0000
+	 */
+	mmu_map		#0x80000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+	/*
+	 * Map the Zorro III I/O space with transparent translation
+	 * for frame buffer memory etc.
+	 */
+	mmu_map_tt	#1,#0x40000000,#0x20000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+1:
+	/*
+	 * 030:	Map the 32Meg range physical 0x0 up to logical 0x8000.0000
+	 */
+	mmu_map		#0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
+	mmu_map_tt	#1,#0x40000000,#0x20000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+L(mmu_init_not_amiga):
+#endif
+
+#ifdef CONFIG_ATARI
+
+L(mmu_init_atari):
+
+	is_not_atari(L(mmu_init_not_atari))
+
+	putc	'E'
+
+/* On the Atari, we map the I/O region (phys. 0x00ffxxxx) by mapping
+   the last 16 MB of virtual address space to the first 16 MB (i.e.
+   0xffxxxxxx -> 0x00xxxxxx). For this, an additional pointer table is
+   needed. I/O ranges are marked non-cacheable.
+
+   For the Medusa it is better to map the I/O region transparently
+   (i.e. 0xffxxxxxx -> 0xffxxxxxx), because some I/O registers are
+   accessible only in the high area.
+
+   On the Hades all I/O registers are only accessible in the high
+   area.
+*/
+
+	/* I/O base addr for non-Medusa, non-Hades: 0x00000000 */
+	moveq	#0,%d0
+	movel	%pc@(atari_mch_type),%d3
+	cmpl	#ATARI_MACH_MEDUSA,%d3
+	jbeq	2f
+	cmpl	#ATARI_MACH_HADES,%d3
+	jbne	1f
+2:	movel	#0xff000000,%d0 /* Medusa/Hades base addr: 0xff000000 */
+1:	movel	%d0,%d3
+
+	is_040_or_060(L(spata68040))
+
+	/* Map everything non-cacheable, though not all parts really
+	 * need to disable caches (crucial only for 0xff8000..0xffffff
+	 * (standard I/O) and 0xf00000..0xf3ffff (IDE)). The remainder
+	 * isn't really used, except for sometimes peeking into the
+	 * ROMs (mirror at phys. 0x0), so caching isn't necessary for
+	 * this. */
+	mmu_map	#0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+L(spata68040):
+
+	mmu_map	#0xff000000,%d3,#0x01000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(mmu_init_not_atari):
+#endif
+
+#ifdef CONFIG_Q40
+	is_not_q40(L(notq40))
+	/*
+	 * add transparent mapping for 0xff00 0000 - 0xffff ffff
+	 * non-cached serialized etc..
+	 * this includes master chip, DAC, RTC and ISA ports
+	 * 0xfe000000-0xfeffffff is for screen and ROM
+	 */
+
+	putc    'Q'
+
+	mmu_map_tt	#0,#0xfe000000,#0x01000000,#_PAGE_CACHE040W
+	mmu_map_tt	#1,#0xff000000,#0x01000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(notq40):
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(L(nothp300))
+
+	/* On the HP300, we map the ROM, INTIO and DIO regions (phys. 0x00xxxxxx)
+	 * by mapping 32MB (on 020/030) or 16 MB (on 040) from 0xf0xxxxxx -> 0x00xxxxxx).
+	 * The ROM mapping is needed because the LEDs are mapped there too.
+	 */
+
+	is_040(1f)
+
+	/*
+	 * 030: Map the 32Meg range physical 0x0 up to logical 0xf000.0000
+	 */
+	mmu_map	#0xf0000000,#0,#0x02000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+1:
+	/*
+	 * 040: Map the 16Meg range physical 0x0 up to logical 0xf000.0000
+	 */
+	mmu_map #0xf0000000,#0,#0x01000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(nothp300):
+#endif /* CONFIG_HP300 */
+
+#ifdef CONFIG_MVME147
+
+	is_not_mvme147(L(not147))
+
+	/*
+	 * On MVME147 we have already created kernel page tables for
+	 * 4MB of RAM at address 0, so now need to do a transparent
+	 * mapping of the top of memory space.  Make it 0.5GByte for now,
+	 * so we can access on-board i/o areas.
+	 */
+
+	mmu_map_tt	#1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE030
+
+	jbra	L(mmu_init_done)
+
+L(not147):
+#endif /* CONFIG_MVME147 */
+
+#ifdef CONFIG_MVME16x
+
+	is_not_mvme16x(L(not16x))
+
+	/*
+	 * On MVME16x we have already created kernel page tables for
+	 * 4MB of RAM at address 0, so we now need to do a transparent
+	 * mapping of the top of memory space.  Make it 0.5GByte for now.
+	 * Supervisor only access, so transparent mapping doesn't
+	 * clash with User code virtual address space.
+	 * this covers IO devices, PROM and SRAM.  The PROM and SRAM
+	 * mapping is needed to allow 167Bug to run.
+	 * IO is in the range 0xfff00000 to 0xfffeffff.
+	 * PROM is 0xff800000->0xffbfffff and SRAM is
+	 * 0xffe00000->0xffe1ffff.
+	 */
+
+	mmu_map_tt	#1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(not16x):
+#endif	/* CONFIG_MVME16x */
+
+#ifdef CONFIG_BVME6000
+
+	is_not_bvme6000(L(not6000))
+
+	/*
+	 * On BVME6000 we have already created kernel page tables for
+	 * 4MB of RAM at address 0, so we now need to do a transparent
+	 * mapping of the top of memory space.  Make it 0.5GByte for now,
+	 * so we can access on-board i/o areas.
+	 * Supervisor only access, so transparent mapping doesn't
+	 * clash with User code virtual address space.
+	 */
+
+	mmu_map_tt	#1,#0xe0000000,#0x20000000,#_PAGE_NOCACHE_S
+
+	jbra	L(mmu_init_done)
+
+L(not6000):
+#endif /* CONFIG_BVME6000 */
+
+/*
+ * mmu_init_mac
+ *
+ * The Macintosh mappings are less clear.
+ *
+ * Even as of this writing, it is unclear how the
+ * Macintosh mappings will be done.  However, as
+ * the first author of this code I'm proposing the
+ * following model:
+ *
+ * Map the kernel (that's already done),
+ * Map the I/O (on most machines that's the
+ * 0x5000.0000 ... 0x5300.0000 range),
+ * Map the video frame buffer using as few pages
+ * as absolutely necessary (this requirement mostly stems from
+ * the fact that when the frame buffer is at
+ * 0x0000.0000 then we know there is valid RAM just
+ * above the screen that we don't want to waste!).
+ *
+ * By the way, if the frame buffer is at 0x0000.0000
+ * then the Macintosh is known as an RBV based Mac.
+ *
+ * By the way 2, the code currently maps in a bunch of
+ * regions.  But I'd like to cut that out.  (And move most
+ * of the mappings up into the kernel proper ... or only
+ * map what's necessary.)
+ */
+
+#ifdef CONFIG_MAC
+
+L(mmu_init_mac):
+
+	is_not_mac(L(mmu_init_not_mac))
+
+	putc	'F'
+
+	is_not_040_or_060(1f)
+
+	moveq	#_PAGE_NOCACHE_S,%d3
+	jbra	2f
+1:
+	moveq	#_PAGE_NOCACHE030,%d3
+2:
+	/*
+	 * Mac Note: screen address of logical 0xF000.0000 -> <screen physical>
+	 *	     we simply map the 4MB that contains the videomem
+	 */
+
+	movel	#VIDEOMEMMASK,%d0
+	andl	%pc@(L(mac_videobase)),%d0
+
+	mmu_map		#VIDEOMEMBASE,%d0,#VIDEOMEMSIZE,%d3
+	/* ROM from 4000 0000 to 4200 0000 (only for mac_reset()) */
+	mmu_map_eq	#0x40000000,#0x02000000,%d3
+	/* IO devices (incl. serial port) from 5000 0000 to 5300 0000 */
+	mmu_map_eq	#0x50000000,#0x03000000,%d3
+	/* Nubus slot space (video at 0xF0000000, rom at 0xF0F80000) */
+	mmu_map_tt	#1,#0xf8000000,#0x08000000,%d3
+
+	jbra	L(mmu_init_done)
+
+L(mmu_init_not_mac):
+#endif
+
+#ifdef CONFIG_SUN3X
+	is_not_sun3x(L(notsun3x))
+
+	/* oh, the pain..  We're gonna want the prom code after
+	 * starting the MMU, so we copy the mappings, translating
+	 * from 8k -> 4k pages as we go.
+	 */
+
+	/* copy maps from 0xfee00000 to 0xff000000 */
+	movel	#0xfee00000, %d0
+	moveq	#ROOT_INDEX_SHIFT, %d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	movel	#0xfee00000, %d0
+	moveq	#PTR_INDEX_SHIFT, %d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1, %d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	movel	#0xfee00000, %d0
+	moveq	#PAGE_INDEX_SHIFT, %d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1, %d0
+	mmu_get_page_table_entry	%a0,%d0
+
+	/* this is where the prom page table lives */
+	movel	0xfefe00d4, %a1
+	movel	%a1@, %a1
+
+	movel	#((0x200000 >> 13)-1), %d1
+
+1:
+	movel	%a1@+, %d3
+	movel	%d3,%a0@+
+	addl	#0x1000,%d3
+	movel	%d3,%a0@+
+
+	dbra	%d1,1b
+
+	/* setup tt1 for I/O */
+	mmu_map_tt	#1,#0x40000000,#0x40000000,#_PAGE_NOCACHE_S
+	jbra	L(mmu_init_done)
+
+L(notsun3x):
+#endif
+
+#ifdef CONFIG_APOLLO
+	is_not_apollo(L(notapollo))
+
+	putc	'P'
+	mmu_map         #0x80000000,#0,#0x02000000,#_PAGE_NOCACHE030
+
+L(notapollo):
+	jbra	L(mmu_init_done)
+#endif
+
+L(mmu_init_done):
+
+	putc	'G'
+	leds	0x8
+
+/*
+ * mmu_fixup
+ *
+ * On the 040 class machines, all pages that are used for the
+ * mmu have to be fixed up. According to Motorola, pages holding mmu
+ * tables should be non-cacheable on a '040 and write-through on a
+ * '060. But analysis of the reasons for this, and practical
+ * experience, showed that write-through also works on a '040.
+ *
+ * The memory allocated so far, from kernel_end to memory_start, is
+ * used for all kinds of tables; the cache attributes of those pages
+ * are now fixed up.
+ */
+L(mmu_fixup):
+
+	is_not_040_or_060(L(mmu_fixup_done))
+
+#ifdef MMU_NOCACHE_KERNEL
+	jbra	L(mmu_fixup_done)
+#endif
+
+	/* first fix the page at the start of the kernel, that
+	 * contains also kernel_pg_dir.
+	 */
+	movel	%pc@(L(phys_kernel_start)),%d0
+	subl	#PAGE_OFFSET,%d0
+	lea	%pc@(_stext),%a0
+	subl	%d0,%a0
+	mmu_fixup_page_mmu_cache	%a0
+
+	movel	%pc@(L(kernel_end)),%a0
+	subl	%d0,%a0
+	movel	%pc@(L(memory_start)),%a1
+	subl	%d0,%a1
+	bra	2f
+1:
+	mmu_fixup_page_mmu_cache	%a0
+	addw	#PAGESIZE,%a0
+2:
+	cmpl	%a0,%a1
+	jgt	1b
+
+L(mmu_fixup_done):
+
+#ifdef MMU_PRINT
+	mmu_print
+#endif
+
+/*
+ * mmu_engage
+ *
+ * This chunk of code performs the gruesome task of engaging the MMU.
+ * The reason it's gruesome is that when the MMU becomes engaged it
+ * maps logical addresses to physical addresses.  The Program Counter
+ * register is then passed through the MMU before the next instruction
+ * is fetched (the instruction following the engage MMU instruction).
+ * This may mean one of two things:
+ * 1. The Program Counter falls within the logical address space of
+ *    the kernel of which there are two sub-possibilities:
+ *    A. The PC maps to the correct instruction (logical PC == physical
+ *       code location), or
+ *    B. The PC does not map through and the processor will read some
+ *       data (or instruction) which is not the logically next instr.
+ *    As you can imagine, A is good and B is bad.
+ * Alternatively,
+ * 2. The Program Counter does not map through the MMU.  The processor
+ *    will take a Bus Error.
+ * Clearly, 2 is bad.
+ * It doesn't take a whiz kid to figure out that you want 1.A.
+ * This code creates that possibility.
+ * There are two possible 1.A. states (we now ignore the other above states):
+ * A. The kernel is located at physical memory addressed the same as
+ *    the logical memory for the kernel, i.e., 0x01000.
+ * B. The kernel is located somewhere else, e.g., 0x0400.0000.
+ *
+ *    Under some conditions the Macintosh can look like A or B.
+ * [A friend and I once noted that Apple hardware engineers should be
+ * whacked twice each day: once when they show up at work (as in, Whack!,
+ * "This is for the screwy hardware we know you're going to design today."),
+ * and also at the end of the day (as in, Whack! "I don't know what
+ * you designed today, but I'm sure it wasn't good."). -- rst]
+ *
+ * This code works on the following premise:
+ * If the kernel start (%d5) is within the first 16 Meg of RAM,
+ * then create a mapping for the kernel at logical 0x8000.0000 to
+ * the physical location of the pc.  And, create a transparent
+ * translation register for the first 16 Meg.  Then, after the MMU
+ * is engaged, the PC can be moved up into the 0x8000.0000 range
+ * and then the transparent translation can be turned off and then
+ * the PC can jump to the correct logical location and it will be
+ * home (finally).  This is essentially the code that the Amiga used
+ * to use.  Now, it's generalized for all processors.  Which means
+ * that a fresh (but temporary) mapping has to be created.  The mapping
+ * is made in page 0 (an as-yet-unused location -- except for the
+ * stack!).  This temporary mapping will only require 1 pointer table
+ * and a single page table (it can map 256K).
+ *
+ * OK, alternatively, imagine that the Program Counter is not within
+ * the first 16 Meg.  Then, just use Transparent Translation registers
+ * to do the right thing.
+ *
+ * Last, if _start is already at 0x01000, then there's nothing special
+ * to do (in other words, in a degenerate case of the first case above,
+ * do nothing).
+ *
+ * Let's do it.
+ */
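+/*
+ * A quick check of the "it can map 256K" figure above, using the page
+ * geometry described later in this file (4K pages and a 6-bit page table
+ * index, i.e. 64 entries per page table):
+ *
+ *	64 entries * 4 KB per entry = 256 KB per page table
+ *
+ * so a single pointer table slot plus one page table is indeed enough
+ * for the temporary mapping.
+ */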
+
+	putc	'H'
+
+	mmu_engage
+
+/*
+ * After this point no new memory is allocated and
+ * the start of available memory is stored in availmem.
+ * (The bootmem allocator now requires the physical address.)
+ */
+
+	movel	L(memory_start),availmem
+
+#ifdef CONFIG_AMIGA
+	is_not_amiga(1f)
+	/* fixup the Amiga custom register location before printing */
+	clrl	L(custom)
+1:
+#endif
+
+#ifdef CONFIG_ATARI
+	is_not_atari(1f)
+	/* fixup the Atari iobase register location before printing */
+	movel	#0xff000000,L(iobase)
+1:
+#endif
+
+#ifdef CONFIG_MAC
+	is_not_mac(1f)
+	movel	#~VIDEOMEMMASK,%d0
+	andl	L(mac_videobase),%d0
+	addl	#VIDEOMEMBASE,%d0
+	movel	%d0,L(mac_videobase)
+#ifdef CONSOLE_DEBUG
+	movel	%pc@(L(phys_kernel_start)),%d0
+	subl	#PAGE_OFFSET,%d0
+	subl	%d0,L(console_font)
+	subl	%d0,L(console_font_data)
+#endif
+	orl	#0x50000000,L(mac_sccbase)
+1:
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(2f)
+	/*
+	 * Fix up the iobase register to point to the new location of the LEDs.
+	 */
+	movel	#0xf0000000,L(iobase)
+
+	/*
+	 * Energise the FPU and caches.
+	 */
+	is_040(1f)
+	movel	#0x60,0xf05f400c
+	jbra	2f
+
+	/*
+	 * 040: slightly different, apparently.
+	 */
+1:	movew	#0,0xf05f400e
+	movew	#0x64,0xf05f400e
+2:
+#endif
+
+#ifdef CONFIG_SUN3X
+	is_not_sun3x(1f)
+
+	/* enable copro */
+	oriw	#0x4000,0x61000000
+1:
+#endif
+
+#ifdef CONFIG_APOLLO
+	is_not_apollo(1f)
+
+	/*
+	 * Fix up the iobase before printing
+	 */
+	movel	#0x80000000,L(iobase)
+1:
+#endif
+
+	putc	'I'
+	leds	0x10
+
+/*
+ * Enable caches
+ */
+
+	is_not_040_or_060(L(cache_not_680460))
+
+L(cache680460):
+	.chip	68040
+	nop
+	cpusha	%bc
+	nop
+
+	is_060(L(cache68060))
+
+	movel	#CC6_ENABLE_D+CC6_ENABLE_I,%d0
+	/* MMU stuff works in copyback mode now, so enable the cache */
+	movec	%d0,%cacr
+	jra	L(cache_done)
+
+L(cache68060):
+	movel	#CC6_ENABLE_D+CC6_ENABLE_I+CC6_ENABLE_SB+CC6_PUSH_DPI+CC6_ENABLE_B+CC6_CLRA_B,%d0
+	/* MMU stuff works in copyback mode now, so enable the cache */
+	movec	%d0,%cacr
+	/* enable superscalar dispatch in PCR */
+	moveq	#1,%d0
+	.chip	68060
+	movec	%d0,%pcr
+
+	jbra	L(cache_done)
+L(cache_not_680460):
+L(cache68030):
+	.chip	68030
+	movel	#CC3_ENABLE_DB+CC3_CLR_D+CC3_ENABLE_D+CC3_ENABLE_IB+CC3_CLR_I+CC3_ENABLE_I,%d0
+	movec	%d0,%cacr
+
+	jra	L(cache_done)
+	.chip	68k
+L(cache_done):
+
+	putc	'J'
+
+/*
+ * Setup initial stack pointer
+ */
+	lea	init_task,%curptr
+	lea	init_thread_union+THREAD_SIZE,%sp
+
+	putc	'K'
+
+	subl	%a6,%a6		/* clear a6 for gdb */
+
+/*
+ * The new 64bit printf support requires an early exception initialization.
+ */
+	jbsr	base_trap_init
+
+/* jump to the kernel start */
+
+	putc	'\n'
+	leds	0x55
+
+	jbsr	start_kernel
+
+/*
+ * Find a tag record in the bootinfo structure
+ * The bootinfo structure is located right after the kernel
+ * Returns: d0: size (-1 if not found)
+ *          a0: data pointer (end-of-records if not found)
+ */
+func_start	get_bi_record,%d1
+
+	movel	ARG1,%d0
+	lea	%pc@(_end),%a0
+1:	tstw	%a0@(BIR_TAG)
+	jeq	3f
+	cmpw	%a0@(BIR_TAG),%d0
+	jeq	2f
+	addw	%a0@(BIR_SIZE),%a0
+	jra	1b
+2:	moveq	#0,%d0
+	movew	%a0@(BIR_SIZE),%d0
+	lea	%a0@(BIR_DATA),%a0
+	jra	4f
+3:	moveq	#-1,%d0
+	lea	%a0@(BIR_SIZE),%a0
+4:
+func_return	get_bi_record
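+/*
+ * The same walk, sketched in C for reference (record layout inferred from
+ * the word-sized BIR_TAG/BIR_SIZE accesses above; a zero tag terminates
+ * the list, and the names here are only illustrative):
+ *
+ *	struct bi_rec { unsigned short tag, size; unsigned char data[]; };
+ *
+ *	static struct bi_rec *find_bi_record(struct bi_rec *r,
+ *					     unsigned short tag)
+ *	{
+ *		while (r->tag && r->tag != tag)
+ *			r = (struct bi_rec *)((unsigned char *)r + r->size);
+ *		return r->tag ? r : 0;
+ *	}
+ *
+ * Returning 0 here corresponds to the d0 = -1 "not found" case above.
+ */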
+
+
+/*
+ *	MMU Initialization Begins Here
+ *
+ *	The structure of the MMU tables on the 68k machines
+ *	is thus:
+ *	Root Table
+ *		Logical addresses are translated through
+ *	a hierarchical translation mechanism where the high-order
+ *	seven bits of the logical address (LA) are used as an
+ *	index into the "root table."  Each entry in the root
+ *	table has a bit which specifies if it's a valid pointer to a
+ *	pointer table.  Each entry defines a 32 Meg range of memory.
+ *	If an entry is invalid then that logical range of 32M is
+ *	invalid and references to that range of memory (when the MMU
+ *	is enabled) will fault.  If the entry is valid, then it does
+ *	one of two things.  On 040/060 class machines, it points to
+ *	a pointer table which then describes more finely the memory
+ *	within that 32M range.  On 020/030 class machines, a technique
+ *	called "early terminating descriptors" is used.  This technique
+ *	allows an entire 32 Meg to be described by a single entry in the
+ *	root table.  Thus, this entry in the root table contains the
+ *	physical address of the memory or I/O at the logical address
+ *	which the entry represents and it also contains the necessary
+ *	cache bits for this region.
+ *
+ *	Pointer Tables
+ *		Per the Root Table, there will be one or more
+ *	pointer tables.  Each pointer table defines a 32M range.
+ *	Not all of the 32M range need be defined.  Again, the next
+ *	seven bits of the logical address are used as an index into
+ *	the pointer table to point to page tables (if the pointer
+ *	is valid).  There will undoubtedly be more than one
+ *	pointer table for the kernel because each pointer table
+ *	defines a range of only 32M.  Valid pointer table entries
+ *	point to page tables, or are early terminating entries
+ *	themselves.
+ *
+ *	Page Tables
+ *		Per the Pointer Tables, each page table entry points
+ *	to the physical page in memory that supports the logical
+ *	address that translates to the particular index.
+ *
+ *	In short, the Logical Address gets translated as follows:
+ *		bits 31..25 - index into the Root Table
+ *		bits 24..18 - index into the Pointer Table
+ *		bits 17..12 - index into the Page Table
+ *		bits 11..0  - offset into a particular 4K page
+ *
+ *	The algorithms which follow do one thing: they abstract
+ *	the MMU hardware.  For example, there are three kinds of
+ *	cache settings that are relevant.  Either, memory is
+ *	being mapped in which case it is either Kernel Code (or
+ *	the RamDisk) or it is MMU data.  On the 030, the MMU data
+ *	option also describes the kernel.  Or, I/O is being mapped
+ *	in which case it has its own kind of cache bits.  There
+ *	are constants which abstract these notions from the code that
+ *	actually makes the call to map some range of memory.
+ *
+ */
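+/*
+ * For illustration, the translation described above can be written as a
+ * small C sketch (assuming the field widths listed above -- 7-bit root
+ * index, 7-bit pointer index, 6-bit page index, 4K pages; all names are
+ * hypothetical):
+ *
+ *	static unsigned long root_index(unsigned long la) { return la >> 25; }
+ *	static unsigned long ptr_index(unsigned long la)  { return (la >> 18) & 0x7f; }
+ *	static unsigned long page_index(unsigned long la) { return (la >> 12) & 0x3f; }
+ *	static unsigned long page_offset(unsigned long la){ return la & 0xfff; }
+ *
+ * A full three-level walk then picks root_table[root_index(la)], follows
+ * it to ptr_table[ptr_index(la)], follows that to page_table[page_index(la)]
+ * and finally adds page_offset(la) -- masking off the descriptor status
+ * bits at each step, just as the mmu_get_*_table_entry helpers below do.
+ */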
+
+#ifdef MMU_PRINT
+/*
+ *	mmu_print
+ *
+ *	This algorithm will print out the current MMU mappings.
+ *
+ *	Input:
+ *		%a5 points to the root table.  Everything else is calculated
+ *			from this.
+ */
+
+#define mmu_next_valid		0
+#define mmu_start_logical	4
+#define mmu_next_logical	8
+#define mmu_start_physical	12
+#define mmu_next_physical	16
+
+#define MMU_PRINT_INVALID		-1
+#define MMU_PRINT_VALID			1
+#define MMU_PRINT_UNINITED		0
+
+#define putZc(z,n)		jbne 1f; putc z; jbra 2f; 1: putc n; 2:
+
+func_start	mmu_print,%a0-%a6/%d0-%d7
+
+	movel	%pc@(L(kernel_pgdir_ptr)),%a5
+	lea	%pc@(L(mmu_print_data)),%a0
+	movel	#MMU_PRINT_UNINITED,%a0@(mmu_next_valid)
+
+	is_not_040_or_060(mmu_030_print)
+
+mmu_040_print:
+	puts	"\nMMU040\n"
+	puts	"rp:"
+	putn	%a5
+	putc	'\n'
+#if 0
+	/*
+	 * The following #if/#endif block is a tight algorithm for dumping the 040
+	 * MMU Map in gory detail.  It really isn't that practical unless the
+	 * MMU Map algorithm appears to go awry and you need to debug it at the
+	 * entry per entry level.
+	 */
+	movel	#ROOT_TABLE_SIZE,%d5
+#if 0
+	movel	%a5@+,%d7		| Burn an entry to skip the kernel mappings,
+	subql	#1,%d5			| they (might) work
+#endif
+1:	tstl	%d5
+	jbeq	mmu_print_done
+	subq	#1,%d5
+	movel	%a5@+,%d7
+	btst	#1,%d7
+	jbeq	1b
+
+2:	putn	%d7
+	andil	#0xFFFFFE00,%d7
+	movel	%d7,%a4
+	movel	#PTR_TABLE_SIZE,%d4
+	putc	' '
+3:	tstl	%d4
+	jbeq	11f
+	subq	#1,%d4
+	movel	%a4@+,%d7
+	btst	#1,%d7
+	jbeq	3b
+
+4:	putn	%d7
+	andil	#0xFFFFFF00,%d7
+	movel	%d7,%a3
+	movel	#PAGE_TABLE_SIZE,%d3
+5:	movel	#8,%d2
+6:	tstl	%d3
+	jbeq	31f
+	subq	#1,%d3
+	movel	%a3@+,%d6
+	btst	#0,%d6
+	jbeq	6b
+7:	tstl	%d2
+	jbeq	8f
+	subq	#1,%d2
+	putc	' '
+	jbra	91f
+8:	putc	'\n'
+	movel	#8+1+8+1+1,%d2
+9:	putc	' '
+	dbra	%d2,9b
+	movel	#7,%d2
+91:	putn	%d6
+	jbra	6b
+
+31:	putc	'\n'
+	movel	#8+1,%d2
+32:	putc	' '
+	dbra	%d2,32b
+	jbra	3b
+
+11:	putc	'\n'
+	jbra	1b
+#endif /* MMU 040 Dumping code that's gory and detailed */
+
+	lea	%pc@(kernel_pg_dir),%a5
+	movel	%a5,%a0			/* a0 has the address of the root table ptr */
+	movel	#0x00000000,%a4		/* logical address */
+	moveql	#0,%d0
+40:
+	/* Increment the logical address and preserve in d5 */
+	movel	%a4,%d5
+	addil	#PAGESIZE<<13,%d5
+	movel	%a0@+,%d6
+	btst	#1,%d6
+	jbne	41f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	48f
+41:
+	movel	#0,%d1
+	andil	#0xfffffe00,%d6
+	movel	%d6,%a1
+42:
+	movel	%a4,%d5
+	addil	#PAGESIZE<<6,%d5
+	movel	%a1@+,%d6
+	btst	#1,%d6
+	jbne	43f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	47f
+43:
+	movel	#0,%d2
+	andil	#0xffffff00,%d6
+	movel	%d6,%a2
+44:
+	movel	%a4,%d5
+	addil	#PAGESIZE,%d5
+	movel	%a2@+,%d6
+	btst	#0,%d6
+	jbne	45f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	46f
+45:
+	moveml	%d0-%d1,%sp@-
+	movel	%a4,%d0
+	movel	%d6,%d1
+	andil	#0xfffff4e0,%d1
+	lea	%pc@(mmu_040_print_flags),%a6
+	jbsr	mmu_print_tuple
+	moveml	%sp@+,%d0-%d1
+46:
+	movel	%d5,%a4
+	addq	#1,%d2
+	cmpib	#64,%d2
+	jbne	44b
+47:
+	movel	%d5,%a4
+	addq	#1,%d1
+	cmpib	#128,%d1
+	jbne	42b
+48:
+	movel	%d5,%a4			/* move to the next logical address */
+	addq	#1,%d0
+	cmpib	#128,%d0
+	jbne	40b
+
+	.chip	68040
+	movec	%dtt1,%d0
+	movel	%d0,%d1
+	andiw	#0x8000,%d1		/* is it valid ? */
+	jbeq	1f			/* No, bail out */
+
+	movel	%d0,%d1
+	andil	#0xff000000,%d1		/* Get the address */
+	putn	%d1
+	puts	"=="
+	putn	%d1
+
+	movel	%d0,%d6
+	jbsr	mmu_040_print_flags_tt
+1:
+	movec	%dtt0,%d0
+	movel	%d0,%d1
+	andiw	#0x8000,%d1		/* is it valid ? */
+	jbeq	1f			/* No, bail out */
+
+	movel	%d0,%d1
+	andil	#0xff000000,%d1		/* Get the address */
+	putn	%d1
+	puts	"=="
+	putn	%d1
+
+	movel	%d0,%d6
+	jbsr	mmu_040_print_flags_tt
+1:
+	.chip	68k
+
+	jbra	mmu_print_done
+
+mmu_040_print_flags:
+	btstl	#10,%d6
+	putZc(' ','G')	/* global bit */
+	btstl	#7,%d6
+	putZc(' ','S')	/* supervisor bit */
+mmu_040_print_flags_tt:
+	btstl	#6,%d6
+	jbne	3f
+	putc	'C'
+	btstl	#5,%d6
+	putZc('w','c')	/* write through or copy-back */
+	jbra	4f
+3:
+	putc	'N'
+	btstl	#5,%d6
+	putZc('s',' ')	/* serialized non-cacheable, or non-cacheable */
+4:
+	rts
+
+mmu_030_print_flags:
+	btstl	#6,%d6
+	putZc('C','I')	/* cacheable or cache inhibited */
+	rts
+
+mmu_030_print:
+	puts	"\nMMU030\n"
+	puts	"\nrp:"
+	putn	%a5
+	putc	'\n'
+	movel	%a5,%d0
+	andil	#0xfffffff0,%d0
+	movel	%d0,%a0
+	movel	#0x00000000,%a4		/* logical address */
+	movel	#0,%d0
+30:
+	movel	%a4,%d5
+	addil	#PAGESIZE<<13,%d5
+	movel	%a0@+,%d6
+	btst	#1,%d6			/* is it a table ptr? */
+	jbne	31f			/* yes */
+	btst	#0,%d6			/* is it early terminating? */
+	jbeq	1f			/* no */
+	jbsr	mmu_030_print_helper
+	jbra	38f
+1:
+	jbsr	mmu_print_tuple_invalidate
+	jbra	38f
+31:
+	movel	#0,%d1
+	andil	#0xfffffff0,%d6
+	movel	%d6,%a1
+32:
+	movel	%a4,%d5
+	addil	#PAGESIZE<<6,%d5
+	movel	%a1@+,%d6
+	btst	#1,%d6			/* is it a table ptr? */
+	jbne	33f			/* yes */
+	btst	#0,%d6			/* is it a page descriptor? */
+	jbeq	1f			/* no */
+	jbsr	mmu_030_print_helper
+	jbra	37f
+1:
+	jbsr	mmu_print_tuple_invalidate
+	jbra	37f
+33:
+	movel	#0,%d2
+	andil	#0xfffffff0,%d6
+	movel	%d6,%a2
+34:
+	movel	%a4,%d5
+	addil	#PAGESIZE,%d5
+	movel	%a2@+,%d6
+	btst	#0,%d6
+	jbne	35f
+	jbsr	mmu_print_tuple_invalidate
+	jbra	36f
+35:
+	jbsr	mmu_030_print_helper
+36:
+	movel	%d5,%a4
+	addq	#1,%d2
+	cmpib	#64,%d2
+	jbne	34b
+37:
+	movel	%d5,%a4
+	addq	#1,%d1
+	cmpib	#128,%d1
+	jbne	32b
+38:
+	movel	%d5,%a4			/* move to the next logical address */
+	addq	#1,%d0
+	cmpib	#128,%d0
+	jbne	30b
+
+mmu_print_done:
+	puts	"\n"
+
+func_return	mmu_print
+
+
+mmu_030_print_helper:
+	moveml	%d0-%d1,%sp@-
+	movel	%a4,%d0
+	movel	%d6,%d1
+	lea	%pc@(mmu_030_print_flags),%a6
+	jbsr	mmu_print_tuple
+	moveml	%sp@+,%d0-%d1
+	rts
+
+mmu_print_tuple_invalidate:
+	moveml	%a0/%d7,%sp@-
+
+	lea	%pc@(L(mmu_print_data)),%a0
+	tstl	%a0@(mmu_next_valid)
+	jbmi	mmu_print_tuple_invalidate_exit
+
+	movel	#MMU_PRINT_INVALID,%a0@(mmu_next_valid)
+
+	putn	%a4
+
+	puts	"##\n"
+
+mmu_print_tuple_invalidate_exit:
+	moveml	%sp@+,%a0/%d7
+	rts
+
+
+mmu_print_tuple:
+	moveml	%d0-%d7/%a0,%sp@-
+
+	lea	%pc@(L(mmu_print_data)),%a0
+
+	tstl	%a0@(mmu_next_valid)
+	jble	mmu_print_tuple_print
+
+	cmpl	%a0@(mmu_next_physical),%d1
+	jbeq	mmu_print_tuple_increment
+
+mmu_print_tuple_print:
+	putn	%d0
+	puts	"->"
+	putn	%d1
+
+	movel	%d1,%d6
+	jbsr	%a6@
+
+mmu_print_tuple_record:
+	movel	#MMU_PRINT_VALID,%a0@(mmu_next_valid)
+
+	movel	%d1,%a0@(mmu_next_physical)
+
+mmu_print_tuple_increment:
+	movel	%d5,%d7
+	subl	%a4,%d7
+	addl	%d7,%a0@(mmu_next_physical)
+
+mmu_print_tuple_exit:
+	moveml	%sp@+,%d0-%d7/%a0
+	rts
+
+mmu_print_machine_cpu_types:
+	puts	"machine: "
+
+	is_not_amiga(1f)
+	puts	"amiga"
+	jbra	9f
+1:
+	is_not_atari(2f)
+	puts	"atari"
+	jbra	9f
+2:
+	is_not_mac(3f)
+	puts	"macintosh"
+	jbra	9f
+3:	puts	"unknown"
+9:	putc	'\n'
+
+	puts	"cputype: 0"
+	is_not_060(1f)
+	putc	'6'
+	jbra	9f
+1:
+	is_not_040_or_060(2f)
+	putc	'4'
+	jbra	9f
+2:	putc	'3'
+9:	putc	'0'
+	putc	'\n'
+
+	rts
+#endif /* MMU_PRINT */
+
+/*
+ * mmu_map_tt
+ *
+ * This is a specific function which works on all 680x0 machines.
+ * On 030, 040 & 060 it will attempt to use Transparent Translation
+ * registers (tt0 or tt1, selected by the first argument).
+ * On 020 it will call the standard mmu_map which will use early
+ * terminating descriptors.
+ */
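+/*
+ * Worked example of the transparent-translation encoding built below
+ * (address base in bits 31..24 of the TT register, address mask in bits
+ * 23..16): the VME boards above call
+ * "mmu_map_tt #1,#0xe0000000,#0x20000000,...", which gives
+ *
+ *	base = 0xe0, mask = 0x1f  ->  upper word 0xe01f
+ *
+ * i.e. every address in 0xe0000000 - 0xffffffff is matched.  Sizes below
+ * 16 MB cannot be expressed with this 16 MB-granular mask, which is why
+ * such requests fall back to L(do_map).
+ */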
+func_start	mmu_map_tt,%d0/%d1/%a0,4
+
+	dputs	"mmu_map_tt:"
+	dputn	ARG1
+	dputn	ARG2
+	dputn	ARG3
+	dputn	ARG4
+	dputc	'\n'
+
+	is_020(L(do_map))
+
+	/* Extract the highest bit set
+	 */
+	bfffo	ARG3{#0,#32},%d1
+	cmpw	#8,%d1
+	jcc	L(do_map)
+
+	/* And get the mask
+	 */
+	moveq	#-1,%d0
+	lsrl	%d1,%d0
+	lsrl	#1,%d0
+
+	/* Mask the address
+	 */
+	movel	%d0,%d1
+	notl	%d1
+	andl	ARG2,%d1
+
+	/* Generate the upper 16bit of the tt register
+	 */
+	lsrl	#8,%d0
+	orl	%d0,%d1
+	clrw	%d1
+
+	is_040_or_060(L(mmu_map_tt_040))
+
+	/* set 030 specific bits (read/write access for supervisor mode
+	 * (highest function code set, lower two bits masked))
+	 */
+	orw	#TTR_ENABLE+TTR_RWM+TTR_FCB2+TTR_FCM1+TTR_FCM0,%d1
+	movel	ARG4,%d0
+	btst	#6,%d0
+	jeq	1f
+	orw	#TTR_CI,%d1
+
+1:	lea	STACK,%a0
+	dputn	%d1
+	movel	%d1,%a0@
+	.chip	68030
+	tstl	ARG1
+	jne	1f
+	pmove	%a0@,%tt0
+	jra	2f
+1:	pmove	%a0@,%tt1
+2:	.chip	68k
+	jra	L(mmu_map_tt_done)
+
+	/* set 040 specific bits
+	 */
+L(mmu_map_tt_040):
+	orw	#TTR_ENABLE+TTR_KERNELMODE,%d1
+	orl	ARG4,%d1
+	dputn	%d1
+
+	.chip	68040
+	tstl	ARG1
+	jne	1f
+	movec	%d1,%itt0
+	movec	%d1,%dtt0
+	jra	2f
+1:	movec	%d1,%itt1
+	movec	%d1,%dtt1
+2:	.chip	68k
+
+	jra	L(mmu_map_tt_done)
+
+L(do_map):
+	mmu_map_eq	ARG2,ARG3,ARG4
+
+L(mmu_map_tt_done):
+
+func_return	mmu_map_tt
+
+/*
+ *	mmu_map
+ *
+ *	This routine will map a range of memory using a pointer
+ *	table and allocating the pages on the fly from the kernel.
+ *	The pointer table does not have to be already linked into
+ *	the root table, this routine will do that if necessary.
+ *
+ *	NOTE
+ *	This routine will assert failure and use the serial_putc
+ *	routines in the case of a run-time error.  For example,
+ *	if the address is already mapped.
+ *
+ *	NOTE-2
+ *	This routine will use early terminating descriptors
+ *	where possible for the 68020+68851 and 68030 type
+ *	processors.
+ */
+func_start	mmu_map,%d0-%d4/%a0-%a4
+
+	dputs	"\nmmu_map:"
+	dputn	ARG1
+	dputn	ARG2
+	dputn	ARG3
+	dputn	ARG4
+	dputc	'\n'
+
+	/* Get logical address and round it down to 256KB
+	 */
+	movel	ARG1,%d0
+	andl	#-(PAGESIZE*PAGE_TABLE_SIZE),%d0
+	movel	%d0,%a3
+
+	/* Get the end address
+	 */
+	movel	ARG1,%a4
+	addl	ARG3,%a4
+	subql	#1,%a4
+
+	/* Get physical address and round it down to 256KB
+	 */
+	movel	ARG2,%d0
+	andl	#-(PAGESIZE*PAGE_TABLE_SIZE),%d0
+	movel	%d0,%a2
+
+	/* Add page attributes to the physical address
+	 */
+	movel	ARG4,%d0
+	orw	#_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
+	addw	%d0,%a2
+
+	dputn	%a2
+	dputn	%a3
+	dputn	%a4
+
+	is_not_040_or_060(L(mmu_map_030))
+
+	addw	#_PAGE_GLOBAL040,%a2
+/*
+ *	MMU 040 & 060 Support
+ *
+ *	The MMU usage for the 040 and 060 is different enough from
+ *	the 030 and 68851 that there is separate code.  This comment
+ *	block describes the data structures and algorithms built by
+ *	this code.
+ *
+ *	The 040 does not support early terminating descriptors, as
+ *	the 030 does.  Therefore, a third level of table is needed
+ *	for the 040, and that would be the page table.  In Linux,
+ *	page tables are allocated directly from the memory above the
+ *	kernel.
+ *
+ */
+
+L(mmu_map_040):
+	/* Calculate the offset into the root table
+	 */
+	movel	%a3,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Calculate the offset into the pointer table
+	 */
+	movel	%a3,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	/* Calculate the offset into the page table
+	 */
+	movel	%a3,%d0
+	moveq	#PAGE_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1,%d0
+	mmu_get_page_table_entry	%a0,%d0
+
+	/* The page table entry must not be busy
+	 */
+	tstl	%a0@
+	jne	L(mmu_map_error)
+
+	/* Do the mapping and advance the pointers
+	 */
+	movel	%a2,%a0@
+2:
+	addw	#PAGESIZE,%a2
+	addw	#PAGESIZE,%a3
+
+	/* Ready with mapping?
+	 */
+	lea	%a3@(-1),%a0
+	cmpl	%a0,%a4
+	jhi	L(mmu_map_040)
+	jra	L(mmu_map_done)
+
+L(mmu_map_030):
+	/* Calculate the offset into the root table
+	 */
+	movel	%a3,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Check if the logical address is 32MB aligned,
+	 * so we can try to map it in one go
+	 */
+	movel	%a3,%d0
+	andl	#(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1)&(-ROOT_TABLE_SIZE),%d0
+	jne	1f
+
+	/* Is there enough left to map a full 32MB at once?
+	 */
+	lea	%a3@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE-1),%a1
+	cmpl	%a1,%a4
+	jcs	1f
+
+	addql	#1,%a1
+
+	/* The root table entry must not be busy
+	 */
+	tstl	%a0@
+	jne	L(mmu_map_error)
+
+	/* Do the mapping and advance the pointers
+	 */
+	dputs	"early term1"
+	dputn	%a2
+	dputn	%a3
+	dputn	%a1
+	dputc	'\n'
+	movel	%a2,%a0@
+
+	movel	%a1,%a3
+	lea	%a2@(PTR_TABLE_SIZE*PAGE_TABLE_SIZE*PAGESIZE),%a2
+	jra	L(mmu_mapnext_030)
+1:
+	/* Calculate the offset into the pointer table
+	 */
+	movel	%a3,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	/* The pointer table entry must not be busy
+	 */
+	tstl	%a0@
+	jne	L(mmu_map_error)
+
+	/* Do the mapping and advance the pointers
+	 */
+	dputs	"early term2"
+	dputn	%a2
+	dputn	%a3
+	dputc	'\n'
+	movel	%a2,%a0@
+
+	addl	#PAGE_TABLE_SIZE*PAGESIZE,%a2
+	addl	#PAGE_TABLE_SIZE*PAGESIZE,%a3
+
+L(mmu_mapnext_030):
+	/* Ready with mapping?
+	 */
+	lea	%a3@(-1),%a0
+	cmpl	%a0,%a4
+	jhi	L(mmu_map_030)
+	jra	L(mmu_map_done)
+
+L(mmu_map_error):
+
+	dputs	"mmu_map error:"
+	dputn	%a2
+	dputn	%a3
+	dputc	'\n'
+
+L(mmu_map_done):
+
+func_return	mmu_map
+
+/*
+ *	mmu_fixup
+ *
+ *	On the 040 class machines, all pages that are used for the
+ *	mmu have to be fixed up.
+ */
+
+func_start	mmu_fixup_page_mmu_cache,%d0/%a0
+
+	dputs	"mmu_fixup_page_mmu_cache"
+	dputn	ARG1
+
+	/* Calculate the offset into the root table
+	 */
+	movel	ARG1,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Calculate the offset into the pointer table
+	 */
+	movel	ARG1,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	mmu_get_ptr_table_entry		%a0,%d0
+
+	/* Calculate the offset into the page table
+	 */
+	movel	ARG1,%d0
+	moveq	#PAGE_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1,%d0
+	mmu_get_page_table_entry	%a0,%d0
+
+	movel	%a0@,%d0
+	andil	#_CACHEMASK040,%d0
+	orl	%pc@(m68k_pgtable_cachemode),%d0
+	movel	%d0,%a0@
+
+	dputc	'\n'
+
+func_return	mmu_fixup_page_mmu_cache
+
+/*
+ *	mmu_temp_map
+ *
+ *	create a temporary mapping to enable the MMU; this way we don't
+ *	need any transparent translation tricks.
+ */
+
+func_start	mmu_temp_map,%d0/%d1/%a0/%a1
+
+	dputs	"mmu_temp_map"
+	dputn	ARG1
+	dputn	ARG2
+	dputc	'\n'
+
+	lea	%pc@(L(temp_mmap_mem)),%a1
+
+	/* Calculate the offset in the root table
+	 */
+	movel	ARG2,%d0
+	moveq	#ROOT_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	mmu_get_root_table_entry	%d0
+
+	/* Check if the table was temporarily allocated, so we have to reuse it
+	 */
+	movel	%a0@,%d0
+	cmpl	%pc@(L(memory_start)),%d0
+	jcc	1f
+
+	/* Temporarily allocate a ptr table and insert it into the root table
+	 */
+	movel	%a1@,%d0
+	addl	#PTR_TABLE_SIZE*4,%a1@
+	orw	#_PAGE_TABLE+_PAGE_ACCESSED,%d0
+	movel	%d0,%a0@
+	dputs	" (new)"
+1:
+	dputn	%d0
+	/* Mask the root table entry for the ptr table
+	 */
+	andw	#-ROOT_TABLE_SIZE,%d0
+	movel	%d0,%a0
+
+	/* Calculate the offset into the pointer table
+	 */
+	movel	ARG2,%d0
+	moveq	#PTR_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PTR_TABLE_SIZE-1,%d0
+	lea	%a0@(%d0*4),%a0
+	dputn	%a0
+
+	/* Check if a temporary page table is already allocated
+	 */
+	movel	%a0@,%d0
+	jne	1f
+
+	/* Temporarily allocate a page table and insert it into the ptr table
+	 */
+	movel	%a1@,%d0
+	/* The 512 should be PAGE_TABLE_SIZE*4, but that violates the
+	   alignment restriction for pointer tables on the '0[46]0.  */
+	addl	#512,%a1@
+	orw	#_PAGE_TABLE+_PAGE_ACCESSED,%d0
+	movel	%d0,%a0@
+	dputs	" (new)"
+1:
+	dputn	%d0
+	/* Mask the ptr table entry for the page table
+	 */
+	andw	#-PTR_TABLE_SIZE,%d0
+	movel	%d0,%a0
+
+	/* Calculate the offset into the page table
+	 */
+	movel	ARG2,%d0
+	moveq	#PAGE_INDEX_SHIFT,%d1
+	lsrl	%d1,%d0
+	andl	#PAGE_TABLE_SIZE-1,%d0
+	lea	%a0@(%d0*4),%a0
+	dputn	%a0
+
+	/* Insert the address into the page table
+	 */
+	movel	ARG1,%d0
+	andw	#-PAGESIZE,%d0
+	orw	#_PAGE_PRESENT+_PAGE_ACCESSED+_PAGE_DIRTY,%d0
+	movel	%d0,%a0@
+	dputn	%d0
+
+	dputc	'\n'
+
+func_return	mmu_temp_map
+
+func_start	mmu_engage,%d0-%d2/%a0-%a3
+
+	moveq	#ROOT_TABLE_SIZE-1,%d0
+	/* Temporarily use a different root table.  */
+	lea	%pc@(L(kernel_pgdir_ptr)),%a0
+	movel	%a0@,%a2
+	movel	%pc@(L(memory_start)),%a1
+	movel	%a1,%a0@
+	movel	%a2,%a0
+1:
+	movel	%a0@+,%a1@+
+	dbra	%d0,1b
+
+	lea	%pc@(L(temp_mmap_mem)),%a0
+	movel	%a1,%a0@
+
+	movew	#PAGESIZE-1,%d0
+1:
+	clrl	%a1@+
+	dbra	%d0,1b
+
+	lea	%pc@(1b),%a0
+	movel	#1b,%a1
+	/* Skip temp mappings if phys == virt */
+	cmpl	%a0,%a1
+	jeq	1f
+
+	mmu_temp_map	%a0,%a0
+	mmu_temp_map	%a0,%a1
+
+	addw	#PAGESIZE,%a0
+	addw	#PAGESIZE,%a1
+	mmu_temp_map	%a0,%a0
+	mmu_temp_map	%a0,%a1
+1:
+	movel	%pc@(L(memory_start)),%a3
+	movel	%pc@(L(phys_kernel_start)),%d2
+
+	is_not_040_or_060(L(mmu_engage_030))
+
+L(mmu_engage_040):
+	.chip	68040
+	nop
+	cinva	%bc
+	nop
+	pflusha
+	nop
+	movec	%a3,%srp
+	movel	#TC_ENABLE+TC_PAGE4K,%d0
+	movec	%d0,%tc		/* enable the MMU */
+	jmp	1f:l
+1:	nop
+	movec	%a2,%srp
+	nop
+	cinva	%bc
+	nop
+	pflusha
+	.chip	68k
+	jra	L(mmu_engage_cleanup)
+
+L(mmu_engage_030_temp):
+	.space	12
+L(mmu_engage_030):
+	.chip	68030
+	lea	%pc@(L(mmu_engage_030_temp)),%a0
+	movel	#0x80000002,%a0@
+	movel	%a3,%a0@(4)
+	movel	#0x0808,%d0
+	movec	%d0,%cacr
+	pmove	%a0@,%srp
+	pflusha
+	/*
+	 * enable, super root enable, 4096-byte pages, 7-bit root index,
+	 * 7-bit pointer index, 6-bit page table index.
+	 */
+	movel	#0x82c07760,%a0@(8)
+	pmove	%a0@(8),%tc	/* enable the MMU */
+	jmp	1f:l
+1:	movel	%a2,%a0@(4)
+	movel	#0x0808,%d0
+	movec	%d0,%cacr
+	pmove	%a0@,%srp
+	pflusha
+	.chip	68k
+
+L(mmu_engage_cleanup):
+	subl	#PAGE_OFFSET,%d2
+	subl	%d2,%a2
+	movel	%a2,L(kernel_pgdir_ptr)
+	subl	%d2,%fp
+	subl	%d2,%sp
+	subl	%d2,ARG0
+
+func_return	mmu_engage
+
+func_start	mmu_get_root_table_entry,%d0/%a1
+
+#if 0
+	dputs	"mmu_get_root_table_entry:"
+	dputn	ARG1
+	dputs	" ="
+#endif
+
+	movel	%pc@(L(kernel_pgdir_ptr)),%a0
+	tstl	%a0
+	jne	2f
+
+	dputs	"\nmmu_init:"
+
+	/* Find the start of free memory; get_bi_record does this for us,
+	 * as the bootinfo structure is located directly behind the kernel
+	 * and we simply search for the last entry.
+	 */
+	get_bi_record	BI_LAST
+	addw	#PAGESIZE-1,%a0
+	movel	%a0,%d0
+	andw	#-PAGESIZE,%d0
+
+	dputn	%d0
+
+	lea	%pc@(L(memory_start)),%a0
+	movel	%d0,%a0@
+	lea	%pc@(L(kernel_end)),%a0
+	movel	%d0,%a0@
+
+	/* We have to return the first page at _stext since the init code
+	 * in mm/init.c simply expects kernel_pg_dir there; the rest of the
+	 * page is used for further ptr tables in get_ptr_table.
+	 */
+	lea	%pc@(_stext),%a0
+	lea	%pc@(L(mmu_cached_pointer_tables)),%a1
+	movel	%a0,%a1@
+	addl	#ROOT_TABLE_SIZE*4,%a1@
+
+	lea	%pc@(L(mmu_num_pointer_tables)),%a1
+	addql	#1,%a1@
+
+	/* clear the page
+	 */
+	movel	%a0,%a1
+	movew	#PAGESIZE/4-1,%d0
+1:
+	clrl	%a1@+
+	dbra	%d0,1b
+
+	lea	%pc@(L(kernel_pgdir_ptr)),%a1
+	movel	%a0,%a1@
+
+	dputn	%a0
+	dputc	'\n'
+2:
+	movel	ARG1,%d0
+	lea	%a0@(%d0*4),%a0
+
+#if 0
+	dputn	%a0
+	dputc	'\n'
+#endif
+
+func_return	mmu_get_root_table_entry
+
+
+
+func_start	mmu_get_ptr_table_entry,%d0/%a1
+
+#if 0
+	dputs	"mmu_get_ptr_table_entry:"
+	dputn	ARG1
+	dputn	ARG2
+	dputs	" ="
+#endif
+
+	movel	ARG1,%a0
+	movel	%a0@,%d0
+	jne	2f
+
+	/* Keep track of the number of pointer tables we use
+	 */
+	dputs	"\nmmu_get_new_ptr_table:"
+	lea	%pc@(L(mmu_num_pointer_tables)),%a0
+	movel	%a0@,%d0
+	addql	#1,%a0@
+
+	/* See if there is a free pointer table in our cache of pointer tables
+	 */
+	lea	%pc@(L(mmu_cached_pointer_tables)),%a1
+	andw	#7,%d0
+	jne	1f
+
+	/* Get a new pointer table page from above the kernel memory
+	 */
+	get_new_page
+	movel	%a0,%a1@
+1:
+	/* There is an unused pointer table in our cache... use it
+	 */
+	movel	%a1@,%d0
+	addl	#PTR_TABLE_SIZE*4,%a1@
+
+	dputn	%d0
+	dputc	'\n'
+
+	/* Insert the new pointer table into the root table
+	 */
+	movel	ARG1,%a0
+	orw	#_PAGE_TABLE+_PAGE_ACCESSED,%d0
+	movel	%d0,%a0@
+2:
+	/* Extract the pointer table entry
+	 */
+	andw	#-PTR_TABLE_SIZE,%d0
+	movel	%d0,%a0
+	movel	ARG2,%d0
+	lea	%a0@(%d0*4),%a0
+
+#if 0
+	dputn	%a0
+	dputc	'\n'
+#endif
+
+func_return	mmu_get_ptr_table_entry
+
+
+func_start	mmu_get_page_table_entry,%d0/%a1
+
+#if 0
+	dputs	"mmu_get_page_table_entry:"
+	dputn	ARG1
+	dputn	ARG2
+	dputs	" ="
+#endif
+
+	movel	ARG1,%a0
+	movel	%a0@,%d0
+	jne	2f
+
+	/* If the page table entry doesn't exist, we allocate a complete new
+	 * page and use it as one contiguous big page table which can cover
+	 * 4MB of memory; nearly all mappings have that alignment.
+	 */
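+	/* A quick check of the 4MB figure (with the geometry assumed here:
+	 * 4K pages, 64-entry page tables, 4 bytes per entry): one 4K page
+	 * holds 4096 / 4 = 1024 entries, i.e. 1024 / 64 = 16 page tables,
+	 * and 1024 entries * 4 KB = 4 MB of mapped memory.
+	 */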
+	get_new_page
+	addw	#_PAGE_TABLE+_PAGE_ACCESSED,%a0
+
+	/* align pointer table entry for a page of page tables
+	 */
+	movel	ARG1,%d0
+	andw	#-(PAGESIZE/PAGE_TABLE_SIZE),%d0
+	movel	%d0,%a1
+
+	/* Insert the page tables into the pointer entries
+	 */
+	moveq	#PAGESIZE/PAGE_TABLE_SIZE/4-1,%d0
+1:
+	movel	%a0,%a1@+
+	lea	%a0@(PAGE_TABLE_SIZE*4),%a0
+	dbra	%d0,1b
+
+	/* Now we can get the initialized pointer table entry
+	 */
+	movel	ARG1,%a0
+	movel	%a0@,%d0
+2:
+	/* Extract the page table entry
+	 */
+	andw	#-PAGE_TABLE_SIZE,%d0
+	movel	%d0,%a0
+	movel	ARG2,%d0
+	lea	%a0@(%d0*4),%a0
+
+#if 0
+	dputn	%a0
+	dputc	'\n'
+#endif
+
+func_return	mmu_get_page_table_entry
+
+/*
+ *	get_new_page
+ *
+ *	Return a new page from the memory start and clear it.
+ */
+func_start	get_new_page,%d0/%a1
+
+	dputs	"\nget_new_page:"
+
+	/* allocate the page and adjust memory_start
+	 */
+	lea	%pc@(L(memory_start)),%a0
+	movel	%a0@,%a1
+	addl	#PAGESIZE,%a0@
+
+	/* clear the new page
+	 */
+	movel	%a1,%a0
+	movew	#PAGESIZE/4-1,%d0
+1:
+	clrl	%a1@+
+	dbra	%d0,1b
+
+	dputn	%a0
+	dputc	'\n'
+
+func_return	get_new_page
+
+
+
+/*
+ * Debug output support
+ * Atarians have a choice between the parallel port, the serial port
+ * from the MFP or a serial port of the SCC
+ */
+
+#ifdef CONFIG_MAC
+/* You may define either or both of these. */
+#define MAC_USE_SCC_A /* Modem port */
+#define MAC_USE_SCC_B /* Printer port */
+
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+/* Initialisation table for SCC with 3.6864 MHz PCLK */
+L(scc_initable_mac):
+	.byte	4,0x44		/* x16, 1 stopbit, no parity */
+	.byte	3,0xc0		/* receiver: 8 bpc */
+	.byte	5,0xe2		/* transmitter: 8 bpc, assert dtr/rts */
+	.byte	10,0		/* NRZ */
+	.byte	11,0x50		/* use baud rate generator */
+	.byte	12,1,13,0	/* 38400 baud */
+	.byte	14,1		/* Baud rate generator enable */
+	.byte	3,0xc1		/* enable receiver */
+	.byte	5,0xea		/* enable transmitter */
+	.byte	-1
+	.even
+#endif
+#endif /* CONFIG_MAC */
+
+#ifdef CONFIG_ATARI
+/* #define USE_PRINTER */
+/* #define USE_SCC_B */
+/* #define USE_SCC_A */
+#define USE_MFP
+
+#if defined(USE_SCC_A) || defined(USE_SCC_B)
+/* Initialisation table for SCC with 7.9872 MHz PCLK */
+/* PCLK == 8.0539 gives baud == 9680.1 */
+L(scc_initable_atari):
+	.byte	4,0x44		/* x16, 1 stopbit, no parity */
+	.byte	3,0xc0		/* receiver: 8 bpc */
+	.byte	5,0xe2		/* transmitter: 8 bpc, assert dtr/rts */
+	.byte	10,0		/* NRZ */
+	.byte	11,0x50		/* use baud rate generator */
+	.byte	12,24,13,0	/* 9600 baud */
+	.byte	14,2,14,3	/* use master clock for BRG, enable */
+	.byte	3,0xc1		/* enable receiver */
+	.byte	5,0xea		/* enable transmitter */
+	.byte	-1
+	.even
+#endif
+
+#ifdef USE_PRINTER
+
+LPSG_SELECT	= 0xff8800
+LPSG_READ	= 0xff8800
+LPSG_WRITE	= 0xff8802
+LPSG_IO_A	= 14
+LPSG_IO_B	= 15
+LPSG_CONTROL	= 7
+LSTMFP_GPIP	= 0xfffa01
+LSTMFP_DDR	= 0xfffa05
+LSTMFP_IERB	= 0xfffa09
+
+#elif defined(USE_SCC_B)
+
+LSCC_CTRL	= 0xff8c85
+LSCC_DATA	= 0xff8c87
+
+#elif defined(USE_SCC_A)
+
+LSCC_CTRL	= 0xff8c81
+LSCC_DATA	= 0xff8c83
+
+#elif defined(USE_MFP)
+
+LMFP_UCR     = 0xfffa29
+LMFP_TDCDR   = 0xfffa1d
+LMFP_TDDR    = 0xfffa25
+LMFP_TSR     = 0xfffa2d
+LMFP_UDR     = 0xfffa2f
+
+#endif
+#endif	/* CONFIG_ATARI */
+
+/*
+ * Serial port output support.
+ */
+
+/*
+ * Initialize serial port hardware
+ */
+func_start	serial_init,%d0/%d1/%a0/%a1
+	/*
+	 *	Some of the register usage that follows
+	 *	CONFIG_AMIGA
+	 *		a0 = pointer to boot info record
+	 *		d0 = boot info offset
+	 *	CONFIG_ATARI
+	 *		a0 = address of SCC
+	 *		a1 = Liobase address/address of scc_initable_atari
+	 *		d0 = init data for serial port
+	 *	CONFIG_MAC
+	 *		a0 = address of SCC
+	 *		a1 = address of scc_initable_mac
+	 *		d0 = init data for serial port
+	 */
+
+#ifdef CONFIG_AMIGA
+#define SERIAL_DTR	7
+#define SERIAL_CNTRL	CIABBASE+C_PRA
+
+	is_not_amiga(1f)
+	lea	%pc@(L(custom)),%a0
+	movel	#-ZTWOBASE,%a0@
+	bclr	#SERIAL_DTR,SERIAL_CNTRL-ZTWOBASE
+	get_bi_record	BI_AMIGA_SERPER
+	movew	%a0@,CUSTOMBASE+C_SERPER-ZTWOBASE
+|	movew	#61,CUSTOMBASE+C_SERPER-ZTWOBASE
+1:
+#endif
+
+#ifdef CONFIG_ATARI
+	is_not_atari(4f)
+	movel	%pc@(L(iobase)),%a1
+#if defined(USE_PRINTER)
+	bclr	#0,%a1@(LSTMFP_IERB)
+	bclr	#0,%a1@(LSTMFP_DDR)
+	moveb	#LPSG_CONTROL,%a1@(LPSG_SELECT)
+	moveb	#0xff,%a1@(LPSG_WRITE)
+	moveb	#LPSG_IO_B,%a1@(LPSG_SELECT)
+	clrb	%a1@(LPSG_WRITE)
+	moveb	#LPSG_IO_A,%a1@(LPSG_SELECT)
+	moveb	%a1@(LPSG_READ),%d0
+	bset	#5,%d0
+	moveb	%d0,%a1@(LPSG_WRITE)
+#elif defined(USE_SCC_A) || defined(USE_SCC_B)
+	lea	%a1@(LSCC_CTRL),%a0
+	/* Reset SCC register pointer */
+	moveb	%a0@,%d0
+	/* Reset SCC device: write register pointer then register value */
+	moveb	#9,%a0@
+	moveb	#0xc0,%a0@
+	/* Wait for 5 PCLK cycles, which is about 63 CPU cycles */
+	/* 5 / 7.9872 MHz = approx. 0.63 us = 63 / 100 MHz */
+	movel	#32,%d0
+2:
+	subq	#1,%d0
+	jne	2b
+	/* Initialize channel */
+	lea	%pc@(L(scc_initable_atari)),%a1
+2:	moveb	%a1@+,%d0
+	jmi	3f
+	moveb	%d0,%a0@
+	moveb	%a1@+,%a0@
+	jra	2b
+3:	clrb	%a0@
+#elif defined(USE_MFP)
+	bclr	#1,%a1@(LMFP_TSR)
+	moveb   #0x88,%a1@(LMFP_UCR)
+	andb	#0x70,%a1@(LMFP_TDCDR)
+	moveb   #2,%a1@(LMFP_TDDR)
+	orb	#1,%a1@(LMFP_TDCDR)
+	bset	#1,%a1@(LMFP_TSR)
+#endif
+	jra	L(serial_init_done)
+4:
+#endif
+
+#ifdef CONFIG_MAC
+	is_not_mac(L(serial_init_not_mac))
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+#define mac_scc_cha_b_ctrl_offset	0x0
+#define mac_scc_cha_a_ctrl_offset	0x2
+#define mac_scc_cha_b_data_offset	0x4
+#define mac_scc_cha_a_data_offset	0x6
+	movel	%pc@(L(mac_sccbase)),%a0
+	/* Reset SCC register pointer */
+	moveb	%a0@(mac_scc_cha_a_ctrl_offset),%d0
+	/* Reset SCC device: write register pointer then register value */
+	moveb	#9,%a0@(mac_scc_cha_a_ctrl_offset)
+	moveb	#0xc0,%a0@(mac_scc_cha_a_ctrl_offset)
+	/* Wait for 5 PCLK cycles, which is about 68 CPU cycles */
+	/* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */
+	movel	#35,%d0
+5:
+	subq	#1,%d0
+	jne	5b
+#endif
+#ifdef MAC_USE_SCC_A
+	/* Initialize channel A */
+	lea	%pc@(L(scc_initable_mac)),%a1
+5:	moveb	%a1@+,%d0
+	jmi	6f
+	moveb	%d0,%a0@(mac_scc_cha_a_ctrl_offset)
+	moveb	%a1@+,%a0@(mac_scc_cha_a_ctrl_offset)
+	jra	5b
+6:
+#endif	/* MAC_USE_SCC_A */
+#ifdef MAC_USE_SCC_B
+	/* Initialize channel B */
+	lea	%pc@(L(scc_initable_mac)),%a1
+7:	moveb	%a1@+,%d0
+	jmi	8f
+	moveb	%d0,%a0@(mac_scc_cha_b_ctrl_offset)
+	moveb	%a1@+,%a0@(mac_scc_cha_b_ctrl_offset)
+	jra	7b
+8:
+#endif	/* MAC_USE_SCC_B */
+	jra	L(serial_init_done)
+L(serial_init_not_mac):
+#endif	/* CONFIG_MAC */
+
+#ifdef CONFIG_Q40
+	is_not_q40(2f)
+/* debug output goes into SRAM, so we don't do it unless requested
+   - check for '%LX$' signature in SRAM   */
+	lea	%pc@(q40_mem_cptr),%a1
+	move.l	#0xff020010,%a1@  /* must be inited - also used by debug=mem */
+	move.l	#0xff020000,%a1
+	cmp.b	#'%',%a1@
+	bne	2f	/*nodbg*/
+	addq.w	#4,%a1
+	cmp.b	#'L',%a1@
+	bne	2f	/*nodbg*/
+	addq.w	#4,%a1
+	cmp.b	#'X',%a1@
+	bne	2f	/*nodbg*/
+	addq.w	#4,%a1
+	cmp.b	#'$',%a1@
+	bne	2f	/*nodbg*/
+	/* signature OK */
+	lea	%pc@(L(q40_do_debug)),%a1
+	tas	%a1@
+/*nodbg: q40_do_debug is 0 by default*/
+2:
+#endif
+
+#ifdef CONFIG_MVME16x
+	is_not_mvme16x(L(serial_init_not_mvme16x))
+	moveb	#0x10,M167_PCSCCMICR
+	moveb	#0x10,M167_PCSCCTICR
+	moveb	#0x10,M167_PCSCCRICR
+	jra	L(serial_init_done)
+L(serial_init_not_mvme16x):
+#endif
+
+#ifdef CONFIG_APOLLO
+/* We count on the PROM initializing SIO1 */
+#endif
+
+#ifdef CONFIG_HP300
+/* We count on the boot loader initialising the UART */
+#endif
+
+L(serial_init_done):
+func_return	serial_init
+
+/*
+ * Output character on serial port.
+ */
+func_start	serial_putc,%d0/%d1/%a0/%a1
+
+	movel	ARG1,%d0
+	cmpib	#'\n',%d0
+	jbne	1f
+
+	/* A little safe recursion is good for the soul */
+	serial_putc	#'\r'
+1:
+
+#ifdef CONFIG_AMIGA
+	is_not_amiga(2f)
+	andw	#0x00ff,%d0
+	oriw	#0x0100,%d0
+	movel	%pc@(L(custom)),%a0
+	movew	%d0,%a0@(CUSTOMBASE+C_SERDAT)
+1:	movew	%a0@(CUSTOMBASE+C_SERDATR),%d0
+	andw	#0x2000,%d0
+	jeq	1b
+	jra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_MAC
+	is_not_mac(5f)
+#if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B)
+	movel	%pc@(L(mac_sccbase)),%a1
+#endif
+#ifdef MAC_USE_SCC_A
+3:	btst	#2,%a1@(mac_scc_cha_a_ctrl_offset)
+	jeq	3b
+	moveb	%d0,%a1@(mac_scc_cha_a_data_offset)
+#endif	/* MAC_USE_SCC_A */
+#ifdef MAC_USE_SCC_B
+4:	btst	#2,%a1@(mac_scc_cha_b_ctrl_offset)
+	jeq	4b
+	moveb	%d0,%a1@(mac_scc_cha_b_data_offset)
+#endif	/* MAC_USE_SCC_B */
+	jra	L(serial_putc_done)
+5:
+#endif	/* CONFIG_MAC */
+
+#ifdef CONFIG_ATARI
+	is_not_atari(4f)
+	movel	%pc@(L(iobase)),%a1
+#if defined(USE_PRINTER)
+3:	btst	#0,%a1@(LSTMFP_GPIP)
+	jne	3b
+	moveb	#LPSG_IO_B,%a1@(LPSG_SELECT)
+	moveb	%d0,%a1@(LPSG_WRITE)
+	moveb	#LPSG_IO_A,%a1@(LPSG_SELECT)
+	moveb	%a1@(LPSG_READ),%d0
+	bclr	#5,%d0
+	moveb	%d0,%a1@(LPSG_WRITE)
+	nop
+	nop
+	bset	#5,%d0
+	moveb	%d0,%a1@(LPSG_WRITE)
+#elif defined(USE_SCC_A) || defined(USE_SCC_B)
+3:	btst	#2,%a1@(LSCC_CTRL)
+	jeq	3b
+	moveb	%d0,%a1@(LSCC_DATA)
+#elif defined(USE_MFP)
+3:	btst	#7,%a1@(LMFP_TSR)
+	jeq	3b
+	moveb	%d0,%a1@(LMFP_UDR)
+#endif
+	jra	L(serial_putc_done)
+4:
+#endif	/* CONFIG_ATARI */
+
+#ifdef CONFIG_MVME147
+	is_not_mvme147(2f)
+1:	btst	#2,M147_SCC_CTRL_A
+	jeq	1b
+	moveb	%d0,M147_SCC_DATA_A
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_MVME16x
+	is_not_mvme16x(2f)
+	/*
+	 * If the loader gave us a board type then we can use that to
+	 * select an appropriate output routine; otherwise we just use
+	 * the Bug code.  If we have to use the Bug that means the Bug
+	 * workspace has to be valid, which means the Bug has to use
+	 * the SRAM, which is non-standard.
+	 */
+	moveml	%d0-%d7/%a2-%a6,%sp@-
+	movel	vme_brdtype,%d1
+	jeq	1f			| No tag - use the Bug
+	cmpi	#VME_TYPE_MVME162,%d1
+	jeq	6f
+	cmpi	#VME_TYPE_MVME172,%d1
+	jne	5f
+	/* 162/172; it's an SCC */
+6:	btst	#2,M162_SCC_CTRL_A
+	nop
+	nop
+	nop
+	jeq	6b
+	moveb	#8,M162_SCC_CTRL_A
+	nop
+	nop
+	nop
+	moveb	%d0,M162_SCC_CTRL_A
+	jra	3f
+5:
+	/* 166/167/177; it's a CD2401 */
+	moveb	#0,M167_CYCAR
+	moveb	M167_CYIER,%d2
+	moveb	#0x02,M167_CYIER
+7:
+	btst	#5,M167_PCSCCTICR
+	jeq	7b
+	moveb	M167_PCTPIACKR,%d1
+	moveb	M167_CYLICR,%d1
+	jeq	8f
+	moveb	#0x08,M167_CYTEOIR
+	jra	7b
+8:
+	moveb	%d0,M167_CYTDR
+	moveb	#0,M167_CYTEOIR
+	moveb	%d2,M167_CYIER
+	jra	3f
+1:
+	moveb	%d0,%sp@-
+	trap	#15
+	.word	0x0020	/* TRAP 0x020 */
+3:
+	moveml	%sp@+,%d0-%d7/%a2-%a6
+	jbra	L(serial_putc_done)
+2:
+#endif /* CONFIG_MVME16x */
+
+#ifdef CONFIG_BVME6000
+	is_not_bvme6000(2f)
+	/*
+	 * The BVME6000 machine has a serial port ...
+	 */
+1:	btst	#2,BVME_SCC_CTRL_A
+	jeq	1b
+	moveb	%d0,BVME_SCC_DATA_A
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_SUN3X
+	is_not_sun3x(2f)
+	movel	%d0,-(%sp)
+	movel	0xFEFE0018,%a1
+	jbsr	(%a1)
+	addq	#4,%sp
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_Q40
+	is_not_q40(2f)
+	tst.l	%pc@(L(q40_do_debug))	/* only debug if requested */
+	beq	2f
+	lea	%pc@(q40_mem_cptr),%a1
+	move.l	%a1@,%a0
+	move.b	%d0,%a0@
+	addq.l	#4,%a0
+	move.l	%a0,%a1@
+	jbra    L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_APOLLO
+	is_not_apollo(2f)
+	movl    %pc@(L(iobase)),%a1
+	moveb	%d0,%a1@(LTHRB0)
+1:      moveb   %a1@(LSRB0),%d0
+	andb	#0x4,%d0
+	beq	1b
+	jbra	L(serial_putc_done)
+2:
+#endif
+
+#ifdef CONFIG_HP300
+	is_not_hp300(3f)
+	movl    %pc@(L(iobase)),%a1
+	addl	%pc@(L(uartbase)),%a1
+	movel	%pc@(L(uart_scode)),%d1	/* Check the scode */
+	jmi	3f			/* Unset? Exit */
+	cmpi	#256,%d1		/* APCI scode? */
+	jeq	2f
+1:      moveb   %a1@(DCALSR),%d1	/* Output to DCA */
+	andb	#0x20,%d1
+	beq	1b
+	moveb	%d0,%a1@(DCADATA)
+	jbra	L(serial_putc_done)
+2:	moveb	%a1@(APCILSR),%d1	/* Output to APCI */
+	andb	#0x20,%d1
+	beq	2b
+	moveb	%d0,%a1@(APCIDATA)
+	jbra	L(serial_putc_done)
+3:
+#endif
+
+L(serial_putc_done):
+func_return	serial_putc
+
+/*
+ * Output a string.
+ */
+func_start	puts,%d0/%a0
+
+	movel	ARG1,%a0
+	jra	2f
+1:
+#ifdef CONSOLE_DEBUG
+	console_putc	%d0
+#endif
+#ifdef SERIAL_DEBUG
+	serial_putc	%d0
+#endif
+2:	moveb	%a0@+,%d0
+	jne	1b
+
+func_return	puts
+
+/*
+ * Output number in hex notation.
+ */
+
+func_start	putn,%d0-%d2
+
+	putc	' '
+
+	movel	ARG1,%d0
+	moveq	#7,%d1
+1:	roll	#4,%d0
+	move	%d0,%d2
+	andb	#0x0f,%d2
+	addb	#'0',%d2
+	cmpb	#'9',%d2
+	jls	2f
+	addb	#'A'-('9'+1),%d2
+2:
+#ifdef CONSOLE_DEBUG
+	console_putc	%d2
+#endif
+#ifdef SERIAL_DEBUG
+	serial_putc	%d2
+#endif
+	dbra	%d1,1b
+
+func_return	putn
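+/*
+ * Equivalent C for the conversion above (the roll #4 walks the eight
+ * nibbles from most to least significant; putc_dbg stands in for the
+ * console_putc/serial_putc macros and is only illustrative):
+ *
+ *	static void putn_c(unsigned long v)
+ *	{
+ *		int i;
+ *
+ *		for (i = 7; i >= 0; i--) {
+ *			unsigned char nib = (v >> (i * 4)) & 0xf;
+ *
+ *			putc_dbg(nib < 10 ? '0' + nib : 'A' + nib - 10);
+ *		}
+ *	}
+ */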
+
+#ifdef CONFIG_EARLY_PRINTK
+/*
+ *	This routine takes its parameters on the stack.  It then
+ *	turns around and calls the internal routines.  This routine
+ *	is used by the boot console.
+ *
+ *	The calling parameters are:
+ *		void debug_cons_nputs(const char *str, unsigned length)
+ *
+ *	This routine does NOT understand variable arguments, only
+ *	simple strings!
+ */
+ENTRY(debug_cons_nputs)
+	moveml	%d0/%d1/%a0,%sp@-
+	movew	%sr,%sp@-
+	ori	#0x0700,%sr
+	movel	%sp@(18),%a0		/* fetch parameter */
+	movel	%sp@(22),%d1		/* fetch parameter */
+	jra	2f
+1:
+#ifdef CONSOLE_DEBUG
+	console_putc	%d0
+#endif
+#ifdef SERIAL_DEBUG
+	serial_putc	%d0
+#endif
+	subq	#1,%d1
+2:	jeq	3f
+	moveb	%a0@+,%d0
+	jne	1b
+3:
+	movew	%sp@+,%sr
+	moveml	%sp@+,%d0/%d1/%a0
+	rts
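+/*
+ * From C, a boot console's write hook can simply forward to this routine
+ * using the prototype documented above (a hedged sketch, not a copy of
+ * the actual early_printk code):
+ *
+ *	#include <linux/console.h>
+ *
+ *	extern void debug_cons_nputs(const char *str, unsigned length);
+ *
+ *	static void debug_cons_write(struct console *c,
+ *				     const char *s, unsigned n)
+ *	{
+ *		debug_cons_nputs(s, n);
+ *	}
+ */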
+#endif /* CONFIG_EARLY_PRINTK */
+
+#if defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+func_start	set_leds,%d0/%a0
+	movel	ARG1,%d0
+#ifdef CONFIG_HP300
+	is_not_hp300(1f)
+	movel	%pc@(L(iobase)),%a0
+	moveb	%d0,%a0@(0x1ffff)
+	jra	2f
+#endif
+1:
+#ifdef CONFIG_APOLLO
+	movel   %pc@(L(iobase)),%a0
+	lsll    #8,%d0
+	eorw    #0xff00,%d0
+	moveb	%d0,%a0@(LCPUCTRL)
+#endif
+2:
+func_return	set_leds
+#endif
+
+#ifdef CONSOLE_DEBUG
+/*
+ *	For continuity, see the data alignment
+ *	to which this structure is tied.
+ */
+#define Lconsole_struct_cur_column	0
+#define Lconsole_struct_cur_row		4
+#define Lconsole_struct_num_columns	8
+#define Lconsole_struct_num_rows	12
+#define Lconsole_struct_left_edge	16
+
+func_start	console_init,%a0-%a4/%d0-%d7
+	/*
+	 *	Some of the register usage that follows
+	 *		a0 = pointer to boot_info
+	 *		a1 = pointer to screen
+	 *		a2 = pointer to console_globals
+	 *		d3 = pixel width of screen
+	 *		d4 = pixel height of screen
+	 *		(d3,d4) ~= (x,y) of a point just below
+	 *			and to the right of the screen
+	 *			NOT on the screen!
+	 *		d5 = number of bytes per scan line
+	 *		d6 = number of bytes on the entire screen
+	 */
+
+	lea	%pc@(L(console_globals)),%a2
+	movel	%pc@(L(mac_videobase)),%a1
+	movel	%pc@(L(mac_rowbytes)),%d5
+	movel	%pc@(L(mac_dimensions)),%d3	/* -> low byte */
+	movel	%d3,%d4
+	swap	%d4		/* -> high byte */
+	andl	#0xffff,%d3	/* d3 = screen width in pixels */
+	andl	#0xffff,%d4	/* d4 = screen height in pixels */
+
+	movel	%d5,%d6
+|	subl	#20,%d6
+	mulul	%d4,%d6		/* scan line bytes x num scan lines */
+	divul	#8,%d6		/* we'll clear 8 bytes at a time */
+	moveq	#-1,%d0		/* Mac_black */
+	subq	#1,%d6
+
+L(console_clear_loop):
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	dbra	%d6,L(console_clear_loop)
+
+	/* Calculate font size */
+
+#if   defined(FONT_8x8) && defined(CONFIG_FONT_8x8)
+	lea	%pc@(font_vga_8x8),%a0
+#elif defined(FONT_8x16) && defined(CONFIG_FONT_8x16)
+	lea	%pc@(font_vga_8x16),%a0
+#elif defined(FONT_6x11) && defined(CONFIG_FONT_6x11)
+	lea	%pc@(font_vga_6x11),%a0
+#elif defined(CONFIG_FONT_8x8) /* default */
+	lea	%pc@(font_vga_8x8),%a0
+#else /* no compiled-in font */
+	lea	0,%a0
+#endif
+
+	/*
+	 *	At this point we make a shift in register usage
+	 *	a1 = address of console_font pointer
+	 */
+	lea	%pc@(L(console_font)),%a1
+	movel	%a0,%a1@	/* store pointer to struct fbcon_font_desc in console_font */
+	tstl	%a0
+	jeq	1f
+	lea	%pc@(L(console_font_data)),%a4
+	movel	%a0@(FONT_DESC_DATA),%d0
+	subl	#L(console_font),%a1
+	addl	%a1,%d0
+	movel	%d0,%a4@
+
+	/*
+	 *	Calculate global maximums.
+	 *	Note - we can use either an
+	 *	8 x 16 or 8 x 8 character font;
+	 *	6 x 11 is also supported.
+	 */
+		/* ASSERT: a0 = contents of Lconsole_font */
+	movel	%d3,%d0				/* screen width in pixels */
+	divul	%a0@(FONT_DESC_WIDTH),%d0	/* d0 = max num chars per row */
+
+	movel	%d4,%d1				/* screen height in pixels */
+	divul	%a0@(FONT_DESC_HEIGHT),%d1	/* d1 = max num rows */
+
+	movel	%d0,%a2@(Lconsole_struct_num_columns)
+	movel	%d1,%a2@(Lconsole_struct_num_rows)
+
+	/*
+	 *	Clear the current row and column
+	 */
+	clrl	%a2@(Lconsole_struct_cur_column)
+	clrl	%a2@(Lconsole_struct_cur_row)
+	clrl	%a2@(Lconsole_struct_left_edge)
+
+	/*
+	 * Initialization is complete
+	 */
+1:
+func_return	console_init
+
+#ifdef CONFIG_LOGO
+func_start	console_put_penguin,%a0-%a1/%d0-%d7
+	/*
+	 *	Get 'that_penguin' onto the screen in the upper right corner
+	 *	penguin is 64 x 74 pixels, align against right edge of screen
+	 */
+	lea	%pc@(L(mac_dimensions)),%a0
+	movel	%a0@,%d0
+	andil	#0xffff,%d0
+	subil	#64,%d0		/* snug up against the right edge */
+	clrl	%d1		/* start at the top */
+	movel	#73,%d7
+	lea	%pc@(L(that_penguin)),%a1
+L(console_penguin_row):
+	movel	#31,%d6
+L(console_penguin_pixel_pair):
+	moveb	%a1@,%d2
+	lsrb	#4,%d2
+	console_plot_pixel %d0,%d1,%d2
+	addq	#1,%d0
+	moveb	%a1@+,%d2
+	console_plot_pixel %d0,%d1,%d2
+	addq	#1,%d0
+	dbra	%d6,L(console_penguin_pixel_pair)
+
+	subil	#64,%d0
+	addq	#1,%d1
+	dbra	%d7,L(console_penguin_row)
+
+func_return	console_put_penguin
+
+/* include penguin bitmap */
+L(that_penguin):
+#include "../mac/mac_penguin.S"
+#endif
+
+	/*
+	 * Calculate source and destination addresses
+	 *	output	a1 = dest
+	 *		a2 = source
+	 */
+
+func_start	console_scroll,%a0-%a4/%d0-%d7
+	lea	%pc@(L(mac_videobase)),%a0
+	movel	%a0@,%a1
+	movel	%a1,%a2
+	lea	%pc@(L(mac_rowbytes)),%a0
+	movel	%a0@,%d5
+	movel	%pc@(L(console_font)),%a0
+	tstl	%a0
+	jeq	1f
+	mulul	%a0@(FONT_DESC_HEIGHT),%d5	/* account for # scan lines per character */
+	addal	%d5,%a2
+
+	/*
+	 * Get dimensions
+	 */
+	lea	%pc@(L(mac_dimensions)),%a0
+	movel	%a0@,%d3
+	movel	%d3,%d4
+	swap	%d4
+	andl	#0xffff,%d3	/* d3 = screen width in pixels */
+	andl	#0xffff,%d4	/* d4 = screen height in pixels */
+
+	/*
+	 * Calculate number of bytes to move
+	 */
+	lea	%pc@(L(mac_rowbytes)),%a0
+	movel	%a0@,%d6
+	movel	%pc@(L(console_font)),%a0
+	subl	%a0@(FONT_DESC_HEIGHT),%d4	/* we're not scrolling the top row! */
+	mulul	%d4,%d6		/* scan line bytes x num scan lines */
+	divul	#32,%d6		/* we'll move 8 longs at a time */
+	subq	#1,%d6
+
+L(console_scroll_loop):
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	movel	%a2@+,%a1@+
+	dbra	%d6,L(console_scroll_loop)
+
+	lea	%pc@(L(mac_rowbytes)),%a0
+	movel	%a0@,%d6
+	movel	%pc@(L(console_font)),%a0
+	mulul	%a0@(FONT_DESC_HEIGHT),%d6	/* scan line bytes x font height */
+	divul	#32,%d6			/* we'll move 8 longs at a time */
+	subq	#1,%d6
+
+	moveq	#-1,%d0
+L(console_scroll_clear_loop):
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	movel	%d0,%a1@+
+	dbra	%d6,L(console_scroll_clear_loop)
+
+1:
+func_return	console_scroll
+
+
+func_start	console_putc,%a0/%a1/%d0-%d7
+
+	is_not_mac(L(console_exit))
+	tstl	%pc@(L(console_font))
+	jeq	L(console_exit)
+
+	/* Output character in d7 on console.
+	 */
+	movel	ARG1,%d7
+	cmpib	#'\n',%d7
+	jbne	1f
+
+	/* A little safe recursion is good for the soul */
+	console_putc	#'\r'
+1:
+	lea	%pc@(L(console_globals)),%a0
+
+	cmpib	#10,%d7
+	jne	L(console_not_lf)
+	movel	%a0@(Lconsole_struct_cur_row),%d0
+	addil	#1,%d0
+	movel	%d0,%a0@(Lconsole_struct_cur_row)
+	movel	%a0@(Lconsole_struct_num_rows),%d1
+	cmpl	%d1,%d0
+	jcs	1f
+	subil	#1,%d0
+	movel	%d0,%a0@(Lconsole_struct_cur_row)
+	console_scroll
+1:
+	jra	L(console_exit)
+
+L(console_not_lf):
+	cmpib	#13,%d7
+	jne	L(console_not_cr)
+	clrl	%a0@(Lconsole_struct_cur_column)
+	jra	L(console_exit)
+
+L(console_not_cr):
+	cmpib	#1,%d7
+	jne	L(console_not_home)
+	clrl	%a0@(Lconsole_struct_cur_row)
+	clrl	%a0@(Lconsole_struct_cur_column)
+	jra	L(console_exit)
+
+/*
+ *	At this point we know that the %d7 character is going to be
+ *	rendered on the screen.  Register usage is -
+ *		a0 = pointer to console globals
+ *		a1 = font data
+ *		d0 = cursor column
+ *		d1 = cursor row to draw the character
+ *		d7 = character number
+ */
+L(console_not_home):
+	movel	%a0@(Lconsole_struct_cur_column),%d0
+	addql	#1,%a0@(Lconsole_struct_cur_column)
+	movel	%a0@(Lconsole_struct_num_columns),%d1
+	cmpl	%d1,%d0
+	jcs	1f
+	console_putc	#'\n'	/* recursion is OK! */
+1:
+	movel	%a0@(Lconsole_struct_cur_row),%d1
+
+	/*
+	 *	At this point we make a shift in register usage
+	 *	a0 = pointer to the font descriptor (fbcon_font_desc)
+	 */
+	movel	%pc@(L(console_font)),%a0
+	movel	%pc@(L(console_font_data)),%a1	/* Load fbcon_font_desc.data into a1 */
+	andl	#0x000000ff,%d7
+		/* ASSERT: a0 = contents of Lconsole_font */
+	mulul	%a0@(FONT_DESC_HEIGHT),%d7	/* d7 = index into font data */
+	addl	%d7,%a1			/* a1 = points to char image */
+
+	/*
+	 *	At this point we make a shift in register usage
+	 *	d0 = pixel coordinate, x
+	 *	d1 = pixel coordinate, y
+	 *	d2 = (bit 0) 1/0 for white/black (!) pixel on screen
+	 *	d3 = font scan line data (8 pixels)
+	 *	d6 = count down for the font's pixel width (8)
+	 *	d7 = count down for the font's pixel count in height
+	 */
+		/* ASSERT: a0 = contents of Lconsole_font */
+	mulul	%a0@(FONT_DESC_WIDTH),%d0
+	mulul	%a0@(FONT_DESC_HEIGHT),%d1
+	movel	%a0@(FONT_DESC_HEIGHT),%d7	/* Load fbcon_font_desc.height into d7 */
+	subq	#1,%d7
+L(console_read_char_scanline):
+	moveb	%a1@+,%d3
+
+		/* ASSERT: a0 = contents of Lconsole_font */
+	movel	%a0@(FONT_DESC_WIDTH),%d6	/* Load fbcon_font_desc.width into d6 */
+	subql	#1,%d6
+
+L(console_do_font_scanline):
+	lslb	#1,%d3
+	scsb	%d2		/* convert 1 bit into a byte */
+	console_plot_pixel %d0,%d1,%d2
+	addq	#1,%d0
+	dbra	%d6,L(console_do_font_scanline)
+
+		/* ASSERT: a0 = contents of Lconsole_font */
+	subl	%a0@(FONT_DESC_WIDTH),%d0
+	addq	#1,%d1
+	dbra	%d7,L(console_read_char_scanline)
+
+L(console_exit):
+func_return	console_putc
+
+	/*
+	 *	Input:
+	 *		d0 = x coordinate
+	 *		d1 = y coordinate
+	 *		d2 = (bit 0) 1/0 for white/black (!)
+	 *	All registers are preserved
+	 */
+func_start	console_plot_pixel,%a0-%a1/%d0-%d4
+
+	movel	%pc@(L(mac_videobase)),%a1
+	movel	%pc@(L(mac_videodepth)),%d3
+	movel	ARG1,%d0
+	movel	ARG2,%d1
+	mulul	%pc@(L(mac_rowbytes)),%d1
+	movel	ARG3,%d2
+
+	/*
+	 *	Register usage:
+	 *		d0 = x coord becomes byte offset into frame buffer
+	 *		d1 = y coord
+	 *		d2 = black or white (0/1)
+	 *		d3 = video depth
+	 *		d4 = temp of x (d0) for many bit depths
+	 */
+L(test_1bit):
+	cmpb	#1,%d3
+	jbne	L(test_2bit)
+	movel	%d0,%d4		/* we need the low order 3 bits! */
+	divul	#8,%d0
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#7,%d4
+	eorb	#7,%d4		/* reverse the x-coordinate w/ screen-bit # */
+	andb	#1,%d2
+	jbne	L(white_1)
+	bsetb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_1):
+	bclrb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_2bit):
+	cmpb	#2,%d3
+	jbne	L(test_4bit)
+	movel	%d0,%d4		/* we need the low order 2 bits! */
+	divul	#4,%d0
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#3,%d4
+	eorb	#3,%d4		/* reverse the x-coordinate w/ screen-bit # */
+	lsll	#1,%d4		/* ! */
+	andb	#1,%d2
+	jbne	L(white_2)
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_2):
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_4bit):
+	cmpb	#4,%d3
+	jbne	L(test_8bit)
+	movel	%d0,%d4		/* we need the low order bit! */
+	divul	#2,%d0
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#1,%d4
+	eorb	#1,%d4
+	lsll	#2,%d4		/* ! */
+	andb	#1,%d2
+	jbne	L(white_4)
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	addq	#1,%d4
+	bsetb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_4):
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	addq	#1,%d4
+	bclrb	%d4,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_8bit):
+	cmpb	#8,%d3
+	jbne	L(test_16bit)
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#1,%d2
+	jbne	L(white_8)
+	moveb	#0xff,%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_8):
+	clrb	%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(test_16bit):
+	cmpb	#16,%d3
+	jbne	L(console_plot_pixel_exit)
+	addal	%d0,%a1
+	addal	%d0,%a1
+	addal	%d1,%a1
+	andb	#1,%d2
+	jbne	L(white_16)
+	clrw	%a1@
+	jbra	L(console_plot_pixel_exit)
+L(white_16):
+	movew	#0x0fff,%a1@
+	jbra	L(console_plot_pixel_exit)
+
+L(console_plot_pixel_exit):
+func_return	console_plot_pixel
+#endif /* CONSOLE_DEBUG */
+
+
+__INITDATA
+	.align	4
+
+m68k_init_mapped_size:
+	.long	0
+
+#if defined(CONFIG_ATARI) || defined(CONFIG_AMIGA) || \
+    defined(CONFIG_HP300) || defined(CONFIG_APOLLO)
+L(custom):
+L(iobase):
+	.long 0
+#endif
+
+#ifdef CONSOLE_DEBUG
+L(console_globals):
+	.long	0		/* cursor column */
+	.long	0		/* cursor row */
+	.long	0		/* max num columns */
+	.long	0		/* max num rows */
+	.long	0		/* left edge */
+L(console_font):
+	.long	0		/* pointer to console font (struct font_desc) */
+L(console_font_data):
+	.long	0		/* pointer to console font data */
+#endif /* CONSOLE_DEBUG */
+
+#if defined(MMU_PRINT)
+L(mmu_print_data):
+	.long	0		/* valid flag */
+	.long	0		/* start logical */
+	.long	0		/* next logical */
+	.long	0		/* start physical */
+	.long	0		/* next physical */
+#endif /* MMU_PRINT */
+
+L(cputype):
+	.long	0
+L(mmu_cached_pointer_tables):
+	.long	0
+L(mmu_num_pointer_tables):
+	.long	0
+L(phys_kernel_start):
+	.long	0
+L(kernel_end):
+	.long	0
+L(memory_start):
+	.long	0
+L(kernel_pgdir_ptr):
+	.long	0
+L(temp_mmap_mem):
+	.long	0
+
+#if defined (CONFIG_MVME147)
+M147_SCC_CTRL_A = 0xfffe3002
+M147_SCC_DATA_A = 0xfffe3003
+#endif
+
+#if defined (CONFIG_MVME16x)
+M162_SCC_CTRL_A = 0xfff45005
+M167_CYCAR = 0xfff450ee
+M167_CYIER = 0xfff45011
+M167_CYLICR = 0xfff45026
+M167_CYTEOIR = 0xfff45085
+M167_CYTDR = 0xfff450f8
+M167_PCSCCMICR = 0xfff4201d
+M167_PCSCCTICR = 0xfff4201e
+M167_PCSCCRICR = 0xfff4201f
+M167_PCTPIACKR = 0xfff42025
+#endif
+
+#if defined (CONFIG_BVME6000)
+BVME_SCC_CTRL_A	= 0xffb0000b
+BVME_SCC_DATA_A	= 0xffb0000f
+#endif
+
+#if defined(CONFIG_MAC)
+L(mac_videobase):
+	.long	0
+L(mac_videodepth):
+	.long	0
+L(mac_dimensions):
+	.long	0
+L(mac_rowbytes):
+	.long	0
+L(mac_sccbase):
+	.long	0
+#endif /* CONFIG_MAC */
+
+#if defined (CONFIG_APOLLO)
+LSRB0        = 0x10412
+LTHRB0       = 0x10416
+LCPUCTRL     = 0x10100
+#endif
+
+#if defined(CONFIG_HP300)
+DCADATA	     = 0x11
+DCALSR	     = 0x1b
+APCIDATA     = 0x00
+APCILSR      = 0x14
+L(uartbase):
+	.long	0
+L(uart_scode):
+	.long	-1
+#endif
+
+__FINIT
+	.data
+	.align	4
+
+availmem:
+	.long	0
+m68k_pgtable_cachemode:
+	.long	0
+m68k_supervisor_cachemode:
+	.long	0
+#if defined(CONFIG_MVME16x)
+mvme_bdid:
+	.long	0,0,0,0,0,0,0,0
+#endif
+#if defined(CONFIG_Q40)
+q40_mem_cptr:
+	.long	0
+L(q40_do_debug):
+	.long	0
+#endif
diff --git a/arch/m68k/kernel/ints.c b/arch/m68k/kernel/ints.c
new file mode 100644
index 0000000..5b8d66f
--- /dev/null
+++ b/arch/m68k/kernel/ints.c
@@ -0,0 +1,169 @@
+/*
+ * linux/arch/m68k/kernel/ints.c -- Linux/m68k general interrupt handling code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/machdep.h>
+#include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+
+#ifdef CONFIG_Q40
+#include <asm/q40ints.h>
+#endif
+
+extern u32 auto_irqhandler_fixup[];
+extern u16 user_irqvec_fixup[];
+
+static int m68k_first_user_vec;
+
+static struct irq_chip auto_irq_chip = {
+	.name		= "auto",
+	.irq_startup	= m68k_irq_startup,
+	.irq_shutdown	= m68k_irq_shutdown,
+};
+
+static struct irq_chip user_irq_chip = {
+	.name		= "user",
+	.irq_startup	= m68k_irq_startup,
+	.irq_shutdown	= m68k_irq_shutdown,
+};
+
+/*
+ * void init_IRQ(void)
+ *
+ * Parameters:	None
+ *
+ * Returns:	Nothing
+ *
+ * This function should be called during kernel startup to initialize
+ * the IRQ handling routines.
+ */
+
+void __init init_IRQ(void)
+{
+	int i;
+
+	for (i = IRQ_AUTO_1; i <= IRQ_AUTO_7; i++)
+		irq_set_chip_and_handler(i, &auto_irq_chip, handle_simple_irq);
+
+	mach_init_IRQ();
+}
+
+/**
+ * m68k_setup_auto_interrupt
+ * @handler: called from auto vector interrupts
+ *
+ * Set up the handler to be called for auto vector interrupts instead of the
+ * standard do_IRQ(); it will be called with irq numbers in the range
+ * IRQ_AUTO_1 to IRQ_AUTO_7.
+ */
+void __init m68k_setup_auto_interrupt(void (*handler)(unsigned int, struct pt_regs *))
+{
+	if (handler)
+		*auto_irqhandler_fixup = (u32)handler;
+	flush_icache();
+}
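+
+/*
+ * Usage sketch (hypothetical platform code): a platform that wants to
+ * dispatch the seven auto vectors itself instead of going through do_IRQ()
+ * can install its own first-level handler:
+ *
+ *	static void my_auto_handler(unsigned int irq, struct pt_regs *regs)
+ *	{
+ *		do_IRQ(my_demux(irq), regs);
+ *	}
+ *
+ *	m68k_setup_auto_interrupt(my_auto_handler);
+ *
+ * The handler is then entered with irq in the IRQ_AUTO_1..IRQ_AUTO_7 range.
+ */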
+
+/**
+ * m68k_setup_user_interrupt
+ * @vec: first user vector interrupt to handle
+ * @cnt: number of active user vector interrupts
+ *
+ * Set up user vector interrupts. This includes activating the specified range
+ * of interrupts; only then can these interrupts be requested (note: this is
+ * different from auto vector interrupts).
+ */
+void __init m68k_setup_user_interrupt(unsigned int vec, unsigned int cnt)
+{
+	int i;
+
+	BUG_ON(IRQ_USER + cnt > NR_IRQS);
+	m68k_first_user_vec = vec;
+	for (i = 0; i < cnt; i++)
+		irq_set_chip_and_handler(i, &user_irq_chip, handle_simple_irq);
+	*user_irqvec_fixup = vec - IRQ_USER;
+	flush_icache();
+}
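+
+/*
+ * Usage sketch (hypothetical vector count): a platform whose expansion
+ * hardware uses 32 vectors starting at VEC_USER would call
+ *
+ *	m68k_setup_user_interrupt(VEC_USER, 32);
+ *
+ * after which drivers may request_irq(IRQ_USER + n, ...) for n < 32, and
+ * m68k_irq_startup() will point vectors[VEC_USER + n] at user_inthandler.
+ */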
+
+/**
+ * m68k_setup_irq_controller
+ * @chip: irq chip which controls specified irq
+ * @handle: flow handler which handles specified irq
+ * @irq: first irq to be managed by the controller
+ * @cnt: number of irqs to be managed by the controller
+ *
+ * Change the controller for the specified range of irqs, which will then be
+ * used to manage them. Auto/user irqs already have a default controller, which
+ * can be changed as well, but the replacement controller should probably still
+ * use m68k_irq_startup/m68k_irq_shutdown.
+ */
+void m68k_setup_irq_controller(struct irq_chip *chip,
+			       irq_flow_handler_t handle, unsigned int irq,
+			       unsigned int cnt)
+{
+	int i;
+
+	for (i = 0; i < cnt; i++) {
+		irq_set_chip(irq + i, chip);
+		if (handle)
+			irq_set_handler(irq + i, handle);
+	}
+}
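+
+/*
+ * Usage sketch (hypothetical platform code): a board with its own interrupt
+ * controller can route a block of irqs through a custom chip:
+ *
+ *	static struct irq_chip my_irq_chip = {
+ *		.name		= "myboard",
+ *		.irq_startup	= m68k_irq_startup,
+ *		.irq_shutdown	= m68k_irq_shutdown,
+ *		.irq_enable	= my_irq_enable,
+ *		.irq_disable	= my_irq_disable,
+ *	};
+ *
+ *	m68k_setup_irq_controller(&my_irq_chip, handle_level_irq,
+ *				  IRQ_USER, 16);
+ */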
+
+unsigned int m68k_irq_startup_irq(unsigned int irq)
+{
+	if (irq <= IRQ_AUTO_7)
+		vectors[VEC_SPUR + irq] = auto_inthandler;
+	else
+		vectors[m68k_first_user_vec + irq - IRQ_USER] = user_inthandler;
+	return 0;
+}
+
+unsigned int m68k_irq_startup(struct irq_data *data)
+{
+	return m68k_irq_startup_irq(data->irq);
+}
+
+void m68k_irq_shutdown(struct irq_data *data)
+{
+	unsigned int irq = data->irq;
+
+	if (irq <= IRQ_AUTO_7)
+		vectors[VEC_SPUR + irq] = bad_inthandler;
+	else
+		vectors[m68k_first_user_vec + irq - IRQ_USER] = bad_inthandler;
+}
+
+
+unsigned int irq_canonicalize(unsigned int irq)
+{
+#ifdef CONFIG_Q40
+	if (MACH_IS_Q40 && irq == 11)
+		irq = 10;
+#endif
+	return irq;
+}
+
+EXPORT_SYMBOL(irq_canonicalize);
+
+
+asmlinkage void handle_badint(struct pt_regs *regs)
+{
+	atomic_inc(&irq_err_count);
+	pr_warn("unexpected interrupt from %u\n", regs->vector);
+}
diff --git a/arch/m68k/kernel/irq.c b/arch/m68k/kernel/irq.c
new file mode 100644
index 0000000..9ab4f55
--- /dev/null
+++ b/arch/m68k/kernel/irq.c
@@ -0,0 +1,39 @@
+/*
+ * irq.c
+ *
+ * (C) Copyright 2007, Greg Ungerer <gerg@snapgear.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kernel_stat.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/seq_file.h>
+#include <asm/traps.h>
+
+asmlinkage void do_IRQ(int irq, struct pt_regs *regs)
+{
+	struct pt_regs *oldregs = set_irq_regs(regs);
+
+	irq_enter();
+	generic_handle_irq(irq);
+	irq_exit();
+
+	set_irq_regs(oldregs);
+}
+
+
+/* The number of spurious interrupts */
+atomic_t irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+	return 0;
+}
diff --git a/arch/m68k/kernel/machine_kexec.c b/arch/m68k/kernel/machine_kexec.c
new file mode 100644
index 0000000..206f849
--- /dev/null
+++ b/arch/m68k/kernel/machine_kexec.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * machine_kexec.c - handle transition of Linux booting another kernel
+ */
+#include <linux/compiler.h>
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const size_t relocate_new_kernel_size;
+
+int machine_kexec_prepare(struct kimage *kimage)
+{
+	return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void machine_shutdown(void)
+{
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+}
+
+typedef void (*relocate_kernel_t)(unsigned long ptr,
+				  unsigned long start,
+				  unsigned long cpu_mmu_flags) __noreturn;
+
+void machine_kexec(struct kimage *image)
+{
+	void *reboot_code_buffer;
+	unsigned long cpu_mmu_flags;
+
+	reboot_code_buffer = page_address(image->control_code_page);
+
+	memcpy(reboot_code_buffer, relocate_new_kernel,
+	       relocate_new_kernel_size);
+
+	/*
+	 * we do not want to be bothered.
+	 */
+	local_irq_disable();
+
+	pr_info("Will call new kernel at 0x%08lx. Bye...\n", image->start);
+	__flush_cache_all();
+	cpu_mmu_flags = m68k_cputype | m68k_mmutype << 8;
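+	/*
+	 * Example (assuming the usual bit values from <asm/setup.h>, e.g.
+	 * CPU_68040 = 0x4 and MMU_68040 = 0x4): on a 68040 this packs
+	 * cpu_mmu_flags = 0x04 | (0x04 << 8) = 0x0404, so relocate_new_kernel()
+	 * finds the CPU type in bits 0-7 and the MMU type from bit 8 upwards
+	 * (MMU_BASE in relocate_kernel.S).
+	 */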
+	((relocate_kernel_t) reboot_code_buffer)(image->head & PAGE_MASK,
+						 image->start,
+						 cpu_mmu_flags);
+}
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
new file mode 100644
index 0000000..aaac2da
--- /dev/null
+++ b/arch/m68k/kernel/module.c
@@ -0,0 +1,129 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#if 0
+#define DEBUGP(fmt, ...) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#else
+#define DEBUGP(fmt, ...) no_printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__)
+#endif
+
+#ifdef CONFIG_MODULES
+
+int apply_relocate(Elf32_Shdr *sechdrs,
+		   const char *strtab,
+		   unsigned int symindex,
+		   unsigned int relsec,
+		   struct module *me)
+{
+	unsigned int i;
+	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	uint32_t *location;
+
+	DEBUGP("Applying relocate section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_68K_32:
+			/* We add the value into the location given */
+			*location += sym->st_value;
+			break;
+		case R_68K_PC32:
+			/* Add the value, subtract its position */
+			*location += sym->st_value - (uint32_t)location;
+			break;
+		default:
+			pr_err("module %s: Unknown relocation: %u\n", me->name,
+			       ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs,
+		       const char *strtab,
+		       unsigned int symindex,
+		       unsigned int relsec,
+		       struct module *me)
+{
+	unsigned int i;
+	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
+	Elf32_Sym *sym;
+	uint32_t *location;
+
+	DEBUGP("Applying relocate_add section %u to %u\n", relsec,
+	       sechdrs[relsec].sh_info);
+	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+		/* This is where to make the change */
+		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+			+ rel[i].r_offset;
+		/* This is the symbol it is referring to.  Note that all
+		   undefined symbols have been resolved.  */
+		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+			+ ELF32_R_SYM(rel[i].r_info);
+
+		switch (ELF32_R_TYPE(rel[i].r_info)) {
+		case R_68K_32:
+			/* We add the value into the location given */
+			*location = rel[i].r_addend + sym->st_value;
+			break;
+		case R_68K_PC32:
+			/* Add the value, subtract its position */
+			*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
+			break;
+		default:
+			pr_err("module %s: Unknown relocation: %u\n", me->name,
+			       ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+	}
+	return 0;
+}
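+
+/*
+ * Worked example (hypothetical numbers): for an R_68K_PC32 rela entry with
+ * addend 0, a symbol at 0x1000 and a fixup location at 0x2000, the stored
+ * value becomes 0x1000 - 0x2000 = -0x1000, i.e. the displacement from the
+ * fixup site to the symbol; R_68K_32 would simply store 0x1000.
+ */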
+
+int module_finalize(const Elf_Ehdr *hdr,
+		    const Elf_Shdr *sechdrs,
+		    struct module *mod)
+{
+	module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
+	return 0;
+}
+
+#endif /* CONFIG_MODULES */
+
+void module_fixup(struct module *mod, struct m68k_fixup_info *start,
+		  struct m68k_fixup_info *end)
+{
+#ifdef CONFIG_MMU
+	struct m68k_fixup_info *fixup;
+
+	for (fixup = start; fixup < end; fixup++) {
+		switch (fixup->type) {
+		case m68k_fixup_memoffset:
+			*(u32 *)fixup->addr = m68k_memoffset;
+			break;
+		case m68k_fixup_vnode_shift:
+			*(u16 *)fixup->addr += m68k_virt_to_node_shift;
+			break;
+		}
+	}
+#endif
+}
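+
+/*
+ * Mechanism sketch: each .m68k_fixup entry is a (type, address) pair.  For
+ * modules, module.lds collects the entries between __start_fixup and
+ * __stop_fixup, and module_finalize() hands that range to module_fixup(),
+ * which patches the word at each recorded address - for example writing the
+ * real m68k_memoffset into the 32-bit immediate of an "addl #0x00000000,%aN"
+ * placeholder like the ones in relocate_kernel.S.
+ */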
diff --git a/arch/m68k/kernel/module.lds b/arch/m68k/kernel/module.lds
new file mode 100644
index 0000000..fda94fa
--- /dev/null
+++ b/arch/m68k/kernel/module.lds
@@ -0,0 +1,7 @@
+SECTIONS {
+	.m68k_fixup : {
+		__start_fixup = .;
+		*(.m68k_fixup)
+		__stop_fixup = .;
+	}
+}
diff --git a/arch/m68k/kernel/pcibios.c b/arch/m68k/kernel/pcibios.c
new file mode 100644
index 0000000..8520250
--- /dev/null
+++ b/arch/m68k/kernel/pcibios.c
@@ -0,0 +1,104 @@
+/*
+ * pci.c -- basic PCI support code
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * (C) Copyright 2011, Greg Ungerer <gerg@uclinux.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+
+/*
+ * From arch/i386/kernel/pci-i386.c:
+ *
+ * We need to avoid collisions with `mirrored' VGA ports
+ * and other strange ISA hardware, so we always want the
+ * addresses to be allocated in the 0x000-0x0ff region
+ * modulo 0x400.
+ *
+ * Why? Because some silly external IO cards only decode
+ * the low 10 bits of the IO address. The 0x00-0xff region
+ * is reserved for motherboard devices that decode all 16
+ * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
+ * but we want to try to avoid allocating at 0x2900-0x2bff
+ * which might be mirrored at 0x0100-0x03ff..
+ */
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+	resource_size_t size, resource_size_t align)
+{
+	resource_size_t start = res->start;
+
+	if ((res->flags & IORESOURCE_IO) && (start & 0x300))
+		start = (start + 0x3ff) & ~0x3ff;
+
+	start = (start + align - 1) & ~(align - 1);
+
+	return start;
+}
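+
+/*
+ * Worked example (hypothetical numbers): an I/O BAR whose resource starts at
+ * 0x2900 with align = 0x100 has (0x2900 & 0x300) != 0, so it is first rounded
+ * up to 0x2c00; that already satisfies the 0x100 alignment, and 0x2c00 modulo
+ * 0x400 falls inside the safe 0x000-0x0ff window described above.
+ */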
+
+/*
+ * This is taken from the ARM code for this.
+ */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+	struct resource *r;
+	u16 cmd, newcmd;
+	int idx;
+
+	pci_read_config_word(dev, PCI_COMMAND, &cmd);
+	newcmd = cmd;
+
+	for (idx = 0; idx < 6; idx++) {
+		/* Only set up the requested stuff */
+		if (!(mask & (1 << idx)))
+			continue;
+
+		r = dev->resource + idx;
+		if (!r->start && r->end) {
+			pr_err("PCI: Device %s not available because of resource collisions\n",
+				pci_name(dev));
+			return -EINVAL;
+		}
+		if (r->flags & IORESOURCE_IO)
+			newcmd |= PCI_COMMAND_IO;
+		if (r->flags & IORESOURCE_MEM)
+			newcmd |= PCI_COMMAND_MEMORY;
+	}
+
+	/*
+	 * Bridges (eg, cardbus bridges) need to be fully enabled
+	 */
+	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
+		newcmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
+
+
+	if (newcmd != cmd) {
+		pr_info("PCI: enabling device %s (0x%04x -> 0x%04x)\n",
+			pci_name(dev), cmd, newcmd);
+		pci_write_config_word(dev, PCI_COMMAND, newcmd);
+	}
+	return 0;
+}
+
+void pcibios_fixup_bus(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, 8);
+		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 32);
+	}
+}
+
+char *pcibios_setup(char *str)
+{
+	return str;
+}
+
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
new file mode 100644
index 0000000..4e77a06
--- /dev/null
+++ b/arch/m68k/kernel/process.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/m68k/kernel/process.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ *
+ *  68060 fixes by Jesper Skov
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/reboot.h>
+#include <linux/init_task.h>
+#include <linux/mqueue.h>
+#include <linux/rcupdate.h>
+
+#include <linux/uaccess.h>
+#include <asm/traps.h>
+#include <asm/machdep.h>
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void arch_cpu_idle(void)
+{
+#if defined(MACH_ATARI_ONLY)
+	/* block out HSYNC on the atari (falcon) */
+	__asm__("stop #0x2200" : : : "cc");
+#else
+	__asm__("stop #0x2000" : : : "cc");
+#endif
+}
+
+void machine_restart(char * __unused)
+{
+	if (mach_reset)
+		mach_reset();
+	for (;;);
+}
+
+void machine_halt(void)
+{
+	if (mach_halt)
+		mach_halt();
+	for (;;);
+}
+
+void machine_power_off(void)
+{
+	if (mach_power_off)
+		mach_power_off();
+	for (;;);
+}
+
+void (*pm_power_off)(void) = machine_power_off;
+EXPORT_SYMBOL(pm_power_off);
+
+void show_regs(struct pt_regs * regs)
+{
+	pr_info("Format %02x  Vector: %04x  PC: %08lx  Status: %04x    %s\n",
+		regs->format, regs->vector, regs->pc, regs->sr,
+		print_tainted());
+	pr_info("ORIG_D0: %08lx  D0: %08lx  A2: %08lx  A1: %08lx\n",
+		regs->orig_d0, regs->d0, regs->a2, regs->a1);
+	pr_info("A0: %08lx  D5: %08lx  D4: %08lx\n", regs->a0, regs->d5,
+		regs->d4);
+	pr_info("D3: %08lx  D2: %08lx  D1: %08lx\n", regs->d3, regs->d2,
+		regs->d1);
+	if (!(regs->sr & PS_S))
+		pr_info("USP: %08lx\n", rdusp());
+}
+
+void flush_thread(void)
+{
+	current->thread.fs = __USER_DS;
+#ifdef CONFIG_FPU
+	if (!FPU_IS_EMU) {
+		unsigned long zero = 0;
+		asm volatile("frestore %0": :"m" (zero));
+	}
+#endif
+}
+
+/*
+ * Why not generic sys_clone, you ask?  m68k passes all arguments on stack.
+ * And we need all registers saved, which means a bunch of stuff pushed
+ * on top of pt_regs, which means that sys_clone() arguments would be
+ * buried.  We could, of course, copy them, but it's too costly for no
+ * good reason - generic clone() would have to copy them *again* for
+ * do_fork() anyway.  So in this case it's actually better to pass pt_regs *
+ * and extract arguments for do_fork() from there.  Eventually we might
+ * go for calling do_fork() directly from the wrapper, but only after we
+ * are finished with do_fork() prototype conversion.
+ */
+asmlinkage int m68k_clone(struct pt_regs *regs)
+{
+	/* regs will be equal to current_pt_regs() */
+	return do_fork(regs->d1, regs->d2, 0,
+		       (int __user *)regs->d3, (int __user *)regs->d4);
+}
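+
+/*
+ * Register convention used here (see also copy_thread() below): the wrapper
+ * arrives with d1 = clone_flags, d2 = new stack pointer, d3 = parent_tidptr,
+ * d4 = child_tidptr and d5 = tls, the last of which copy_thread() picks up
+ * when CLONE_SETTLS is set.
+ */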
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+		 unsigned long arg, struct task_struct *p)
+{
+	struct fork_frame {
+		struct switch_stack sw;
+		struct pt_regs regs;
+	} *frame;
+
+	frame = (struct fork_frame *) (task_stack_page(p) + THREAD_SIZE) - 1;
+
+	p->thread.ksp = (unsigned long)frame;
+	p->thread.esp0 = (unsigned long)&frame->regs;
+
+	/*
+	 * Must save the current SFC/DFC value, NOT the value when
+	 * the parent was last descheduled - RGH  10-08-96
+	 */
+	p->thread.fs = get_fs().seg;
+
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* kernel thread */
+		memset(frame, 0, sizeof(struct fork_frame));
+		frame->regs.sr = PS_S;
+		frame->sw.a3 = usp; /* function */
+		frame->sw.d7 = arg;
+		frame->sw.retpc = (unsigned long)ret_from_kernel_thread;
+		p->thread.usp = 0;
+		return 0;
+	}
+	memcpy(frame, container_of(current_pt_regs(), struct fork_frame, regs),
+		sizeof(struct fork_frame));
+	frame->regs.d0 = 0;
+	frame->sw.retpc = (unsigned long)ret_from_fork;
+	p->thread.usp = usp ?: rdusp();
+
+	if (clone_flags & CLONE_SETTLS)
+		task_thread_info(p)->tp_value = frame->regs.d5;
+
+#ifdef CONFIG_FPU
+	if (!FPU_IS_EMU) {
+		/* Copy the current fpu state */
+		asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
+
+		if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2]) {
+			if (CPU_IS_COLDFIRE) {
+				asm volatile ("fmovemd %/fp0-%/fp7,%0\n\t"
+					      "fmovel %/fpiar,%1\n\t"
+					      "fmovel %/fpcr,%2\n\t"
+					      "fmovel %/fpsr,%3"
+					      :
+					      : "m" (p->thread.fp[0]),
+						"m" (p->thread.fpcntl[0]),
+						"m" (p->thread.fpcntl[1]),
+						"m" (p->thread.fpcntl[2])
+					      : "memory");
+			} else {
+				asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
+					      "fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
+					      :
+					      : "m" (p->thread.fp[0]),
+						"m" (p->thread.fpcntl[0])
+					      : "memory");
+			}
+		}
+
+		/* Restore the state in case the fpu was busy */
+		asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
+	}
+#endif /* CONFIG_FPU */
+
+	return 0;
+}
+
+/* Fill in the fpu structure for a core dump.  */
+int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
+{
+	if (FPU_IS_EMU) {
+		int i;
+
+		memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
+		memcpy(fpu->fpregs, current->thread.fp, 96);
+		/* Convert internal fpu reg representation
+		 * into long double format
+		 */
+		for (i = 0; i < 24; i += 3)
+			fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
+			                 ((fpu->fpregs[i] & 0x0000ffff) << 16);
+		return 1;
+	}
+
+	if (IS_ENABLED(CONFIG_FPU)) {
+		char fpustate[216];
+
+		/* First dump the fpu context to avoid protocol violation.  */
+		asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
+		if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
+			return 0;
+
+		if (CPU_IS_COLDFIRE) {
+			asm volatile ("fmovel %/fpiar,%0\n\t"
+				      "fmovel %/fpcr,%1\n\t"
+				      "fmovel %/fpsr,%2\n\t"
+				      "fmovemd %/fp0-%/fp7,%3"
+				      :
+				      : "m" (fpu->fpcntl[0]),
+					"m" (fpu->fpcntl[1]),
+					"m" (fpu->fpcntl[2]),
+					"m" (fpu->fpregs[0])
+				      : "memory");
+		} else {
+			asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
+				      :
+				      : "m" (fpu->fpcntl[0])
+				      : "memory");
+			asm volatile ("fmovemx %/fp0-%/fp7,%0"
+				      :
+				      : "m" (fpu->fpregs[0])
+				      : "memory");
+		}
+	}
+
+	return 1;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long fp, pc;
+	unsigned long stack_page;
+	int count = 0;
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	stack_page = (unsigned long)task_stack_page(p);
+	fp = ((struct switch_stack *)p->thread.ksp)->a6;
+	do {
+		if (fp < stack_page+sizeof(struct thread_info) ||
+		    fp >= 8184+stack_page)
+			return 0;
+		pc = ((unsigned long *)fp)[1];
+		if (!in_sched_functions(pc))
+			return pc;
+		fp = *(unsigned long *) fp;
+	} while (count++ < 16);
+	return 0;
+}
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
new file mode 100644
index 0000000..748c63b
--- /dev/null
+++ b/arch/m68k/kernel/ptrace.c
@@ -0,0 +1,305 @@
+/*
+ *  linux/arch/m68k/kernel/ptrace.c
+ *
+ *  Copyright (C) 1994 by Hamish Macdonald
+ *  Taken from linux/kernel/ptrace.c and modified for M680x0.
+ *  linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/signal.h>
+#include <linux/tracehook.h>
+
+#include <linux/uaccess.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+/*
+ * This does not yet catch signals sent when the child dies
+ * (in exit.c or in signal.c).
+ */
+
+/* determines which bits in the SR the user has access to. */
+/* 1 = access 0 = no access */
+#define SR_MASK 0x001f
+
+/* sets the trace bits. */
+#define TRACE_BITS 0xC000
+#define T1_BIT 0x8000
+#define T0_BIT 0x4000
+
+/* Find the stack offset for a register, relative to thread.esp0. */
+#define PT_REG(reg)	((long)&((struct pt_regs *)0)->reg)
+#define SW_REG(reg)	((long)&((struct switch_stack *)0)->reg \
+			 - sizeof(struct switch_stack))
+/* Mapping from PT_xxx to the stack offset at which the register is
+   saved.  Notice that usp has no stack-slot and needs to be treated
+   specially (see get_reg/put_reg below). */
+static const int regoff[] = {
+	[0]	= PT_REG(d1),
+	[1]	= PT_REG(d2),
+	[2]	= PT_REG(d3),
+	[3]	= PT_REG(d4),
+	[4]	= PT_REG(d5),
+	[5]	= SW_REG(d6),
+	[6]	= SW_REG(d7),
+	[7]	= PT_REG(a0),
+	[8]	= PT_REG(a1),
+	[9]	= PT_REG(a2),
+	[10]	= SW_REG(a3),
+	[11]	= SW_REG(a4),
+	[12]	= SW_REG(a5),
+	[13]	= SW_REG(a6),
+	[14]	= PT_REG(d0),
+	[15]	= -1,
+	[16]	= PT_REG(orig_d0),
+	[17]	= PT_REG(sr),
+	[18]	= PT_REG(pc),
+};
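+
+/*
+ * Example: a debugger reading the child's program counter issues
+ * ptrace(PTRACE_PEEKUSR, pid, 4 * PT_PC, 0); arch_ptrace() below converts the
+ * byte offset back to regno = PT_PC, and get_reg() fetches the value from
+ * thread.esp0 + regoff[PT_PC], with the stkadj correction applied.
+ */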
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static inline long get_reg(struct task_struct *task, int regno)
+{
+	unsigned long *addr;
+
+	if (regno == PT_USP)
+		addr = &task->thread.usp;
+	else if (regno < ARRAY_SIZE(regoff))
+		addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
+	else
+		return 0;
+	/* Need to take stkadj into account. */
+	if (regno == PT_SR || regno == PT_PC) {
+		long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
+		addr = (unsigned long *) ((unsigned long)addr + stkadj);
+		/* The sr is actually a 16 bit register.  */
+		if (regno == PT_SR)
+			return *(unsigned short *)addr;
+	}
+	return *addr;
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static inline int put_reg(struct task_struct *task, int regno,
+			  unsigned long data)
+{
+	unsigned long *addr;
+
+	if (regno == PT_USP)
+		addr = &task->thread.usp;
+	else if (regno < ARRAY_SIZE(regoff))
+		addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
+	else
+		return -1;
+	/* Need to take stkadj into account. */
+	if (regno == PT_SR || regno == PT_PC) {
+		long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
+		addr = (unsigned long *) ((unsigned long)addr + stkadj);
+		/* The sr is actually a 16 bit register.  */
+		if (regno == PT_SR) {
+			*(unsigned short *)addr = data;
+			return 0;
+		}
+	}
+	*addr = data;
+	return 0;
+}
+
+/*
+ * Make sure the single step bit is not set.
+ */
+static inline void singlestep_disable(struct task_struct *child)
+{
+	unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
+	put_reg(child, PT_SR, tmp);
+	clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ */
+void ptrace_disable(struct task_struct *child)
+{
+	singlestep_disable(child);
+}
+
+void user_enable_single_step(struct task_struct *child)
+{
+	unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
+	put_reg(child, PT_SR, tmp | T1_BIT);
+	set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
+}
+
+#ifdef CONFIG_MMU
+void user_enable_block_step(struct task_struct *child)
+{
+	unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
+	put_reg(child, PT_SR, tmp | T0_BIT);
+}
+#endif
+
+void user_disable_single_step(struct task_struct *child)
+{
+	singlestep_disable(child);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+		 unsigned long addr, unsigned long data)
+{
+	unsigned long tmp;
+	int i, ret = 0;
+	int regno = addr >> 2; /* temporary hack. */
+	unsigned long __user *datap = (unsigned long __user *) data;
+
+	switch (request) {
+	/* read the word at location addr in the USER area. */
+	case PTRACE_PEEKUSR:
+		if (addr & 3)
+			goto out_eio;
+
+		if (regno >= 0 && regno < 19) {
+			tmp = get_reg(child, regno);
+		} else if (regno >= 21 && regno < 49) {
+			tmp = child->thread.fp[regno - 21];
+			/* Convert internal fpu reg representation
+			 * into long double format
+			 */
+			if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
+				tmp = ((tmp & 0xffff0000) << 15) |
+				      ((tmp & 0x0000ffff) << 16);
+#ifndef CONFIG_MMU
+		} else if (regno == 49) {
+			tmp = child->mm->start_code;
+		} else if (regno == 50) {
+			tmp = child->mm->start_data;
+		} else if (regno == 51) {
+			tmp = child->mm->end_code;
+#endif
+		} else
+			goto out_eio;
+		ret = put_user(tmp, datap);
+		break;
+
+	case PTRACE_POKEUSR:
+	/* write the word at location addr in the USER area */
+		if (addr & 3)
+			goto out_eio;
+
+		if (regno == PT_SR) {
+			data &= SR_MASK;
+			data |= get_reg(child, PT_SR) & ~SR_MASK;
+		}
+		if (regno >= 0 && regno < 19) {
+			if (put_reg(child, regno, data))
+				goto out_eio;
+		} else if (regno >= 21 && regno < 48) {
+			/* Convert long double format
+			 * into internal fpu reg representation
+			 */
+			if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
+				data <<= 15;
+				data = (data & 0xffff0000) |
+				       ((data & 0x0000ffff) >> 1);
+			}
+			child->thread.fp[regno - 21] = data;
+		} else
+			goto out_eio;
+		break;
+
+	case PTRACE_GETREGS:	/* Get all gp regs from the child. */
+		for (i = 0; i < 19; i++) {
+			tmp = get_reg(child, i);
+			ret = put_user(tmp, datap);
+			if (ret)
+				break;
+			datap++;
+		}
+		break;
+
+	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
+		for (i = 0; i < 19; i++) {
+			ret = get_user(tmp, datap);
+			if (ret)
+				break;
+			if (i == PT_SR) {
+				tmp &= SR_MASK;
+				tmp |= get_reg(child, PT_SR) & ~SR_MASK;
+			}
+			put_reg(child, i, tmp);
+			datap++;
+		}
+		break;
+
+	case PTRACE_GETFPREGS:	/* Get the child FPU state. */
+		if (copy_to_user(datap, &child->thread.fp,
+				 sizeof(struct user_m68kfp_struct)))
+			ret = -EFAULT;
+		break;
+
+	case PTRACE_SETFPREGS:	/* Set the child FPU state. */
+		if (copy_from_user(&child->thread.fp, datap,
+				   sizeof(struct user_m68kfp_struct)))
+			ret = -EFAULT;
+		break;
+
+	case PTRACE_GET_THREAD_AREA:
+		ret = put_user(task_thread_info(child)->tp_value, datap);
+		break;
+
+	default:
+		ret = ptrace_request(child, request, addr, data);
+		break;
+	}
+
+	return ret;
+out_eio:
+	return -EIO;
+}
+
+asmlinkage void syscall_trace(void)
+{
+	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+				 ? 0x80 : 0));
+	/*
+	 * this isn't the same as continuing with a signal, but it will do
+	 * for normal use.  strace only continues with a signal if the
+	 * stopping signal is not SIGTRAP.  -brl
+	 */
+	if (current->exit_code) {
+		send_sig(current->exit_code, current, 1);
+		current->exit_code = 0;
+	}
+}
+
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
+asmlinkage int syscall_trace_enter(void)
+{
+	int ret = 0;
+
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		ret = tracehook_report_syscall_entry(task_pt_regs(current));
+	return ret;
+}
+
+asmlinkage void syscall_trace_leave(void)
+{
+	if (test_thread_flag(TIF_SYSCALL_TRACE))
+		tracehook_report_syscall_exit(task_pt_regs(current), 0);
+}
+#endif /* CONFIG_COLDFIRE || !CONFIG_MMU */
diff --git a/arch/m68k/kernel/relocate_kernel.S b/arch/m68k/kernel/relocate_kernel.S
new file mode 100644
index 0000000..ab0f1e7
--- /dev/null
+++ b/arch/m68k/kernel/relocate_kernel.S
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+
+
+#define MMU_BASE	8		/* MMU flags base in cpu_mmu_flags */
+
+.text
+
+ENTRY(relocate_new_kernel)
+	movel %sp@(4),%a0		/* a0 = ptr */
+	movel %sp@(8),%a1		/* a1 = start */
+	movel %sp@(12),%d1		/* d1 = cpu_mmu_flags */
+	movew #PAGE_MASK,%d2		/* d2 = PAGE_MASK */
+
+	/* Disable MMU */
+
+	btst #MMU_BASE + MMUB_68851,%d1
+	jeq 3f
+
+1:	/* 68851 or 68030 */
+
+	lea %pc@(.Lcopy),%a4
+2:	addl #0x00000000,%a4		/* virt_to_phys() */
+
+	.section ".m68k_fixup","aw"
+	.long M68K_FIXUP_MEMOFFSET, 2b+2
+	.previous
+
+	.chip 68030
+	pmove %tc,%d0			/* Disable MMU */
+	bclr #7,%d0
+	pmove %d0,%tc
+	jmp %a4@			/* Jump to physical .Lcopy */
+	.chip 68k
+
+3:
+	btst #MMU_BASE + MMUB_68030,%d1
+	jne 1b
+
+	btst #MMU_BASE + MMUB_68040,%d1
+	jeq 6f
+
+4:	/* 68040 or 68060 */
+
+	lea %pc@(.Lcont040),%a4
+5:	addl #0x00000000,%a4		/* virt_to_phys() */
+
+	.section ".m68k_fixup","aw"
+	.long M68K_FIXUP_MEMOFFSET, 5b+2
+	.previous
+
+	movel %a4,%d0
+	andl #0xff000000,%d0
+	orw #0xe020,%d0			/* Map 16 MiB, enable, cacheable */
+	.chip 68040
+	movec %d0,%itt0
+	movec %d0,%dtt0
+	.chip 68k
+	jmp %a4@			/* Jump to physical .Lcont040 */
+
+.Lcont040:
+	moveq #0,%d0
+	.chip 68040
+	movec %d0,%tc			/* Disable MMU */
+	movec %d0,%itt0
+	movec %d0,%itt1
+	movec %d0,%dtt0
+	movec %d0,%dtt1
+	.chip 68k
+	jra .Lcopy
+
+6:
+	btst #MMU_BASE + MMUB_68060,%d1
+	jne 4b
+
+.Lcopy:
+	movel %a0@+,%d0			/* d0 = entry = *ptr */
+	jeq .Lflush
+
+	btst #2,%d0			/* entry & IND_DONE? */
+	jne .Lflush
+
+	btst #1,%d0			/* entry & IND_INDIRECTION? */
+	jeq 1f
+	andw %d2,%d0
+	movel %d0,%a0			/* ptr = entry & PAGE_MASK */
+	jra .Lcopy
+
+1:
+	btst #0,%d0			/* entry & IND_DESTINATION? */
+	jeq 2f
+	andw %d2,%d0
+	movel %d0,%a2			/* a2 = dst = entry & PAGE_MASK */
+	jra .Lcopy
+
+2:
+	btst #3,%d0			/* entry & IND_SOURCE? */
+	jeq .Lcopy
+
+	andw %d2,%d0
+	movel %d0,%a3			/* a3 = src = entry & PAGE_MASK */
+	movew #PAGE_SIZE/32 - 1,%d0	/* d0 = PAGE_SIZE/32 - 1 */
+3:
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	movel %a3@+,%a2@+		/* *dst++ = *src++ */
+	dbf %d0, 3b
+	jra .Lcopy
+
+.Lflush:
+	/* Flush all caches */
+
+	btst #CPUB_68020,%d1
+	jeq 2f
+
+1:	/* 68020 or 68030 */
+	.chip 68030
+	movec %cacr,%d0
+	orw #0x808,%d0
+	movec %d0,%cacr
+	.chip 68k
+	jra .Lreincarnate
+
+2:
+	btst #CPUB_68030,%d1
+	jne 1b
+
+	btst #CPUB_68040,%d1
+	jeq 4f
+
+3:	/* 68040 or 68060 */
+	.chip 68040
+	nop
+	cpusha %bc
+	nop
+	cinva %bc
+	nop
+	.chip 68k
+	jra .Lreincarnate
+
+4:
+	btst #CPUB_68060,%d1
+	jne 3b
+
+.Lreincarnate:
+	jmp %a1@
+
+relocate_new_kernel_end:
+
+ENTRY(relocate_new_kernel_size)
+	.long relocate_new_kernel_end - relocate_new_kernel
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
new file mode 100644
index 0000000..19a9298
--- /dev/null
+++ b/arch/m68k/kernel/setup.c
@@ -0,0 +1,11 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifdef CONFIG_MMU
+#include "setup_mm.c"
+#else
+#include "setup_no.c"
+#endif
+
+#if IS_ENABLED(CONFIG_INPUT_M68K_BEEP)
+void (*mach_beep)(unsigned int, unsigned int);
+EXPORT_SYMBOL(mach_beep);
+#endif
diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c
new file mode 100644
index 0000000..5d3596c
--- /dev/null
+++ b/arch/m68k/kernel/setup_mm.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/m68k/kernel/setup.c
+ *
+ *  Copyright (C) 1995  Hamish Macdonald
+ */
+
+/*
+ * This file handles the architecture-dependent parts of system setup
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/console.h>
+#include <linux/genhd.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/module.h>
+#include <linux/initrd.h>
+
+#include <asm/bootinfo.h>
+#include <asm/byteorder.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#ifdef CONFIG_AMIGA
+#include <asm/amigahw.h>
+#endif
+#ifdef CONFIG_ATARI
+#include <asm/atarihw.h>
+#include <asm/atari_stram.h>
+#endif
+#ifdef CONFIG_SUN3X
+#include <asm/dvma.h>
+#endif
+#include <asm/natfeat.h>
+
+#if !FPSTATESIZE || !NR_IRQS
+#warning No CPU/platform type selected, your kernel will not work!
+#warning Are you building an allnoconfig kernel?
+#endif
+
+unsigned long m68k_machtype;
+EXPORT_SYMBOL(m68k_machtype);
+unsigned long m68k_cputype;
+EXPORT_SYMBOL(m68k_cputype);
+unsigned long m68k_fputype;
+unsigned long m68k_mmutype;
+EXPORT_SYMBOL(m68k_mmutype);
+#ifdef CONFIG_VME
+unsigned long vme_brdtype;
+EXPORT_SYMBOL(vme_brdtype);
+#endif
+
+int m68k_is040or060;
+EXPORT_SYMBOL(m68k_is040or060);
+
+extern unsigned long availmem;
+
+int m68k_num_memory;
+EXPORT_SYMBOL(m68k_num_memory);
+int m68k_realnum_memory;
+EXPORT_SYMBOL(m68k_realnum_memory);
+unsigned long m68k_memoffset;
+struct m68k_mem_info m68k_memory[NUM_MEMINFO];
+EXPORT_SYMBOL(m68k_memory);
+
+static struct m68k_mem_info m68k_ramdisk __initdata;
+
+static char m68k_command_line[CL_SIZE] __initdata;
+
+void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
+/* machine dependent irq functions */
+void (*mach_init_IRQ) (void) __initdata = NULL;
+void (*mach_get_model) (char *model);
+void (*mach_get_hardware_list) (struct seq_file *m);
+/* machine dependent timer functions */
+int (*mach_hwclk) (int, struct rtc_time*);
+EXPORT_SYMBOL(mach_hwclk);
+unsigned int (*mach_get_ss)(void);
+int (*mach_get_rtc_pll)(struct rtc_pll_info *);
+int (*mach_set_rtc_pll)(struct rtc_pll_info *);
+EXPORT_SYMBOL(mach_get_ss);
+EXPORT_SYMBOL(mach_get_rtc_pll);
+EXPORT_SYMBOL(mach_set_rtc_pll);
+void (*mach_reset)( void );
+void (*mach_halt)( void );
+void (*mach_power_off)( void );
+long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
+#ifdef CONFIG_HEARTBEAT
+void (*mach_heartbeat) (int);
+EXPORT_SYMBOL(mach_heartbeat);
+#endif
+#ifdef CONFIG_M68K_L2_CACHE
+void (*mach_l2_flush) (int);
+#endif
+#if defined(CONFIG_ISA) && defined(MULTI_ISA)
+int isa_type;
+int isa_sex;
+EXPORT_SYMBOL(isa_type);
+EXPORT_SYMBOL(isa_sex);
+#endif
+
+extern int amiga_parse_bootinfo(const struct bi_record *);
+extern int atari_parse_bootinfo(const struct bi_record *);
+extern int mac_parse_bootinfo(const struct bi_record *);
+extern int q40_parse_bootinfo(const struct bi_record *);
+extern int bvme6000_parse_bootinfo(const struct bi_record *);
+extern int mvme16x_parse_bootinfo(const struct bi_record *);
+extern int mvme147_parse_bootinfo(const struct bi_record *);
+extern int hp300_parse_bootinfo(const struct bi_record *);
+extern int apollo_parse_bootinfo(const struct bi_record *);
+
+extern void config_amiga(void);
+extern void config_atari(void);
+extern void config_mac(void);
+extern void config_sun3(void);
+extern void config_apollo(void);
+extern void config_mvme147(void);
+extern void config_mvme16x(void);
+extern void config_bvme6000(void);
+extern void config_hp300(void);
+extern void config_q40(void);
+extern void config_sun3x(void);
+
+#define MASK_256K 0xfffc0000
+
+extern void paging_init(void);
+
+static void __init m68k_parse_bootinfo(const struct bi_record *record)
+{
+	uint16_t tag;
+
+	save_bootinfo(record);
+
+	while ((tag = be16_to_cpu(record->tag)) != BI_LAST) {
+		int unknown = 0;
+		const void *data = record->data;
+		uint16_t size = be16_to_cpu(record->size);
+
+		switch (tag) {
+		case BI_MACHTYPE:
+		case BI_CPUTYPE:
+		case BI_FPUTYPE:
+		case BI_MMUTYPE:
+			/* Already set up by head.S */
+			break;
+
+		case BI_MEMCHUNK:
+			if (m68k_num_memory < NUM_MEMINFO) {
+				const struct mem_info *m = data;
+				m68k_memory[m68k_num_memory].addr =
+					be32_to_cpu(m->addr);
+				m68k_memory[m68k_num_memory].size =
+					be32_to_cpu(m->size);
+				memblock_add(m68k_memory[m68k_num_memory].addr,
+					     m68k_memory[m68k_num_memory].size);
+				m68k_num_memory++;
+			} else
+				pr_warn("%s: too many memory chunks\n",
+					__func__);
+			break;
+
+		case BI_RAMDISK:
+			{
+				const struct mem_info *m = data;
+				m68k_ramdisk.addr = be32_to_cpu(m->addr);
+				m68k_ramdisk.size = be32_to_cpu(m->size);
+			}
+			break;
+
+		case BI_COMMAND_LINE:
+			strlcpy(m68k_command_line, data,
+				sizeof(m68k_command_line));
+			break;
+
+		default:
+			if (MACH_IS_AMIGA)
+				unknown = amiga_parse_bootinfo(record);
+			else if (MACH_IS_ATARI)
+				unknown = atari_parse_bootinfo(record);
+			else if (MACH_IS_MAC)
+				unknown = mac_parse_bootinfo(record);
+			else if (MACH_IS_Q40)
+				unknown = q40_parse_bootinfo(record);
+			else if (MACH_IS_BVME6000)
+				unknown = bvme6000_parse_bootinfo(record);
+			else if (MACH_IS_MVME16x)
+				unknown = mvme16x_parse_bootinfo(record);
+			else if (MACH_IS_MVME147)
+				unknown = mvme147_parse_bootinfo(record);
+			else if (MACH_IS_HP300)
+				unknown = hp300_parse_bootinfo(record);
+			else if (MACH_IS_APOLLO)
+				unknown = apollo_parse_bootinfo(record);
+			else
+				unknown = 1;
+		}
+		if (unknown)
+			pr_warn("%s: unknown tag 0x%04x ignored\n", __func__,
+				tag);
+		record = (struct bi_record *)((unsigned long)record + size);
+	}
+
+	m68k_realnum_memory = m68k_num_memory;
+#ifdef CONFIG_SINGLE_MEMORY_CHUNK
+	if (m68k_num_memory > 1) {
+		pr_warn("%s: ignoring last %i chunks of physical memory\n",
+			__func__, (m68k_num_memory - 1));
+		m68k_num_memory = 1;
+	}
+#endif
+}
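+
+/*
+ * Record layout, as implied by the parser above: the bootinfo is a packed
+ * chain of big-endian records, each starting with a 16-bit tag and a 16-bit
+ * size (header plus payload, used to advance to the next record), followed
+ * by the payload - e.g. a struct mem_info of {addr, size} for BI_MEMCHUNK.
+ * The chain is terminated by a record whose tag is BI_LAST.
+ */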
+
+void __init setup_arch(char **cmdline_p)
+{
+	/* The bootinfo is located right after the kernel */
+	if (!CPU_IS_COLDFIRE)
+		m68k_parse_bootinfo((const struct bi_record *)_end);
+
+	if (CPU_IS_040)
+		m68k_is040or060 = 4;
+	else if (CPU_IS_060)
+		m68k_is040or060 = 6;
+
+	/* FIXME: m68k_fputype is passed in by Penguin booter, which can
+	 * be confused by software FPU emulation. BEWARE.
+	 * We should really do our own FPU check at startup.
+	 * [what do we do with buggy 68LC040s? if we have problems
+	 *  with them, we should add a test to check_bugs() below] */
+#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU_ONLY)
+	/* clear the fpu if we have one */
+	if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060|FPU_COLDFIRE)) {
+		volatile int zero = 0;
+		asm volatile ("frestore %0" : : "m" (zero));
+	}
+#endif
+
+	if (CPU_IS_060) {
+		u32 pcr;
+
+		asm (".chip 68060; movec %%pcr,%0; .chip 68k"
+		     : "=d" (pcr));
+		if (((pcr >> 8) & 0xff) <= 5) {
+			pr_warn("Enabling workaround for errata I14\n");
+			asm (".chip 68060; movec %0,%%pcr; .chip 68k"
+			     : : "d" (pcr | 0x20));
+		}
+	}
+
+	init_mm.start_code = PAGE_OFFSET;
+	init_mm.end_code = (unsigned long)_etext;
+	init_mm.end_data = (unsigned long)_edata;
+	init_mm.brk = (unsigned long)_end;
+
+#if defined(CONFIG_BOOTPARAM)
+	strncpy(m68k_command_line, CONFIG_BOOTPARAM_STRING, CL_SIZE);
+	m68k_command_line[CL_SIZE - 1] = 0;
+#endif /* CONFIG_BOOTPARAM */
+	process_uboot_commandline(&m68k_command_line[0], CL_SIZE);
+	*cmdline_p = m68k_command_line;
+	memcpy(boot_command_line, *cmdline_p, CL_SIZE);
+
+	parse_early_param();
+
+#ifdef CONFIG_DUMMY_CONSOLE
+	conswitchp = &dummy_con;
+#endif
+
+	switch (m68k_machtype) {
+#ifdef CONFIG_AMIGA
+	case MACH_AMIGA:
+		config_amiga();
+		break;
+#endif
+#ifdef CONFIG_ATARI
+	case MACH_ATARI:
+		config_atari();
+		break;
+#endif
+#ifdef CONFIG_MAC
+	case MACH_MAC:
+		config_mac();
+		break;
+#endif
+#ifdef CONFIG_SUN3
+	case MACH_SUN3:
+		config_sun3();
+		break;
+#endif
+#ifdef CONFIG_APOLLO
+	case MACH_APOLLO:
+		config_apollo();
+		break;
+#endif
+#ifdef CONFIG_MVME147
+	case MACH_MVME147:
+		config_mvme147();
+		break;
+#endif
+#ifdef CONFIG_MVME16x
+	case MACH_MVME16x:
+		config_mvme16x();
+		break;
+#endif
+#ifdef CONFIG_BVME6000
+	case MACH_BVME6000:
+		config_bvme6000();
+		break;
+#endif
+#ifdef CONFIG_HP300
+	case MACH_HP300:
+		config_hp300();
+		break;
+#endif
+#ifdef CONFIG_Q40
+	case MACH_Q40:
+		config_q40();
+		break;
+#endif
+#ifdef CONFIG_SUN3X
+	case MACH_SUN3X:
+		config_sun3x();
+		break;
+#endif
+#ifdef CONFIG_COLDFIRE
+	case MACH_M54XX:
+	case MACH_M5441X:
+		cf_bootmem_alloc();
+		cf_mmu_context_init();
+		config_BSP(NULL, 0);
+		break;
+#endif
+	default:
+		panic("No configuration setup");
+	}
+
+	paging_init();
+
+#ifdef CONFIG_NATFEAT
+	nf_init();
+#endif
+
+#ifndef CONFIG_SUN3
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (m68k_ramdisk.size) {
+		memblock_reserve(m68k_ramdisk.addr, m68k_ramdisk.size);
+		initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
+		initrd_end = initrd_start + m68k_ramdisk.size;
+		pr_info("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
+	}
+#endif
+
+#ifdef CONFIG_ATARI
+	if (MACH_IS_ATARI)
+		atari_stram_reserve_pages((void *)availmem);
+#endif
+#ifdef CONFIG_SUN3X
+	if (MACH_IS_SUN3X) {
+		dvma_init();
+	}
+#endif
+
+#endif /* !CONFIG_SUN3 */
+
+/* set ISA defs as early as possible */
+#if defined(CONFIG_ISA) && defined(MULTI_ISA)
+	if (MACH_IS_Q40) {
+		isa_type = ISA_TYPE_Q40;
+		isa_sex = 0;
+	}
+#ifdef CONFIG_AMIGA_PCMCIA
+	if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
+		isa_type = ISA_TYPE_AG;
+		isa_sex = 1;
+	}
+#endif
+#ifdef CONFIG_ATARI_ROM_ISA
+	if (MACH_IS_ATARI) {
+		isa_type = ISA_TYPE_ENEC;
+		isa_sex = 0;
+	}
+#endif
+#endif
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	const char *cpu, *mmu, *fpu;
+	unsigned long clockfreq, clockfactor;
+
+#define LOOP_CYCLES_68020	(8)
+#define LOOP_CYCLES_68030	(8)
+#define LOOP_CYCLES_68040	(3)
+#define LOOP_CYCLES_68060	(1)
+#define LOOP_CYCLES_COLDFIRE	(2)
+
+	if (CPU_IS_020) {
+		cpu = "68020";
+		clockfactor = LOOP_CYCLES_68020;
+	} else if (CPU_IS_030) {
+		cpu = "68030";
+		clockfactor = LOOP_CYCLES_68030;
+	} else if (CPU_IS_040) {
+		cpu = "68040";
+		clockfactor = LOOP_CYCLES_68040;
+	} else if (CPU_IS_060) {
+		cpu = "68060";
+		clockfactor = LOOP_CYCLES_68060;
+	} else if (CPU_IS_COLDFIRE) {
+		cpu = "ColdFire";
+		clockfactor = LOOP_CYCLES_COLDFIRE;
+	} else {
+		cpu = "680x0";
+		clockfactor = 0;
+	}
+
+#ifdef CONFIG_M68KFPU_EMU_ONLY
+	fpu = "none(soft float)";
+#else
+	if (m68k_fputype & FPU_68881)
+		fpu = "68881";
+	else if (m68k_fputype & FPU_68882)
+		fpu = "68882";
+	else if (m68k_fputype & FPU_68040)
+		fpu = "68040";
+	else if (m68k_fputype & FPU_68060)
+		fpu = "68060";
+	else if (m68k_fputype & FPU_SUNFPA)
+		fpu = "Sun FPA";
+	else if (m68k_fputype & FPU_COLDFIRE)
+		fpu = "ColdFire";
+	else
+		fpu = "none";
+#endif
+
+	if (m68k_mmutype & MMU_68851)
+		mmu = "68851";
+	else if (m68k_mmutype & MMU_68030)
+		mmu = "68030";
+	else if (m68k_mmutype & MMU_68040)
+		mmu = "68040";
+	else if (m68k_mmutype & MMU_68060)
+		mmu = "68060";
+	else if (m68k_mmutype & MMU_SUN3)
+		mmu = "Sun-3";
+	else if (m68k_mmutype & MMU_APOLLO)
+		mmu = "Apollo";
+	else if (m68k_mmutype & MMU_COLDFIRE)
+		mmu = "ColdFire";
+	else
+		mmu = "unknown";
+
+	clockfreq = loops_per_jiffy * HZ * clockfactor;
+
+	seq_printf(m, "CPU:\t\t%s\n"
+		   "MMU:\t\t%s\n"
+		   "FPU:\t\t%s\n"
+		   "Clocking:\t%lu.%1luMHz\n"
+		   "BogoMips:\t%lu.%02lu\n"
+		   "Calibration:\t%lu loops\n",
+		   cpu, mmu, fpu,
+		   clockfreq/1000000,(clockfreq/100000)%10,
+		   loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
+		   loops_per_jiffy);
+	return 0;
+}
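+
+/*
+ * Worked example (hypothetical calibration): on a 25 MHz 68030 with HZ = 100,
+ * the delay loop calibrates to roughly loops_per_jiffy = 31250
+ * (25000000 / (100 * 8)).  The code above then reports
+ * Clocking: 31250 * 100 * 8 / 1000000 = 25.0 MHz and BogoMips: 6.25
+ * (31250 / 5000, with the fractional digits from the second term).
+ */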
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < 1 ? (void *)1 : NULL;
+}
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return NULL;
+}
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+const struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo,
+};
+
+#ifdef CONFIG_PROC_HARDWARE
+static int hardware_proc_show(struct seq_file *m, void *v)
+{
+	char model[80];
+	unsigned long mem;
+	int i;
+
+	if (mach_get_model)
+		mach_get_model(model);
+	else
+		strcpy(model, "Unknown m68k");
+
+	seq_printf(m, "Model:\t\t%s\n", model);
+	for (mem = 0, i = 0; i < m68k_num_memory; i++)
+		mem += m68k_memory[i].size;
+	seq_printf(m, "System Memory:\t%ldK\n", mem >> 10);
+
+	if (mach_get_hardware_list)
+		mach_get_hardware_list(m);
+
+	return 0;
+}
+
+static int __init proc_hardware_init(void)
+{
+	proc_create_single("hardware", 0, NULL, hardware_proc_show);
+	return 0;
+}
+module_init(proc_hardware_init);
+#endif
+
+void check_bugs(void)
+{
+#if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU)
+	if (m68k_fputype == 0) {
+		pr_emerg("*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
+			"WHICH IS REQUIRED BY LINUX/M68K ***\n");
+		pr_emerg("Upgrade your hardware or join the FPU "
+			"emulation project\n");
+		panic("no FPU");
+	}
+#endif /* !CONFIG_M68KFPU_EMU */
+}
+
+#ifdef CONFIG_ADB
+static int __init adb_probe_sync_enable (char *str) {
+	extern int __adb_probe_sync;
+	__adb_probe_sync = 1;
+	return 1;
+}
+
+__setup("adb_sync", adb_probe_sync_enable);
+#endif /* CONFIG_ADB */
diff --git a/arch/m68k/kernel/setup_no.c b/arch/m68k/kernel/setup_no.c
new file mode 100644
index 0000000..cfd5475
--- /dev/null
+++ b/arch/m68k/kernel/setup_no.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ *  linux/arch/m68knommu/kernel/setup.c
+ *
+ *  Copyright (C) 1999-2007  Greg Ungerer (gerg@snapgear.com)
+ *  Copyright (C) 1998,1999  D. Jeff Dionne <jeff@uClinux.org>
+ *  Copyleft  ()) 2000       James D. Schettine {james@telos-systems.com}
+ *  Copyright (C) 1998       Kenneth Albanowski <kjahds@kjahds.com>
+ *  Copyright (C) 1995       Hamish Macdonald
+ *  Copyright (C) 2000       Lineo Inc. (www.lineo.com)
+ *  Copyright (C) 2001 	     Lineo, Inc. <www.lineo.com>
+ *
+ *  68VZ328 Fixes/support    Evan Stawnyczy <e@lineo.ca>
+ */
+
+/*
+ * This file handles the architecture-dependent parts of system setup
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/memblock.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/rtc.h>
+
+#include <asm/setup.h>
+#include <asm/bootinfo.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+unsigned long memory_start;
+unsigned long memory_end;
+
+EXPORT_SYMBOL(memory_start);
+EXPORT_SYMBOL(memory_end);
+
+char __initdata command_line[COMMAND_LINE_SIZE];
+
+/* machine dependent timer functions */
+void (*mach_sched_init)(irq_handler_t handler) __initdata = NULL;
+int (*mach_hwclk) (int, struct rtc_time*);
+
+/* machine dependent reboot functions */
+void (*mach_reset)(void);
+void (*mach_halt)(void);
+void (*mach_power_off)(void);
+
+#ifdef CONFIG_M68000
+#if defined(CONFIG_M68328)
+#define CPU_NAME	"MC68328"
+#elif defined(CONFIG_M68EZ328)
+#define CPU_NAME	"MC68EZ328"
+#elif defined(CONFIG_M68VZ328)
+#define CPU_NAME	"MC68VZ328"
+#else
+#define CPU_NAME	"MC68000"
+#endif
+#endif /* CONFIG_M68000 */
+#ifndef CPU_NAME
+#define	CPU_NAME	"UNKNOWN"
+#endif
+
+/*
+ * Different cores have different instruction execution timings.
+ * The old/traditional 68000 cores are basically all the same, at 16.
+ * The ColdFire cores vary a little; their values are defined in their
+ * headers. We default to the standard 68000 value here.
+ */
+#ifndef CPU_INSTR_PER_JIFFY
+#define	CPU_INSTR_PER_JIFFY	16
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+	memory_start = PAGE_ALIGN(_ramstart);
+	memory_end = _ramend;
+
+	init_mm.start_code = (unsigned long) &_stext;
+	init_mm.end_code = (unsigned long) &_etext;
+	init_mm.end_data = (unsigned long) &_edata;
+	init_mm.brk = (unsigned long) 0;
+
+	config_BSP(&command_line[0], sizeof(command_line));
+
+#if defined(CONFIG_BOOTPARAM)
+	strncpy(&command_line[0], CONFIG_BOOTPARAM_STRING, sizeof(command_line));
+	command_line[sizeof(command_line) - 1] = 0;
+#endif /* CONFIG_BOOTPARAM */
+
+	process_uboot_commandline(&command_line[0], sizeof(command_line));
+
+	pr_info("uClinux with CPU " CPU_NAME "\n");
+
+#ifdef CONFIG_UCDIMM
+	pr_info("uCdimm by Lineo, Inc. <www.lineo.com>\n");
+#endif
+#ifdef CONFIG_M68VZ328
+	pr_info("M68VZ328 support by Evan Stawnyczy <e@lineo.ca>\n");
+#endif
+#ifdef CONFIG_COLDFIRE
+	pr_info("COLDFIRE port done by Greg Ungerer, gerg@snapgear.com\n");
+#ifdef CONFIG_M5307
+	pr_info("Modified for M5307 by Dave Miller, dmiller@intellistor.com\n");
+#endif
+#ifdef CONFIG_ELITE
+	pr_info("Modified for M5206eLITE by Rob Scott, rscott@mtrob.fdns.net\n");
+#endif
+#endif
+	pr_info("Flat model support (C) 1998,1999 Kenneth Albanowski, D. Jeff Dionne\n");
+
+#if defined( CONFIG_PILOT ) && defined( CONFIG_M68328 )
+	pr_info("TRG SuperPilot FLASH card support <info@trgnet.com>\n");
+#endif
+#if defined( CONFIG_PILOT ) && defined( CONFIG_M68EZ328 )
+	pr_info("PalmV support by Lineo Inc. <jeff@uclinux.com>\n");
+#endif
+#ifdef CONFIG_DRAGEN2
+	pr_info("DragonEngine II board support by Georges Menie\n");
+#endif
+#ifdef CONFIG_M5235EVB
+	pr_info("Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
+#endif
+
+	pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n",
+		 _stext, _etext, _sdata, _edata, __bss_start, __bss_stop);
+	pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ",
+		 __bss_stop, memory_start, memory_start, memory_end);
+
+	memblock_add(memory_start, memory_end - memory_start);
+
+	/* Keep a copy of command line */
+	*cmdline_p = &command_line[0];
+	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
+	boot_command_line[COMMAND_LINE_SIZE-1] = 0;
+
+#if defined(CONFIG_FRAMEBUFFER_CONSOLE) && defined(CONFIG_DUMMY_CONSOLE)
+	conswitchp = &dummy_con;
+#endif
+
+	/*
+	 * Give all the memory to the bootmap allocator, tell it to put the
+	 * boot mem_map at the start of memory.
+	 */
+	min_low_pfn = PFN_DOWN(memory_start);
+	max_pfn = max_low_pfn = PFN_DOWN(memory_end);
+
+#if defined(CONFIG_UBOOT) && defined(CONFIG_BLK_DEV_INITRD)
+	if ((initrd_start > 0) && (initrd_start < initrd_end) &&
+			(initrd_end < memory_end))
+		memblock_reserve(initrd_start, initrd_end - initrd_start);
+#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
+
+	/*
+	 * Get kmalloc into gear.
+	 */
+	paging_init();
+}
+
+/*
+ *	Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+	char *cpu, *mmu, *fpu;
+	u_long clockfreq;
+
+	cpu = CPU_NAME;
+	mmu = "none";
+	fpu = "none";
+	clockfreq = (loops_per_jiffy * HZ) * CPU_INSTR_PER_JIFFY;
+
+	seq_printf(m, "CPU:\t\t%s\n"
+		      "MMU:\t\t%s\n"
+		      "FPU:\t\t%s\n"
+		      "Clocking:\t%lu.%1luMHz\n"
+		      "BogoMips:\t%lu.%02lu\n"
+		      "Calibration:\t%lu loops\n",
+		      cpu, mmu, fpu,
+		      clockfreq / 1000000,
+		      (clockfreq / 100000) % 10,
+		      (loops_per_jiffy * HZ) / 500000,
+		      ((loops_per_jiffy * HZ) / 5000) % 100,
+		      (loops_per_jiffy * HZ));
+
+	return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+	return *pos < NR_CPUS ? ((void *) 0x12345678) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	++*pos;
+	return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+	.start	= c_start,
+	.next	= c_next,
+	.stop	= c_stop,
+	.show	= show_cpuinfo,
+};
+
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
new file mode 100644
index 0000000..72850b8
--- /dev/null
+++ b/arch/m68k/kernel/signal.c
@@ -0,0 +1,1139 @@
+/*
+ *  linux/arch/m68k/kernel/signal.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ *
+ * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
+ *
+ * mathemu support by Roman Zippel
+ *  (Note: fpstate in the signal context is completely ignored for the emulator
+ *         and the internal floating point format is put on stack)
+ */
+
+/*
+ * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
+ * Atari :-) Current limitation: Only one sigstack can be active at one time.
+ * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
+ * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
+ * signal handlers!
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/highuid.h>
+#include <linux/personality.h>
+#include <linux/tty.h>
+#include <linux/binfmts.h>
+#include <linux/extable.h>
+#include <linux/tracehook.h>
+
+#include <asm/setup.h>
+#include <linux/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_MMU
+
+/*
+ * Handle the slight differences in classic 68k and ColdFire trap frames.
+ */
+#ifdef CONFIG_COLDFIRE
+#define	FORMAT		4
+#define	FMT4SIZE	0
+#else
+#define	FORMAT		0
+#define	FMT4SIZE	sizeof(((struct frame *)0)->un.fmt4)
+#endif
+
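+/*
+ * Extra bytes carried by each 68k exception frame format beyond the short
+ * format 0 frame, indexed by the 4-bit format field (bits 15-12 of the
+ * format/vector word).  A -1 entry marks formats we never expect to see
+ * when unwinding a signal frame.
+ */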
+static const int frame_size_change[16] = {
+  [1]	= -1, /* sizeof(((struct frame *)0)->un.fmt1), */
+  [2]	= sizeof(((struct frame *)0)->un.fmt2),
+  [3]	= sizeof(((struct frame *)0)->un.fmt3),
+  [4]	= FMT4SIZE,
+  [5]	= -1, /* sizeof(((struct frame *)0)->un.fmt5), */
+  [6]	= -1, /* sizeof(((struct frame *)0)->un.fmt6), */
+  [7]	= sizeof(((struct frame *)0)->un.fmt7),
+  [8]	= -1, /* sizeof(((struct frame *)0)->un.fmt8), */
+  [9]	= sizeof(((struct frame *)0)->un.fmt9),
+  [10]	= sizeof(((struct frame *)0)->un.fmta),
+  [11]	= sizeof(((struct frame *)0)->un.fmtb),
+  [12]	= -1, /* sizeof(((struct frame *)0)->un.fmtc), */
+  [13]	= -1, /* sizeof(((struct frame *)0)->un.fmtd), */
+  [14]	= -1, /* sizeof(((struct frame *)0)->un.fmte), */
+  [15]	= -1, /* sizeof(((struct frame *)0)->un.fmtf), */
+};
+
+static inline int frame_extra_sizes(int f)
+{
+	return frame_size_change[f];
+}
+
+int fixup_exception(struct pt_regs *regs)
+{
+	const struct exception_table_entry *fixup;
+	struct pt_regs *tregs;
+
+	/* Are we prepared to handle this kernel fault? */
+	fixup = search_exception_tables(regs->pc);
+	if (!fixup)
+		return 0;
+
+	/* Create a new four word stack frame, discarding the old one. */
+	regs->stkadj = frame_extra_sizes(regs->format);
+	tregs =	(struct pt_regs *)((long)regs + regs->stkadj);
+	tregs->vector = regs->vector;
+	tregs->format = FORMAT;
+	tregs->pc = fixup->fixup;
+	tregs->sr = regs->sr;
+
+	return 1;
+}
+
+static inline void push_cache (unsigned long vaddr)
+{
+	/*
+	 * Using the old cache_push_v() was really a big waste.
+	 *
+	 * What we are trying to do is to flush 8 bytes to ram.
+	 * Flushing 2 cache lines of 16 bytes is much cheaper than
+	 * flushing 1 or 2 pages, as previously done in
+	 * cache_push_v().
+	 *                                                     Jes
+	 */
+	if (CPU_IS_040) {
+		unsigned long temp;
+
+		__asm__ __volatile__ (".chip 68040\n\t"
+				      "nop\n\t"
+				      "ptestr (%1)\n\t"
+				      "movec %%mmusr,%0\n\t"
+				      ".chip 68k"
+				      : "=r" (temp)
+				      : "a" (vaddr));
+
+		temp &= PAGE_MASK;
+		temp |= vaddr & ~PAGE_MASK;
+
+		__asm__ __volatile__ (".chip 68040\n\t"
+				      "nop\n\t"
+				      "cpushl %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (temp));
+	}
+	else if (CPU_IS_060) {
+		unsigned long temp;
+		__asm__ __volatile__ (".chip 68060\n\t"
+				      "plpar (%0)\n\t"
+				      ".chip 68k"
+				      : "=a" (temp)
+				      : "0" (vaddr));
+		__asm__ __volatile__ (".chip 68060\n\t"
+				      "cpushl %%bc,(%0)\n\t"
+				      ".chip 68k"
+				      : : "a" (temp));
+	} else if (!CPU_IS_COLDFIRE) {
+		/*
+		 * 68030/68020 have no writeback cache;
+		 * still need to clear icache.
+		 * Note that vaddr is guaranteed to be long word aligned.
+		 */
+		unsigned long temp;
+		asm volatile ("movec %%cacr,%0" : "=r" (temp));
+		temp += 4;
+		asm volatile ("movec %0,%%caar\n\t"
+			      "movec %1,%%cacr"
+			      : : "r" (vaddr), "r" (temp));
+		asm volatile ("movec %0,%%caar\n\t"
+			      "movec %1,%%cacr"
+			      : : "r" (vaddr + 4), "r" (temp));
+	} else {
+		/* CPU_IS_COLDFIRE */
+#if defined(CONFIG_CACHE_COPYBACK)
+		flush_cf_dcache(0, DCACHE_MAX_ADDR);
+#endif
+		/* Invalidate instruction cache for the pushed bytes */
+		clear_cf_icache(vaddr, vaddr + 8);
+	}
+}
+
+static inline void adjustformat(struct pt_regs *regs)
+{
+}
+
+static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+}
+
+#else /* CONFIG_MMU */
+
+void ret_from_user_signal(void);
+void ret_from_user_rt_signal(void);
+
+static inline int frame_extra_sizes(int f)
+{
+	/* No frame size adjustments required on non-MMU CPUs */
+	return 0;
+}
+
+static inline void adjustformat(struct pt_regs *regs)
+{
+	/*
+	 * set format byte to make stack appear modulo 4, which it will
+	 * be when doing the rte
+	 */
+	regs->format = 0x4;
+}
+
+static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+	sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
+}
+
+static inline void push_cache(unsigned long vaddr)
+{
+}
+
+#endif /* CONFIG_MMU */
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct sigframe
+{
+	char __user *pretcode;
+	int sig;
+	int code;
+	struct sigcontext __user *psc;
+	char retcode[8];
+	unsigned long extramask[_NSIG_WORDS-1];
+	struct sigcontext sc;
+};
+
+struct rt_sigframe
+{
+	char __user *pretcode;
+	int sig;
+	struct siginfo __user *pinfo;
+	void __user *puc;
+	char retcode[8];
+	struct siginfo info;
+	struct ucontext uc;
+};
+
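+/*
+ * The raw FPU state frame, the saved format/vector word and any extra
+ * exception-frame words are stashed in the uc_filler area of the ucontext.
+ */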
+#define FPCONTEXT_SIZE	216
+#define uc_fpstate	uc_filler[0]
+#define uc_formatvec	uc_filler[FPCONTEXT_SIZE/4]
+#define uc_extra	uc_filler[FPCONTEXT_SIZE/4+1]
+
+#ifdef CONFIG_FPU
+
+static unsigned char fpu_version;	/* version number of fpu, set when the FPU state is saved */
+
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+	int err = 1;
+
+	if (FPU_IS_EMU) {
+	    /* restore registers */
+	    memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
+	    memcpy(current->thread.fp, sc->sc_fpregs, 24);
+	    return 0;
+	}
+
+	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+	    /* Verify the frame format.  */
+	    if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+		 (sc->sc_fpstate[0] != fpu_version))
+		goto out;
+	    if (CPU_IS_020_OR_030) {
+		if (m68k_fputype & FPU_68881 &&
+		    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
+		    goto out;
+		if (m68k_fputype & FPU_68882 &&
+		    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
+		    goto out;
+	    } else if (CPU_IS_040) {
+		if (!(sc->sc_fpstate[1] == 0x00 ||
+                      sc->sc_fpstate[1] == 0x28 ||
+                      sc->sc_fpstate[1] == 0x60))
+		    goto out;
+	    } else if (CPU_IS_060) {
+		if (!(sc->sc_fpstate[3] == 0x00 ||
+                      sc->sc_fpstate[3] == 0x60 ||
+		      sc->sc_fpstate[3] == 0xe0))
+		    goto out;
+	    } else if (CPU_IS_COLDFIRE) {
+		if (!(sc->sc_fpstate[0] == 0x00 ||
+		      sc->sc_fpstate[0] == 0x05 ||
+		      sc->sc_fpstate[0] == 0xe5))
+		    goto out;
+	    } else
+		goto out;
+
+	    if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
+				  "fmovel %1,%%fpcr\n\t"
+				  "fmovel %2,%%fpsr\n\t"
+				  "fmovel %3,%%fpiar"
+				  : /* no outputs */
+				  : "m" (sc->sc_fpregs[0]),
+				    "m" (sc->sc_fpcntl[0]),
+				    "m" (sc->sc_fpcntl[1]),
+				    "m" (sc->sc_fpcntl[2]));
+	    } else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fmovemx %0,%%fp0-%%fp1\n\t"
+				  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+				  ".chip 68k"
+				  : /* no outputs */
+				  : "m" (*sc->sc_fpregs),
+				    "m" (*sc->sc_fpcntl));
+	    }
+	}
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "frestore %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*sc->sc_fpstate));
+	}
+	err = 0;
+
+out:
+	return err;
+}
+
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
+{
+	unsigned char fpstate[FPCONTEXT_SIZE];
+	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
+	fpregset_t fpregs;
+	int err = 1;
+
+	if (FPU_IS_EMU) {
+		/* restore fpu control registers */
+		if (__copy_from_user(current->thread.fpcntl,
+				uc->uc_mcontext.fpregs.f_fpcntl, 12))
+			goto out;
+		/* restore all other fpu registers */
+		if (__copy_from_user(current->thread.fp,
+				uc->uc_mcontext.fpregs.f_fpregs, 96))
+			goto out;
+		return 0;
+	}
+
+	if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
+		goto out;
+	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+			context_size = fpstate[1];
+		/* Verify the frame format.  */
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
+		     (fpstate[0] != fpu_version))
+			goto out;
+		if (CPU_IS_020_OR_030) {
+			if (m68k_fputype & FPU_68881 &&
+			    !(context_size == 0x18 || context_size == 0xb4))
+				goto out;
+			if (m68k_fputype & FPU_68882 &&
+			    !(context_size == 0x38 || context_size == 0xd4))
+				goto out;
+		} else if (CPU_IS_040) {
+			if (!(context_size == 0x00 ||
+			      context_size == 0x28 ||
+			      context_size == 0x60))
+				goto out;
+		} else if (CPU_IS_060) {
+			if (!(fpstate[3] == 0x00 ||
+			      fpstate[3] == 0x60 ||
+			      fpstate[3] == 0xe0))
+				goto out;
+		} else if (CPU_IS_COLDFIRE) {
+			if (!(fpstate[3] == 0x00 ||
+			      fpstate[3] == 0x05 ||
+			      fpstate[3] == 0xe5))
+				goto out;
+		} else
+			goto out;
+		if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
+				     sizeof(fpregs)))
+			goto out;
+
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
+					  "fmovel %1,%%fpcr\n\t"
+					  "fmovel %2,%%fpsr\n\t"
+					  "fmovel %3,%%fpiar"
+					  : /* no outputs */
+					  : "m" (fpregs.f_fpregs[0]),
+					    "m" (fpregs.f_fpcntl[0]),
+					    "m" (fpregs.f_fpcntl[1]),
+					    "m" (fpregs.f_fpcntl[2]));
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %0,%%fp0-%%fp7\n\t"
+					  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
+					  ".chip 68k"
+					  : /* no outputs */
+					  : "m" (*fpregs.f_fpregs),
+					    "m" (*fpregs.f_fpcntl));
+		}
+	}
+	if (context_size &&
+	    __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
+			     context_size))
+		goto out;
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("frestore %0" : : "m" (*fpstate));
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "frestore %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*fpstate));
+	}
+	err = 0;
+
+out:
+	return err;
+}
+
+/*
+ * Set up a signal frame.
+ */
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+	if (FPU_IS_EMU) {
+		/* save registers */
+		memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
+		memcpy(sc->sc_fpregs, current->thread.fp, 24);
+		return;
+	}
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fsave %0"
+				  : : "m" (*sc->sc_fpstate) : "memory");
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fsave %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*sc->sc_fpstate) : "memory");
+	}
+
+	if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
+		fpu_version = sc->sc_fpstate[0];
+		if (CPU_IS_020_OR_030 &&
+		    regs->vector >= (VEC_FPBRUC * 4) &&
+		    regs->vector <= (VEC_FPNAN * 4)) {
+			/* Clear pending exception in 68882 idle frame */
+			if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
+				sc->sc_fpstate[0x38] |= 1 << 3;
+		}
+
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
+					  "fmovel %%fpcr,%1\n\t"
+					  "fmovel %%fpsr,%2\n\t"
+					  "fmovel %%fpiar,%3"
+					  : "=m" (sc->sc_fpregs[0]),
+					    "=m" (sc->sc_fpcntl[0]),
+					    "=m" (sc->sc_fpcntl[1]),
+					    "=m" (sc->sc_fpcntl[2])
+					  : /* no inputs */
+					  : "memory");
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %%fp0-%%fp1,%0\n\t"
+					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+					  ".chip 68k"
+					  : "=m" (*sc->sc_fpregs),
+					    "=m" (*sc->sc_fpcntl)
+					  : /* no inputs */
+					  : "memory");
+		}
+	}
+}
+
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
+{
+	unsigned char fpstate[FPCONTEXT_SIZE];
+	int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
+	int err = 0;
+
+	if (FPU_IS_EMU) {
+		/* save fpu control registers */
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
+				current->thread.fpcntl, 12);
+		/* save all other fpu registers */
+		err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
+				current->thread.fp, 96);
+		return err;
+	}
+
+	if (CPU_IS_COLDFIRE) {
+		__asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
+	} else {
+		__asm__ volatile (".chip 68k/68881\n\t"
+				  "fsave %0\n\t"
+				  ".chip 68k"
+				  : : "m" (*fpstate) : "memory");
+	}
+
+	err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
+	if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
+		fpregset_t fpregs;
+		if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
+			context_size = fpstate[1];
+		fpu_version = fpstate[0];
+		if (CPU_IS_020_OR_030 &&
+		    regs->vector >= (VEC_FPBRUC * 4) &&
+		    regs->vector <= (VEC_FPNAN * 4)) {
+			/* Clear pending exception in 68882 idle frame */
+			if (*(unsigned short *) fpstate == 0x1f38)
+				fpstate[0x38] |= 1 << 3;
+		}
+		if (CPU_IS_COLDFIRE) {
+			__asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
+					  "fmovel %%fpcr,%1\n\t"
+					  "fmovel %%fpsr,%2\n\t"
+					  "fmovel %%fpiar,%3"
+					  : "=m" (fpregs.f_fpregs[0]),
+					    "=m" (fpregs.f_fpcntl[0]),
+					    "=m" (fpregs.f_fpcntl[1]),
+					    "=m" (fpregs.f_fpcntl[2])
+					  : /* no inputs */
+					  : "memory");
+		} else {
+			__asm__ volatile (".chip 68k/68881\n\t"
+					  "fmovemx %%fp0-%%fp7,%0\n\t"
+					  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
+					  ".chip 68k"
+					  : "=m" (*fpregs.f_fpregs),
+					    "=m" (*fpregs.f_fpcntl)
+					  : /* no inputs */
+					  : "memory");
+		}
+		err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
+				    sizeof(fpregs));
+	}
+	if (context_size)
+		err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
+				    context_size);
+	return err;
+}
+
+#else /* CONFIG_FPU */
+
+/*
+ * For the case with no FPU configured these all do nothing.
+ */
+static inline int restore_fpu_state(struct sigcontext *sc)
+{
+	return 0;
+}
+
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
+{
+	return 0;
+}
+
+static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
+{
+}
+
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
+{
+	return 0;
+}
+
+#endif /* CONFIG_FPU */
+
+static inline void siginfo_build_tests(void)
+{
+	/*
+	 * This needs to be tested on m68k as it has a lesser
+	 * alignment requirement than x86 and that can cause surprises.
+	 */
+
+	/* This is part of the ABI and can never change in size: */
+	BUILD_BUG_ON(sizeof(siginfo_t) != 128);
+
+	/* Ensure the known fields never change in location */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_code)  != 8);
+
+	/* _kill */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x0c);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x10);
+
+	/* _timer */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_tid)     != 0x0c);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value)   != 0x14);
+
+	/* _rt */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)   != 0x0c);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)   != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x14);
+
+	/* _sigchld */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pid)    != 0x0c);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_uid)    != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x14);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_utime)  != 0x18);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_stime)  != 0x1c);
+
+	/* _sigfault */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x0c);
+
+	/* _sigfault._mcerr */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x10);
+
+	/* _sigfault._addr_bnd */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x12);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x16);
+
+	/* _sigfault._addr_pkey */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x12);
+
+	/* _sigpoll */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_band)   != 0x0c);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_fd)     != 0x10);
+
+	/* _sigsys */
+	BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x0c);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_syscall)   != 0x10);
+	BUILD_BUG_ON(offsetof(siginfo_t, si_arch)      != 0x14);
+
+	/* any new si_fields should be added here */
+}
+
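+/*
+ * Put the saved frame format/vector back into pt_regs.  If the original
+ * exception frame carried extra words beyond the short format 0 frame, the
+ * kernel stack has to grow again: move switch_stack/pt_regs down to make
+ * room, copy the saved extra words into the gap and jump straight to
+ * ret_from_signal (the C stack frame is gone by then, so this never returns).
+ */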
+static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
+			       void __user *fp)
+{
+	int fsize = frame_extra_sizes(formatvec >> 12);
+	if (fsize < 0) {
+		/*
+		 * user process trying to return with weird frame format
+		 */
+		pr_debug("user process returning with weird frame format\n");
+		return 1;
+	}
+	if (!fsize) {
+		regs->format = formatvec >> 12;
+		regs->vector = formatvec & 0xfff;
+	} else {
+		struct switch_stack *sw = (struct switch_stack *)regs - 1;
+		unsigned long buf[fsize / 2]; /* yes, twice as much */
+
+		/* that'll make sure that expansion won't crap over data */
+		if (copy_from_user(buf + fsize / 4, fp, fsize))
+			return 1;
+
+		/* point of no return */
+		regs->format = formatvec >> 12;
+		regs->vector = formatvec & 0xfff;
+#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
+		__asm__ __volatile__ (
+#ifdef CONFIG_COLDFIRE
+			 "   movel %0,%/sp\n\t"
+			 "   bra ret_from_signal\n"
+#else
+			 "   movel %0,%/a0\n\t"
+			 "   subl %1,%/a0\n\t"     /* make room on stack */
+			 "   movel %/a0,%/sp\n\t"  /* set stack pointer */
+			 /* move switch_stack and pt_regs */
+			 "1: movel %0@+,%/a0@+\n\t"
+			 "   dbra %2,1b\n\t"
+			 "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
+			 "   lsrl  #2,%1\n\t"
+			 "   subql #1,%1\n\t"
+			 /* copy to the gap we'd made */
+			 "2: movel %4@+,%/a0@+\n\t"
+			 "   dbra %1,2b\n\t"
+			 "   bral ret_from_signal\n"
+#endif
+			 : /* no outputs, it doesn't ever return */
+			 : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
+			   "n" (frame_offset), "a" (buf + fsize/4)
+			 : "a0");
+#undef frame_offset
+	}
+	return 0;
+}
+
+static inline int
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
+{
+	int formatvec;
+	struct sigcontext context;
+	int err = 0;
+
+	siginfo_build_tests();
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	/* get previous context */
+	if (copy_from_user(&context, usc, sizeof(context)))
+		goto badframe;
+
+	/* restore passed registers */
+	regs->d0 = context.sc_d0;
+	regs->d1 = context.sc_d1;
+	regs->a0 = context.sc_a0;
+	regs->a1 = context.sc_a1;
+	regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
+	regs->pc = context.sc_pc;
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	wrusp(context.sc_usp);
+	formatvec = context.sc_formatvec;
+
+	err = restore_fpu_state(&context);
+
+	if (err || mangle_kernel_stack(regs, formatvec, fp))
+		goto badframe;
+
+	return 0;
+
+badframe:
+	return 1;
+}
+
+static inline int
+rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
+		    struct ucontext __user *uc)
+{
+	int temp;
+	greg_t __user *gregs = uc->uc_mcontext.gregs;
+	unsigned long usp;
+	int err;
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	err = __get_user(temp, &uc->uc_mcontext.version);
+	if (temp != MCONTEXT_VERSION)
+		goto badframe;
+	/* restore passed registers */
+	err |= __get_user(regs->d0, &gregs[0]);
+	err |= __get_user(regs->d1, &gregs[1]);
+	err |= __get_user(regs->d2, &gregs[2]);
+	err |= __get_user(regs->d3, &gregs[3]);
+	err |= __get_user(regs->d4, &gregs[4]);
+	err |= __get_user(regs->d5, &gregs[5]);
+	err |= __get_user(sw->d6, &gregs[6]);
+	err |= __get_user(sw->d7, &gregs[7]);
+	err |= __get_user(regs->a0, &gregs[8]);
+	err |= __get_user(regs->a1, &gregs[9]);
+	err |= __get_user(regs->a2, &gregs[10]);
+	err |= __get_user(sw->a3, &gregs[11]);
+	err |= __get_user(sw->a4, &gregs[12]);
+	err |= __get_user(sw->a5, &gregs[13]);
+	err |= __get_user(sw->a6, &gregs[14]);
+	err |= __get_user(usp, &gregs[15]);
+	wrusp(usp);
+	err |= __get_user(regs->pc, &gregs[16]);
+	err |= __get_user(temp, &gregs[17]);
+	regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
+	regs->orig_d0 = -1;		/* disable syscall checks */
+	err |= __get_user(temp, &uc->uc_formatvec);
+
+	err |= rt_restore_fpu_state(uc);
+	err |= restore_altstack(&uc->uc_stack);
+
+	if (err)
+		goto badframe;
+
+	if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
+		goto badframe;
+
+	return 0;
+
+badframe:
+	return 1;
+}
+
+asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+{
+	unsigned long usp = rdusp();
+	struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
+	    (_NSIG_WORDS > 1 &&
+	     __copy_from_user(&set.sig[1], &frame->extramask,
+			      sizeof(frame->extramask))))
+		goto badframe;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(regs, &frame->sc, frame + 1))
+		goto badframe;
+	return regs->d0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
+{
+	unsigned long usp = rdusp();
+	struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
+	sigset_t set;
+
+	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+		goto badframe;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto badframe;
+
+	set_current_blocked(&set);
+
+	if (rt_restore_ucontext(regs, sw, &frame->uc))
+		goto badframe;
+	return regs->d0;
+
+badframe:
+	force_sig(SIGSEGV, current);
+	return 0;
+}
+
+static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
+			     unsigned long mask)
+{
+	sc->sc_mask = mask;
+	sc->sc_usp = rdusp();
+	sc->sc_d0 = regs->d0;
+	sc->sc_d1 = regs->d1;
+	sc->sc_a0 = regs->a0;
+	sc->sc_a1 = regs->a1;
+	sc->sc_sr = regs->sr;
+	sc->sc_pc = regs->pc;
+	sc->sc_formatvec = regs->format << 12 | regs->vector;
+	save_a5_state(sc, regs);
+	save_fpu_state(sc, regs);
+}
+
+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
+{
+	struct switch_stack *sw = (struct switch_stack *)regs - 1;
+	greg_t __user *gregs = uc->uc_mcontext.gregs;
+	int err = 0;
+
+	err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+	err |= __put_user(regs->d0, &gregs[0]);
+	err |= __put_user(regs->d1, &gregs[1]);
+	err |= __put_user(regs->d2, &gregs[2]);
+	err |= __put_user(regs->d3, &gregs[3]);
+	err |= __put_user(regs->d4, &gregs[4]);
+	err |= __put_user(regs->d5, &gregs[5]);
+	err |= __put_user(sw->d6, &gregs[6]);
+	err |= __put_user(sw->d7, &gregs[7]);
+	err |= __put_user(regs->a0, &gregs[8]);
+	err |= __put_user(regs->a1, &gregs[9]);
+	err |= __put_user(regs->a2, &gregs[10]);
+	err |= __put_user(sw->a3, &gregs[11]);
+	err |= __put_user(sw->a4, &gregs[12]);
+	err |= __put_user(sw->a5, &gregs[13]);
+	err |= __put_user(sw->a6, &gregs[14]);
+	err |= __put_user(rdusp(), &gregs[15]);
+	err |= __put_user(regs->pc, &gregs[16]);
+	err |= __put_user(regs->sr, &gregs[17]);
+	err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
+	err |= rt_save_fpu_state(uc, regs);
+	return err;
+}
+
+static inline void __user *
+get_sigframe(struct ksignal *ksig, size_t frame_size)
+{
+	unsigned long usp = sigsp(rdusp(), ksig);
+
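+	/* Round down to keep the frame 8-byte aligned (see the quadword note above). */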
+	return (void __user *)((usp - frame_size) & -8UL);
+}
+
+static int setup_frame(struct ksignal *ksig, sigset_t *set,
+			struct pt_regs *regs)
+{
+	struct sigframe __user *frame;
+	int fsize = frame_extra_sizes(regs->format);
+	struct sigcontext context;
+	int err = 0, sig = ksig->sig;
+
+	if (fsize < 0) {
+		pr_debug("setup_frame: Unknown frame format %#x\n",
+			 regs->format);
+		return -EFAULT;
+	}
+
+	frame = get_sigframe(ksig, sizeof(*frame) + fsize);
+
+	if (fsize)
+		err |= copy_to_user (frame + 1, regs + 1, fsize);
+
+	err |= __put_user(sig, &frame->sig);
+
+	err |= __put_user(regs->vector, &frame->code);
+	err |= __put_user(&frame->sc, &frame->psc);
+
+	if (_NSIG_WORDS > 1)
+		err |= copy_to_user(frame->extramask, &set->sig[1],
+				    sizeof(frame->extramask));
+
+	setup_sigcontext(&context, regs, set->sig[0]);
+	err |= copy_to_user (&frame->sc, &context, sizeof(context));
+
+	/* Set up to return from userspace.  */
+#ifdef CONFIG_MMU
+	err |= __put_user(frame->retcode, &frame->pretcode);
+	/* moveq #,d0; trap #0 */
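+	/*
+	 * 0x70004e40 is "moveq #0,%d0; trap #0"; adding __NR_sigreturn << 16
+	 * patches the moveq immediate with the syscall number.
+	 */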
+	err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
+			  (long __user *)(frame->retcode));
+#else
+	err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
+#endif
+
+	if (err)
+		return -EFAULT;
+
+	push_cache ((unsigned long) &frame->retcode);
+
+	/*
+	 * Set up registers for signal handler.  All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
+	wrusp ((unsigned long) frame);
+	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+	adjustformat(regs);
+
+	/*
+	 * This is subtle; if we build more than one sigframe, all but the
+	 * first one will see frame format 0 and have fsize == 0, so we won't
+	 * screw stkadj.
+	 */
+	if (fsize)
+		regs->stkadj = fsize;
+
+	/* Prepare to skip over the extra stuff in the exception frame.  */
+	if (regs->stkadj) {
+		struct pt_regs *tregs =
+			(struct pt_regs *)((ulong)regs + regs->stkadj);
+		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+		/* This must be copied with decreasing addresses to
+                   handle overlaps.  */
+		tregs->vector = 0;
+		tregs->format = 0;
+		tregs->pc = regs->pc;
+		tregs->sr = regs->sr;
+	}
+	return 0;
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+			   struct pt_regs *regs)
+{
+	struct rt_sigframe __user *frame;
+	int fsize = frame_extra_sizes(regs->format);
+	int err = 0, sig = ksig->sig;
+
+	if (fsize < 0) {
+		pr_debug("setup_frame: Unknown frame format %#x\n",
+			 regs->format);
+		return -EFAULT;
+	}
+
+	frame = get_sigframe(ksig, sizeof(*frame));
+
+	if (fsize)
+		err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
+
+	err |= __put_user(sig, &frame->sig);
+	err |= __put_user(&frame->info, &frame->pinfo);
+	err |= __put_user(&frame->uc, &frame->puc);
+	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+	/* Create the ucontext.  */
+	err |= __put_user(0, &frame->uc.uc_flags);
+	err |= __put_user(NULL, &frame->uc.uc_link);
+	err |= __save_altstack(&frame->uc.uc_stack, rdusp());
+	err |= rt_setup_ucontext(&frame->uc, regs);
+	err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Set up to return from userspace.  */
+#ifdef CONFIG_MMU
+	err |= __put_user(frame->retcode, &frame->pretcode);
+#ifdef __mcoldfire__
+	/* movel #__NR_rt_sigreturn,d0; trap #0 */
+	err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
+	err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
+			  (long __user *)(frame->retcode + 4));
+#else
+	/* moveq #,d0; notb d0; trap #0 */
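+	/*
+	 * __NR_rt_sigreturn does not fit in moveq's signed 8-bit immediate,
+	 * so load its complemented value and undo that with notb.
+	 */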
+	err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
+			  (long __user *)(frame->retcode + 0));
+	err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
+#endif
+#else
+	err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
+#endif /* CONFIG_MMU */
+
+	if (err)
+		return -EFAULT;
+
+	push_cache ((unsigned long) &frame->retcode);
+
+	/*
+	 * Set up registers for signal handler.  All the state we are about
+	 * to destroy is successfully copied to sigframe.
+	 */
+	wrusp ((unsigned long) frame);
+	regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
+	adjustformat(regs);
+
+	/*
+	 * This is subtle; if we build more than one sigframe, all but the
+	 * first one will see frame format 0 and have fsize == 0, so we won't
+	 * screw stkadj.
+	 */
+	if (fsize)
+		regs->stkadj = fsize;
+
+	/* Prepare to skip over the extra stuff in the exception frame.  */
+	if (regs->stkadj) {
+		struct pt_regs *tregs =
+			(struct pt_regs *)((ulong)regs + regs->stkadj);
+		pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
+		/* This must be copied with decreasing addresses to
+                   handle overlaps.  */
+		tregs->vector = 0;
+		tregs->format = 0;
+		tregs->pc = regs->pc;
+		tregs->sr = regs->sr;
+	}
+	return 0;
+}
+
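+/*
+ * Decide how an interrupted system call resumes: either rewind the pc so
+ * the trap instruction is re-executed, or make the call fail with -EINTR
+ * when a handler will run (honouring SA_RESTART for -ERESTARTSYS).
+ */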
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+	switch (regs->d0) {
+	case -ERESTARTNOHAND:
+		if (!has_handler)
+			goto do_restart;
+		regs->d0 = -EINTR;
+		break;
+
+	case -ERESTART_RESTARTBLOCK:
+		if (!has_handler) {
+			regs->d0 = __NR_restart_syscall;
+			regs->pc -= 2;
+			break;
+		}
+		regs->d0 = -EINTR;
+		break;
+
+	case -ERESTARTSYS:
+		if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+			regs->d0 = -EINTR;
+			break;
+		}
+	/* fallthrough */
+	case -ERESTARTNOINTR:
+	do_restart:
+		regs->d0 = regs->orig_d0;
+		regs->pc -= 2;
+		break;
+	}
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+	sigset_t *oldset = sigmask_to_save();
+	int err;
+	/* are we from a system call? */
+	if (regs->orig_d0 >= 0)
+		/* If so, check system call restarting.. */
+		handle_restart(regs, &ksig->ka, 1);
+
+	/* set up the stack frame */
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+		err = setup_rt_frame(ksig, oldset, regs);
+	else
+		err = setup_frame(ksig, oldset, regs);
+
+	signal_setup_done(err, ksig, 0);
+
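+	/* Delayed trace pending: clear the trace bit in the saved SR and raise SIGTRAP. */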
+	if (test_thread_flag(TIF_DELAYED_TRACE)) {
+		regs->sr &= ~0x8000;
+		send_sig(SIGTRAP, current, 1);
+	}
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ */
+static void do_signal(struct pt_regs *regs)
+{
+	struct ksignal ksig;
+
+	current->thread.esp0 = (unsigned long) regs;
+
+	if (get_signal(&ksig)) {
+		/* Whee!  Actually deliver the signal.  */
+		handle_signal(&ksig, regs);
+		return;
+	}
+
+	/* Did we come from a system call? */
+	if (regs->orig_d0 >= 0)
+		/* Restart the system call - no handlers present */
+		handle_restart(regs, NULL, 0);
+
+	/* If there's no signal to deliver, we just restore the saved mask.  */
+	restore_saved_sigmask();
+}
+
+void do_notify_resume(struct pt_regs *regs)
+{
+	if (test_thread_flag(TIF_SIGPENDING))
+		do_signal(regs);
+
+	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
+		tracehook_notify_resume(regs);
+}
diff --git a/arch/m68k/kernel/sun3-head.S b/arch/m68k/kernel/sun3-head.S
new file mode 100644
index 0000000..faf18f4
--- /dev/null
+++ b/arch/m68k/kernel/sun3-head.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/entry.h>
+#include <asm/page.h>
+#include <asm/contregs.h>
+#include <asm/sun3-head.h>
+
+PSL_HIGHIPL     = 0x2700
+NBSG            = 0x20000
+ICACHE_ONLY	= 0x00000009
+CACHES_OFF	= 0x00000008	| actually a clear and disable --m
+#define MAS_STACK INT_STACK
+ROOT_TABLE_SIZE = 128
+PAGESIZE	= 8192
+SUN3_INVALID_PMEG = 255
+.globl bootup_user_stack
+.globl bootup_kernel_stack
+.globl pg0
+.globl swapper_pg_dir
+.globl kernel_pmd_table
+.globl availmem
+.global m68k_pgtable_cachemode
+.global kpt
+| todo: all these should be in bss!
+swapper_pg_dir:                .skip 0x2000
+pg0:                           .skip 0x2000
+kernel_pmd_table:              .skip 0x2000
+
+.globl kernel_pg_dir
+.equ    kernel_pg_dir,kernel_pmd_table
+
+	__HEAD
+ENTRY(_stext)
+ENTRY(_start)
+
+/* Firstly, disable interrupts and set up function codes. */
+	movew	#PSL_HIGHIPL, %sr
+	moveq	#FC_CONTROL, %d0
+	movec	%d0, %sfc
+	movec	%d0, %dfc
+
+/* Make sure we're in context zero. */
+	moveq	#0, %d0
+	movsb	%d0, AC_CONTEXT
+
+/* map everything the bootloader left us into high memory, clean up the
+   excess later */
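+|  Copy the segment-map (PMEG) entries from address 0 up to the same offsets
+|  above KERNBASE, stopping once an invalid PMEG is reached.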
+	lea	(AC_SEGMAP+0),%a0
+	lea	(AC_SEGMAP+KERNBASE),%a1
+1:
+	movsb	%a0@, %d1
+	movsb	%d1, %a1@
+	cmpib	#SUN3_INVALID_PMEG, %d1
+	beq	2f
+	addl	#NBSG,%a0
+	addl	#NBSG,%a1
+	jmp	1b
+
+2:
+
+/* Disable caches and jump to high code. */
+	moveq	#ICACHE_ONLY,%d0	| Cache disabled until we're ready to enable it
+	movc	%d0, %cacr	|   is this the right value? (yes --m)
+	jmp	1f:l
+
+/* Following code executes at high addresses (0xE000xxx). */
+1:	lea	init_task,%curptr			| get initial thread...
+	lea	init_thread_union+THREAD_SIZE,%sp	| ...and its stack.
+
+/* Point MSP at an invalid page to trap if it's used. --m */
+	movl	#(PAGESIZE),%d0
+	movc	%d0,%msp
+	moveq	#-1,%d0
+	movsb	%d0,(AC_SEGMAP+0x0)
+
+	jbsr	sun3_init
+
+	jbsr	base_trap_init
+
+        jbsr    start_kernel
+	trap	#15
+
+        .data
+        .even
+kpt:
+        .long 0
+availmem:
+        .long 0
+| todo: remove next two. --m
+is_medusa:
+        .long 0
+m68k_pgtable_cachemode:
+        .long 0
+
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
new file mode 100644
index 0000000..6363ec8
--- /dev/null
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -0,0 +1,572 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/m68k/kernel/sys_m68k.c
+ *
+ * This file contains various random system calls that
+ * have a non-standard calling sequence on the Linux/m68k
+ * platform.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/syscalls.h>
+#include <linux/mman.h>
+#include <linux/file.h>
+#include <linux/ipc.h>
+
+#include <asm/setup.h>
+#include <linux/uaccess.h>
+#include <asm/cachectl.h>
+#include <asm/traps.h>
+#include <asm/page.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_MMU
+
+#include <asm/tlb.h>
+
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+			     unsigned long error_code);
+
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+	unsigned long prot, unsigned long flags,
+	unsigned long fd, unsigned long pgoff)
+{
+	/*
+	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
+	 * so the argument would have to be shifted down by 1; m68k mmap64(3)
+	 * (in libc) expects the last argument of mmap2 in 4Kb units.
+	 */
+	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
+}
+
+/* Convert virtual (user) address VADDR to physical address PADDR */
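+/* (evaluates to 0 when the page is not resident, i.e. the MMUSR R bit is clear) */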
+#define virt_to_phys_040(vaddr)						\
+({									\
+  unsigned long _mmusr, _paddr;						\
+									\
+  __asm__ __volatile__ (".chip 68040\n\t"				\
+			"ptestr (%1)\n\t"				\
+			"movec %%mmusr,%0\n\t"				\
+			".chip 68k"					\
+			: "=r" (_mmusr)					\
+			: "a" (vaddr));					\
+  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
+  _paddr;								\
+})
+
+static inline int
+cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+  unsigned long paddr, i;
+
+  switch (scope)
+    {
+    case FLUSH_SCOPE_ALL:
+      switch (cache)
+	{
+	case FLUSH_CACHE_DATA:
+	  /* This nop is needed for some broken versions of the 68040.  */
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %dc\n\t"
+				".chip 68k");
+	  break;
+	case FLUSH_CACHE_INSN:
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %ic\n\t"
+				".chip 68k");
+	  break;
+	default:
+	case FLUSH_CACHE_BOTH:
+	  __asm__ __volatile__ ("nop\n\t"
+				".chip 68040\n\t"
+				"cpusha %bc\n\t"
+				".chip 68k");
+	  break;
+	}
+      break;
+
+    case FLUSH_SCOPE_LINE:
+      /* Find the physical address of the first mapped page in the
+	 address range.  */
+      if ((paddr = virt_to_phys_040(addr))) {
+        paddr += addr & ~(PAGE_MASK | 15);
+        len = (len + (addr & 15) + 15) >> 4;
+      } else {
+	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+	if (len <= tmp)
+	  return 0;
+	addr += tmp;
+	len -= tmp;
+	tmp = PAGE_SIZE;
+	for (;;)
+	  {
+	    if ((paddr = virt_to_phys_040(addr)))
+	      break;
+	    if (len <= tmp)
+	      return 0;
+	    addr += tmp;
+	    len -= tmp;
+	  }
+	len = (len + 15) >> 4;
+      }
+      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+      while (len--)
+	{
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushl %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	  if (!--i && len)
+	    {
+	      /*
+	       * No need to page align here since it is done by
+	       * virt_to_phys_040().
+	       */
+	      addr += PAGE_SIZE;
+	      i = PAGE_SIZE / 16;
+	      /* Recompute physical address when crossing a page
+	         boundary. */
+	      for (;;)
+		{
+		  if ((paddr = virt_to_phys_040(addr)))
+		    break;
+		  if (len <= i)
+		    return 0;
+		  len -= i;
+		  addr += PAGE_SIZE;
+		}
+	    }
+	  else
+	    paddr += 16;
+	}
+      break;
+
+    default:
+    case FLUSH_SCOPE_PAGE:
+      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+	{
+	  if (!(paddr = virt_to_phys_040(addr)))
+	    continue;
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ ("nop\n\t"
+				    ".chip 68040\n\t"
+				    "cpushp %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	}
+      break;
+    }
+  return 0;
+}
+
+#define virt_to_phys_060(vaddr)				\
+({							\
+  unsigned long paddr;					\
+  __asm__ __volatile__ (".chip 68060\n\t"		\
+			"plpar (%0)\n\t"		\
+			".chip 68k"			\
+			: "=a" (paddr)			\
+			: "0" (vaddr));			\
+  (paddr); /* XXX */					\
+})
+
+static inline int
+cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
+{
+  unsigned long paddr, i;
+
+  /*
+   * 68060 manual says:
+   *  cpush %dc : flush DC, remains valid (with our %cacr setup)
+   *  cpush %ic : invalidate IC
+   *  cpush %bc : flush DC + invalidate IC
+   */
+  switch (scope)
+    {
+    case FLUSH_SCOPE_ALL:
+      switch (cache)
+	{
+	case FLUSH_CACHE_DATA:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %dc\n\t"
+				".chip 68k");
+	  break;
+	case FLUSH_CACHE_INSN:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %ic\n\t"
+				".chip 68k");
+	  break;
+	default:
+	case FLUSH_CACHE_BOTH:
+	  __asm__ __volatile__ (".chip 68060\n\t"
+				"cpusha %bc\n\t"
+				".chip 68k");
+	  break;
+	}
+      break;
+
+    case FLUSH_SCOPE_LINE:
+      /* Find the physical address of the first mapped page in the
+	 address range.  */
+      len += addr & 15;
+      addr &= -16;
+      if (!(paddr = virt_to_phys_060(addr))) {
+	unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
+
+	if (len <= tmp)
+	  return 0;
+	addr += tmp;
+	len -= tmp;
+	tmp = PAGE_SIZE;
+	for (;;)
+	  {
+	    if ((paddr = virt_to_phys_060(addr)))
+	      break;
+	    if (len <= tmp)
+	      return 0;
+	    addr += tmp;
+	    len -= tmp;
+	  }
+      }
+      len = (len + 15) >> 4;
+      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
+      while (len--)
+	{
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushl %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	  if (!--i && len)
+	    {
+
+	      /*
+	       * We just want to jump to the first cache line
+	       * in the next page.
+	       */
+	      addr += PAGE_SIZE;
+	      addr &= PAGE_MASK;
+
+	      i = PAGE_SIZE / 16;
+	      /* Recompute physical address when crossing a page
+	         boundary. */
+	      for (;;)
+	        {
+	          if ((paddr = virt_to_phys_060(addr)))
+	            break;
+	          if (len <= i)
+	            return 0;
+	          len -= i;
+	          addr += PAGE_SIZE;
+	        }
+	    }
+	  else
+	    paddr += 16;
+	}
+      break;
+
+    default:
+    case FLUSH_SCOPE_PAGE:
+      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
+      addr &= PAGE_MASK;	/* Workaround for bug in some
+				   revisions of the 68060 */
+      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
+	{
+	  if (!(paddr = virt_to_phys_060(addr)))
+	    continue;
+	  switch (cache)
+	    {
+	    case FLUSH_CACHE_DATA:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%dc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    case FLUSH_CACHE_INSN:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%ic,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    default:
+	    case FLUSH_CACHE_BOTH:
+	      __asm__ __volatile__ (".chip 68060\n\t"
+				    "cpushp %%bc,(%0)\n\t"
+				    ".chip 68k"
+				    : : "a" (paddr));
+	      break;
+	    }
+	}
+      break;
+    }
+  return 0;
+}
+
+/* sys_cacheflush -- flush (part of) the processor cache.  */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	int ret = -EINVAL;
+
+	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
+	    cache & ~FLUSH_CACHE_BOTH)
+		goto out;
+
+	if (scope == FLUSH_SCOPE_ALL) {
+		/* Only the superuser may explicitly flush the whole cache. */
+		ret = -EPERM;
+		if (!capable(CAP_SYS_ADMIN))
+			goto out;
+	} else {
+		struct vm_area_struct *vma;
+
+		/* Check for overflow.  */
+		if (addr + len < addr)
+			goto out;
+
+		/*
+		 * Verify that the specified address region actually belongs
+		 * to this process.
+		 */
+		down_read(&current->mm->mmap_sem);
+		vma = find_vma(current->mm, addr);
+		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
+			goto out_unlock;
+	}
+
+	if (CPU_IS_020_OR_030) {
+		if (scope == FLUSH_SCOPE_LINE && len < 256) {
+			unsigned long cacr;
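+			/*
+			 * Per-line flush: set the 020/030 CACR clear-entry
+			 * bits and step CAAR over the address range below.
+			 */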
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 4;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x400;
+			len >>= 2;
+			while (len--) {
+				__asm__ __volatile__ ("movec %1, %%caar\n\t"
+						      "movec %0, %%cacr"
+						      : /* no outputs */
+						      : "r" (cacr), "r" (addr));
+				addr += 4;
+			}
+		} else {
+			/* Flush the whole cache, even if page granularity requested. */
+			unsigned long cacr;
+			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
+			if (cache & FLUSH_CACHE_INSN)
+				cacr |= 8;
+			if (cache & FLUSH_CACHE_DATA)
+				cacr |= 0x800;
+			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
+		}
+		ret = 0;
+		goto out_unlock;
+	} else {
+	    /*
+	     * 040 or 060: don't blindly trust 'scope', someone could
+	     * try to flush a few megs of memory.
+	     */
+
+	    if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
+	        scope = FLUSH_SCOPE_PAGE;
+	    if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
+	        scope = FLUSH_SCOPE_ALL;
+	    if (CPU_IS_040) {
+		ret = cache_flush_040 (addr, scope, cache, len);
+	    } else if (CPU_IS_060) {
+		ret = cache_flush_060 (addr, scope, cache, len);
+	    }
+	}
+out_unlock:
+	up_read(&current->mm->mmap_sem);
+out:
+	return ret;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+   D1 (newval).  */
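+/*
+ * Emulated compare-and-exchange on a user-space word: with the pte lock
+ * held, only touch the word if its page is present, writable and dirty;
+ * otherwise simulate a write fault to break COW / fault the page in, and
+ * retry.
+ */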
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+		      unsigned long __user * mem)
+{
+	/* This was borrowed from ARM's implementation.  */
+	for (;;) {
+		struct mm_struct *mm = current->mm;
+		pgd_t *pgd;
+		pmd_t *pmd;
+		pte_t *pte;
+		spinlock_t *ptl;
+		unsigned long mem_value;
+
+		down_read(&mm->mmap_sem);
+		pgd = pgd_offset(mm, (unsigned long)mem);
+		if (!pgd_present(*pgd))
+			goto bad_access;
+		pmd = pmd_offset(pgd, (unsigned long)mem);
+		if (!pmd_present(*pmd))
+			goto bad_access;
+		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
+		if (!pte_present(*pte) || !pte_dirty(*pte)
+		    || !pte_write(*pte)) {
+			pte_unmap_unlock(pte, ptl);
+			goto bad_access;
+		}
+
+		/*
+		 * No need to check for EFAULT; we know that the page is
+		 * present and writable.
+		 */
+		__get_user(mem_value, mem);
+		if (mem_value == oldval)
+			__put_user(newval, mem);
+
+		pte_unmap_unlock(pte, ptl);
+		up_read(&mm->mmap_sem);
+		return mem_value;
+
+	      bad_access:
+		up_read(&mm->mmap_sem);
+		/* This is not necessarily a bad access: we can get here if
+		   the memory we're trying to write to is marked copy-on-write.
+		   Let the kernel do the necessary page handling, then retry.
+		   Simulate a write access fault to do that.  */
+		{
+			/* The first argument of the function corresponds to
+			   D1, which is the first field of struct pt_regs.  */
+			struct pt_regs *fp = (struct pt_regs *)&newval;
+
+			/* '3' is an RMW flag.  */
+			if (do_page_fault(fp, (unsigned long)mem, 3))
+				/* If the do_page_fault() failed, we don't
+				   have anything meaningful to return.
+				   There should be a SIGSEGV pending for
+				   the process.  */
+				return 0xdeadbeef;
+		}
+	}
+}
+
+#else
+
+/* sys_cacheflush -- flush (part of) the processor cache.  */
+asmlinkage int
+sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
+{
+	flush_cache_all();
+	return 0;
+}
+
+/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
+   D1 (newval).  */
+asmlinkage int
+sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+		      unsigned long __user * mem)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long mem_value;
+
+	down_read(&mm->mmap_sem);
+
+	mem_value = *mem;
+	if (mem_value == oldval)
+		*mem = newval;
+
+	up_read(&mm->mmap_sem);
+	return mem_value;
+}
+
+#endif /* CONFIG_MMU */
+
+asmlinkage int sys_getpagesize(void)
+{
+	return PAGE_SIZE;
+}
+
+asmlinkage unsigned long sys_get_thread_area(void)
+{
+	return current_thread_info()->tp_value;
+}
+
+asmlinkage int sys_set_thread_area(unsigned long tp)
+{
+	current_thread_info()->tp_value = tp;
+	return 0;
+}
+
+asmlinkage int sys_atomic_barrier(void)
+{
+	/* no code needed for uniprocs */
+	return 0;
+}
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
new file mode 100644
index 0000000..2c8402e
--- /dev/null
+++ b/arch/m68k/kernel/syscalltable.S
@@ -0,0 +1,403 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *  Copyright (C) 2002, Greg Ungerer (gerg@snapgear.com)
+ *
+ *  Based on older entry.S files, the following copyrights apply:
+ *
+ *  Copyright (C) 1998  D. Jeff Dionne <jeff@lineo.ca>,
+ *                      Kenneth Albanowski <kjahds@kjahds.com>,
+ *  Copyright (C) 2000  Lineo Inc. (www.lineo.com) 
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *
+ *  Linux/m68k support by Hamish Macdonald
+ */
+
+#include <linux/linkage.h>
+
+#ifndef CONFIG_MMU
+#define sys_mmap2		sys_mmap_pgoff
+#endif
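+/*
+ * One entry per m68k syscall number; the numbered comments every five
+ * entries exist only to make the table easier to audit.
+ */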
+
+.section .rodata
+ALIGN
+ENTRY(sys_call_table)
+	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
+	.long sys_exit
+	.long __sys_fork
+	.long sys_read
+	.long sys_write
+	.long sys_open			/* 5 */
+	.long sys_close
+	.long sys_waitpid
+	.long sys_creat
+	.long sys_link
+	.long sys_unlink		/* 10 */
+	.long sys_execve
+	.long sys_chdir
+	.long sys_time
+	.long sys_mknod
+	.long sys_chmod			/* 15 */
+	.long sys_chown16
+	.long sys_ni_syscall		/* old break syscall holder */
+	.long sys_stat
+	.long sys_lseek
+	.long sys_getpid		/* 20 */
+	.long sys_mount
+	.long sys_oldumount
+	.long sys_setuid16
+	.long sys_getuid16
+	.long sys_stime			/* 25 */
+	.long sys_ptrace
+	.long sys_alarm
+	.long sys_fstat
+	.long sys_pause
+	.long sys_utime			/* 30 */
+	.long sys_ni_syscall		/* old stty syscall holder */
+	.long sys_ni_syscall		/* old gtty syscall holder */
+	.long sys_access
+	.long sys_nice
+	.long sys_ni_syscall		/* 35 - old ftime syscall holder */
+	.long sys_sync
+	.long sys_kill
+	.long sys_rename
+	.long sys_mkdir
+	.long sys_rmdir			/* 40 */
+	.long sys_dup
+	.long sys_pipe
+	.long sys_times
+	.long sys_ni_syscall		/* old prof syscall holder */
+	.long sys_brk			/* 45 */
+	.long sys_setgid16
+	.long sys_getgid16
+	.long sys_signal
+	.long sys_geteuid16
+	.long sys_getegid16		/* 50 */
+	.long sys_acct
+	.long sys_umount		/* recycled never used phys() */
+	.long sys_ni_syscall		/* old lock syscall holder */
+	.long sys_ioctl
+	.long sys_fcntl			/* 55 */
+	.long sys_ni_syscall		/* old mpx syscall holder */
+	.long sys_setpgid
+	.long sys_ni_syscall		/* old ulimit syscall holder */
+	.long sys_ni_syscall
+	.long sys_umask			/* 60 */
+	.long sys_chroot
+	.long sys_ustat
+	.long sys_dup2
+	.long sys_getppid
+	.long sys_getpgrp		/* 65 */
+	.long sys_setsid
+	.long sys_sigaction
+	.long sys_sgetmask
+	.long sys_ssetmask
+	.long sys_setreuid16		/* 70 */
+	.long sys_setregid16
+	.long sys_sigsuspend
+	.long sys_sigpending
+	.long sys_sethostname
+	.long sys_setrlimit		/* 75 */
+	.long sys_old_getrlimit
+	.long sys_getrusage
+	.long sys_gettimeofday
+	.long sys_settimeofday
+	.long sys_getgroups16		/* 80 */
+	.long sys_setgroups16
+	.long sys_old_select
+	.long sys_symlink
+	.long sys_lstat
+	.long sys_readlink		/* 85 */
+	.long sys_uselib
+	.long sys_swapon
+	.long sys_reboot
+	.long sys_old_readdir
+	.long sys_old_mmap		/* 90 */
+	.long sys_munmap
+	.long sys_truncate
+	.long sys_ftruncate
+	.long sys_fchmod
+	.long sys_fchown16		/* 95 */
+	.long sys_getpriority
+	.long sys_setpriority
+	.long sys_ni_syscall		/* old profil syscall holder */
+	.long sys_statfs
+	.long sys_fstatfs		/* 100 */
+	.long sys_ni_syscall		/* ioperm for i386 */
+	.long sys_socketcall
+	.long sys_syslog
+	.long sys_setitimer
+	.long sys_getitimer		/* 105 */
+	.long sys_newstat
+	.long sys_newlstat
+	.long sys_newfstat
+	.long sys_ni_syscall
+	.long sys_ni_syscall		/* 110 - iopl for i386 */
+	.long sys_vhangup
+	.long sys_ni_syscall		/* obsolete idle() syscall */
+	.long sys_ni_syscall		/* vm86old for i386 */
+	.long sys_wait4
+	.long sys_swapoff		/* 115 */
+	.long sys_sysinfo
+	.long sys_ipc
+	.long sys_fsync
+	.long sys_sigreturn
+	.long __sys_clone		/* 120 */
+	.long sys_setdomainname
+	.long sys_newuname
+	.long sys_cacheflush		/* modify_ldt for i386 */
+	.long sys_adjtimex
+	.long sys_mprotect		/* 125 */
+	.long sys_sigprocmask
+	.long sys_ni_syscall		/* old "create_module" */
+	.long sys_init_module
+	.long sys_delete_module
+	.long sys_ni_syscall		/* 130 - old "get_kernel_syms" */
+	.long sys_quotactl
+	.long sys_getpgid
+	.long sys_fchdir
+	.long sys_bdflush
+	.long sys_sysfs			/* 135 */
+	.long sys_personality
+	.long sys_ni_syscall		/* for afs_syscall */
+	.long sys_setfsuid16
+	.long sys_setfsgid16
+	.long sys_llseek		/* 140 */
+	.long sys_getdents
+	.long sys_select
+	.long sys_flock
+	.long sys_msync
+	.long sys_readv			/* 145 */
+	.long sys_writev
+	.long sys_getsid
+	.long sys_fdatasync
+	.long sys_sysctl
+	.long sys_mlock			/* 150 */
+	.long sys_munlock
+	.long sys_mlockall
+	.long sys_munlockall
+	.long sys_sched_setparam
+	.long sys_sched_getparam	/* 155 */
+	.long sys_sched_setscheduler
+	.long sys_sched_getscheduler
+	.long sys_sched_yield
+	.long sys_sched_get_priority_max
+	.long sys_sched_get_priority_min  /* 160 */
+	.long sys_sched_rr_get_interval
+	.long sys_nanosleep
+	.long sys_mremap
+	.long sys_setresuid16
+	.long sys_getresuid16		/* 165 */
+	.long sys_getpagesize
+	.long sys_ni_syscall		/* old "query_module" */
+	.long sys_poll
+	.long sys_ni_syscall		/* old nfsservctl */
+	.long sys_setresgid16		/* 170 */
+	.long sys_getresgid16
+	.long sys_prctl
+	.long sys_rt_sigreturn
+	.long sys_rt_sigaction
+	.long sys_rt_sigprocmask	/* 175 */
+	.long sys_rt_sigpending
+	.long sys_rt_sigtimedwait
+	.long sys_rt_sigqueueinfo
+	.long sys_rt_sigsuspend
+	.long sys_pread64		/* 180 */
+	.long sys_pwrite64
+	.long sys_lchown16
+	.long sys_getcwd
+	.long sys_capget
+	.long sys_capset		/* 185 */
+	.long sys_sigaltstack
+	.long sys_sendfile
+	.long sys_ni_syscall		/* streams1 */
+	.long sys_ni_syscall		/* streams2 */
+	.long __sys_vfork		/* 190 */
+	.long sys_getrlimit
+	.long sys_mmap2
+	.long sys_truncate64
+	.long sys_ftruncate64
+	.long sys_stat64		/* 195 */
+	.long sys_lstat64
+	.long sys_fstat64
+	.long sys_chown
+	.long sys_getuid
+	.long sys_getgid		/* 200 */
+	.long sys_geteuid
+	.long sys_getegid
+	.long sys_setreuid
+	.long sys_setregid
+	.long sys_getgroups		/* 205 */
+	.long sys_setgroups
+	.long sys_fchown
+	.long sys_setresuid
+	.long sys_getresuid
+	.long sys_setresgid		/* 210 */
+	.long sys_getresgid
+	.long sys_lchown
+	.long sys_setuid
+	.long sys_setgid
+	.long sys_setfsuid		/* 215 */
+	.long sys_setfsgid
+	.long sys_pivot_root
+	.long sys_ni_syscall
+	.long sys_ni_syscall
+	.long sys_getdents64		/* 220 */
+	.long sys_gettid
+	.long sys_tkill
+	.long sys_setxattr
+	.long sys_lsetxattr
+	.long sys_fsetxattr		/* 225 */
+	.long sys_getxattr
+	.long sys_lgetxattr
+	.long sys_fgetxattr
+	.long sys_listxattr
+	.long sys_llistxattr		/* 230 */
+	.long sys_flistxattr
+	.long sys_removexattr
+	.long sys_lremovexattr
+	.long sys_fremovexattr
+	.long sys_futex			/* 235 */
+	.long sys_sendfile64
+	.long sys_mincore
+	.long sys_madvise
+	.long sys_fcntl64
+	.long sys_readahead		/* 240 */
+	.long sys_io_setup
+	.long sys_io_destroy
+	.long sys_io_getevents
+	.long sys_io_submit
+	.long sys_io_cancel		/* 245 */
+	.long sys_fadvise64
+	.long sys_exit_group
+	.long sys_lookup_dcookie
+	.long sys_epoll_create
+	.long sys_epoll_ctl		/* 250 */
+	.long sys_epoll_wait
+	.long sys_remap_file_pages
+	.long sys_set_tid_address
+	.long sys_timer_create
+	.long sys_timer_settime		/* 255 */
+	.long sys_timer_gettime
+	.long sys_timer_getoverrun
+	.long sys_timer_delete
+	.long sys_clock_settime
+	.long sys_clock_gettime		/* 260 */
+	.long sys_clock_getres
+	.long sys_clock_nanosleep
+	.long sys_statfs64
+	.long sys_fstatfs64
+	.long sys_tgkill		/* 265 */
+	.long sys_utimes
+	.long sys_fadvise64_64
+	.long sys_mbind
+	.long sys_get_mempolicy
+	.long sys_set_mempolicy		/* 270 */
+	.long sys_mq_open
+	.long sys_mq_unlink
+	.long sys_mq_timedsend
+	.long sys_mq_timedreceive
+	.long sys_mq_notify		/* 275 */
+	.long sys_mq_getsetattr
+	.long sys_waitid
+	.long sys_ni_syscall		/* for sys_vserver */
+	.long sys_add_key
+	.long sys_request_key		/* 280 */
+	.long sys_keyctl
+	.long sys_ioprio_set
+	.long sys_ioprio_get
+	.long sys_inotify_init
+	.long sys_inotify_add_watch	/* 285 */
+	.long sys_inotify_rm_watch
+	.long sys_migrate_pages
+	.long sys_openat
+	.long sys_mkdirat
+	.long sys_mknodat		/* 290 */
+	.long sys_fchownat
+	.long sys_futimesat
+	.long sys_fstatat64
+	.long sys_unlinkat
+	.long sys_renameat		/* 295 */
+	.long sys_linkat
+	.long sys_symlinkat
+	.long sys_readlinkat
+	.long sys_fchmodat
+	.long sys_faccessat		/* 300 */
+	.long sys_pselect6
+	.long sys_ppoll
+	.long sys_unshare
+	.long sys_set_robust_list
+	.long sys_get_robust_list	/* 305 */
+	.long sys_splice
+	.long sys_sync_file_range
+	.long sys_tee
+	.long sys_vmsplice
+	.long sys_move_pages		/* 310 */
+	.long sys_sched_setaffinity
+	.long sys_sched_getaffinity
+	.long sys_kexec_load
+	.long sys_getcpu
+	.long sys_epoll_pwait		/* 315 */
+	.long sys_utimensat
+	.long sys_signalfd
+	.long sys_timerfd_create
+	.long sys_eventfd
+	.long sys_fallocate		/* 320 */
+	.long sys_timerfd_settime
+	.long sys_timerfd_gettime
+	.long sys_signalfd4
+	.long sys_eventfd2
+	.long sys_epoll_create1		/* 325 */
+	.long sys_dup3
+	.long sys_pipe2
+	.long sys_inotify_init1
+	.long sys_preadv
+	.long sys_pwritev		/* 330 */
+	.long sys_rt_tgsigqueueinfo
+	.long sys_perf_event_open
+	.long sys_get_thread_area
+	.long sys_set_thread_area
+	.long sys_atomic_cmpxchg_32	/* 335 */
+	.long sys_atomic_barrier
+	.long sys_fanotify_init
+	.long sys_fanotify_mark
+	.long sys_prlimit64
+	.long sys_name_to_handle_at	/* 340 */
+	.long sys_open_by_handle_at
+	.long sys_clock_adjtime
+	.long sys_syncfs
+	.long sys_setns
+	.long sys_process_vm_readv	/* 345 */
+	.long sys_process_vm_writev
+	.long sys_kcmp
+	.long sys_finit_module
+	.long sys_sched_setattr
+	.long sys_sched_getattr		/* 350 */
+	.long sys_renameat2
+	.long sys_getrandom
+	.long sys_memfd_create
+	.long sys_bpf
+	.long sys_execveat		/* 355 */
+	.long sys_socket
+	.long sys_socketpair
+	.long sys_bind
+	.long sys_connect
+	.long sys_listen		/* 360 */
+	.long sys_accept4
+	.long sys_getsockopt
+	.long sys_setsockopt
+	.long sys_getsockname
+	.long sys_getpeername		/* 365 */
+	.long sys_sendto
+	.long sys_sendmsg
+	.long sys_recvfrom
+	.long sys_recvmsg
+	.long sys_shutdown		/* 370 */
+	.long sys_recvmmsg
+	.long sys_sendmmsg
+	.long sys_userfaultfd
+	.long sys_membarrier
+	.long sys_mlock2		/* 375 */
+	.long sys_copy_file_range
+	.long sys_preadv2
+	.long sys_pwritev2
+	.long sys_statx
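
Note: the position of each entry in the table above is its syscall number, as the /* N */ markers every five lines indicate (sys_getdents64 is 220, so sys_gettid is 221, and sys_statx, the last entry, works out to 379). A quick user-space sanity check of that numbering is sketched below; this is an illustration only, not part of the kernel tree, and it is meaningful only on m68k, since other architectures assign different numbers.

/* Hypothetical user-space sketch: confirm that raw syscall number 221
 * dispatches to sys_gettid, as the table above implies (m68k only). */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	long raw   = syscall(221);         /* 221 = sys_gettid per the table above */
	long named = syscall(SYS_gettid);  /* same call via the libc-provided constant */

	printf("raw=%ld named=%ld\n", raw, named);
	return raw == named ? 0 : 1;
}
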
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
new file mode 100644
index 0000000..3a8b47f
--- /dev/null
+++ b/arch/m68k/kernel/time.c
@@ -0,0 +1,157 @@
+/*
+ *  linux/arch/m68k/kernel/time.c
+ *
+ *  Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ *
+ * This file contains the m68k-specific time handling details.
+ * Most of the stuff is located in the machine specific files.
+ *
+ * 1997-09-10	Updated NTP code according to technical memorandum Jan '96
+ *		"A Kernel Model for Precision Timekeeping" by Dave Mills
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/sched/loadavg.h>
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/rtc.h>
+#include <linux/platform_device.h>
+
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/irq_regs.h>
+
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/profile.h>
+
+
+unsigned long (*mach_random_get_entropy)(void);
+EXPORT_SYMBOL_GPL(mach_random_get_entropy);
+
+
+/*
+ * timer_interrupt() needs to keep up the real-time clock,
+ * as well as call the "xtime_update()" routine every clocktick
+ */
+static irqreturn_t timer_interrupt(int irq, void *dummy)
+{
+	xtime_update(1);
+	update_process_times(user_mode(get_irq_regs()));
+	profile_tick(CPU_PROFILING);
+
+#ifdef CONFIG_HEARTBEAT
+	/* use power LED as a heartbeat instead -- much more useful
+	   for debugging -- based on the version for PReP by Cort */
+	/* acts like an actual heart beat -- ie thump-thump-pause... */
+	if (mach_heartbeat) {
+	    static unsigned cnt = 0, period = 0, dist = 0;
+
+	    if (cnt == 0 || cnt == dist)
+		mach_heartbeat( 1 );
+	    else if (cnt == 7 || cnt == dist+7)
+		mach_heartbeat( 0 );
+
+	    if (++cnt > period) {
+		cnt = 0;
+		/* The hyperbolic function below modifies the heartbeat
+		 * period length depending on the current (1 min) load
+		 * average (avenrun[0]). It goes through the points
+		 * f(0)=126, f(1)=86, f(5)=51, f(inf)->30. */
+		period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
+		dist = period / 4;
+	    }
+	}
+#endif /* CONFIG_HEARTBEAT */
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_M68KCLASSIC
+#if !IS_BUILTIN(CONFIG_RTC_DRV_GENERIC)
+void read_persistent_clock64(struct timespec64 *ts)
+{
+	struct rtc_time time;
+
+	ts->tv_sec = 0;
+	ts->tv_nsec = 0;
+
+	if (!mach_hwclk)
+		return;
+
+	mach_hwclk(0, &time);
+
+	ts->tv_sec = mktime64(time.tm_year + 1900, time.tm_mon + 1, time.tm_mday,
+			      time.tm_hour, time.tm_min, time.tm_sec);
+}
+#endif
+
+#if IS_ENABLED(CONFIG_RTC_DRV_GENERIC)
+static int rtc_generic_get_time(struct device *dev, struct rtc_time *tm)
+{
+	mach_hwclk(0, tm);
+	return 0;
+}
+
+static int rtc_generic_set_time(struct device *dev, struct rtc_time *tm)
+{
+	if (mach_hwclk(1, tm) < 0)
+		return -EOPNOTSUPP;
+	return 0;
+}
+
+static int rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+	struct rtc_pll_info pll;
+	struct rtc_pll_info __user *argp = (void __user *)arg;
+
+	switch (cmd) {
+	case RTC_PLL_GET:
+		if (!mach_get_rtc_pll || mach_get_rtc_pll(&pll))
+			return -EINVAL;
+		return copy_to_user(argp, &pll, sizeof pll) ? -EFAULT : 0;
+
+	case RTC_PLL_SET:
+		if (!mach_set_rtc_pll)
+			return -EINVAL;
+		if (!capable(CAP_SYS_TIME))
+			return -EACCES;
+		if (copy_from_user(&pll, argp, sizeof(pll)))
+			return -EFAULT;
+		return mach_set_rtc_pll(&pll);
+	}
+
+	return -ENOIOCTLCMD;
+}
+
+static const struct rtc_class_ops generic_rtc_ops = {
+	.ioctl = rtc_ioctl,
+	.read_time = rtc_generic_get_time,
+	.set_time = rtc_generic_set_time,
+};
+
+static int __init rtc_init(void)
+{
+	struct platform_device *pdev;
+
+	if (!mach_hwclk)
+		return -ENODEV;
+
+	pdev = platform_device_register_data(NULL, "rtc-generic", -1,
+					     &generic_rtc_ops,
+					     sizeof(generic_rtc_ops));
+	return PTR_ERR_OR_ZERO(pdev);
+}
+
+module_init(rtc_init);
+#endif /* CONFIG_RTC_DRV_GENERIC */
+#endif /* CONFIG_M68KCLASSIC */
+
+void __init time_init(void)
+{
+	mach_sched_init(timer_interrupt);
+}
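
The heartbeat comment in timer_interrupt() above quotes the sample points f(0)=126, f(1)=86, f(5)=51, f(inf)->30. These follow directly from the fixed-point expression, because avenrun[0] holds the load average scaled by 2^FSHIFT (FSHIFT is 11 in this tree), so the period is effectively 672/(5*load + 7) + 30. A standalone sketch that reproduces the quoted values; heartbeat_period() is an illustrative name, not a kernel symbol.

/* Standalone sketch reproducing the heartbeat period formula used in
 * timer_interrupt() above; for illustration only. */
#include <stdio.h>

#define FSHIFT 11	/* avenrun[] fixed-point shift, as in <linux/sched/loadavg.h> */

static unsigned int heartbeat_period(unsigned long avenrun0)
{
	return ((672 << FSHIFT) / (5 * avenrun0 + (7 << FSHIFT))) + 30;
}

int main(void)
{
	/* load averages 0.0, 1.0 and 5.0 expressed in avenrun fixed point */
	printf("%u %u %u\n",
	       heartbeat_period(0),
	       heartbeat_period(1UL << FSHIFT),
	       heartbeat_period(5UL << FSHIFT));	/* prints: 126 86 51 */
	return 0;
}
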
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
new file mode 100644
index 0000000..b2fd000
--- /dev/null
+++ b/arch/m68k/kernel/traps.c
@@ -0,0 +1,1164 @@
+/*
+ *  linux/arch/m68k/kernel/traps.c
+ *
+ *  Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ *  68040 fixes by Michael Rausch
+ *  68040 fixes by Martin Apel
+ *  68040 fixes and writeback by Richard Zidlicky
+ *  68060 fixes by Roman Hodek
+ *  68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/signal.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/user.h>
+#include <linux/string.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <linux/uaccess.h>
+#include <asm/traps.h>
+#include <asm/pgalloc.h>
+#include <asm/machdep.h>
+#include <asm/siginfo.h>
+
+
+static const char *vec_names[] = {
+	[VEC_RESETSP]	= "RESET SP",
+	[VEC_RESETPC]	= "RESET PC",
+	[VEC_BUSERR]	= "BUS ERROR",
+	[VEC_ADDRERR]	= "ADDRESS ERROR",
+	[VEC_ILLEGAL]	= "ILLEGAL INSTRUCTION",
+	[VEC_ZERODIV]	= "ZERO DIVIDE",
+	[VEC_CHK]	= "CHK",
+	[VEC_TRAP]	= "TRAPcc",
+	[VEC_PRIV]	= "PRIVILEGE VIOLATION",
+	[VEC_TRACE]	= "TRACE",
+	[VEC_LINE10]	= "LINE 1010",
+	[VEC_LINE11]	= "LINE 1111",
+	[VEC_RESV12]	= "UNASSIGNED RESERVED 12",
+	[VEC_COPROC]	= "COPROCESSOR PROTOCOL VIOLATION",
+	[VEC_FORMAT]	= "FORMAT ERROR",
+	[VEC_UNINT]	= "UNINITIALIZED INTERRUPT",
+	[VEC_RESV16]	= "UNASSIGNED RESERVED 16",
+	[VEC_RESV17]	= "UNASSIGNED RESERVED 17",
+	[VEC_RESV18]	= "UNASSIGNED RESERVED 18",
+	[VEC_RESV19]	= "UNASSIGNED RESERVED 19",
+	[VEC_RESV20]	= "UNASSIGNED RESERVED 20",
+	[VEC_RESV21]	= "UNASSIGNED RESERVED 21",
+	[VEC_RESV22]	= "UNASSIGNED RESERVED 22",
+	[VEC_RESV23]	= "UNASSIGNED RESERVED 23",
+	[VEC_SPUR]	= "SPURIOUS INTERRUPT",
+	[VEC_INT1]	= "LEVEL 1 INT",
+	[VEC_INT2]	= "LEVEL 2 INT",
+	[VEC_INT3]	= "LEVEL 3 INT",
+	[VEC_INT4]	= "LEVEL 4 INT",
+	[VEC_INT5]	= "LEVEL 5 INT",
+	[VEC_INT6]	= "LEVEL 6 INT",
+	[VEC_INT7]	= "LEVEL 7 INT",
+	[VEC_SYS]	= "SYSCALL",
+	[VEC_TRAP1]	= "TRAP #1",
+	[VEC_TRAP2]	= "TRAP #2",
+	[VEC_TRAP3]	= "TRAP #3",
+	[VEC_TRAP4]	= "TRAP #4",
+	[VEC_TRAP5]	= "TRAP #5",
+	[VEC_TRAP6]	= "TRAP #6",
+	[VEC_TRAP7]	= "TRAP #7",
+	[VEC_TRAP8]	= "TRAP #8",
+	[VEC_TRAP9]	= "TRAP #9",
+	[VEC_TRAP10]	= "TRAP #10",
+	[VEC_TRAP11]	= "TRAP #11",
+	[VEC_TRAP12]	= "TRAP #12",
+	[VEC_TRAP13]	= "TRAP #13",
+	[VEC_TRAP14]	= "TRAP #14",
+	[VEC_TRAP15]	= "TRAP #15",
+	[VEC_FPBRUC]	= "FPCP BSUN",
+	[VEC_FPIR]	= "FPCP INEXACT",
+	[VEC_FPDIVZ]	= "FPCP DIV BY 0",
+	[VEC_FPUNDER]	= "FPCP UNDERFLOW",
+	[VEC_FPOE]	= "FPCP OPERAND ERROR",
+	[VEC_FPOVER]	= "FPCP OVERFLOW",
+	[VEC_FPNAN]	= "FPCP SNAN",
+	[VEC_FPUNSUP]	= "FPCP UNSUPPORTED OPERATION",
+	[VEC_MMUCFG]	= "MMU CONFIGURATION ERROR",
+	[VEC_MMUILL]	= "MMU ILLEGAL OPERATION ERROR",
+	[VEC_MMUACC]	= "MMU ACCESS LEVEL VIOLATION ERROR",
+	[VEC_RESV59]	= "UNASSIGNED RESERVED 59",
+	[VEC_UNIMPEA]	= "UNASSIGNED RESERVED 60",
+	[VEC_UNIMPII]	= "UNASSIGNED RESERVED 61",
+	[VEC_RESV62]	= "UNASSIGNED RESERVED 62",
+	[VEC_RESV63]	= "UNASSIGNED RESERVED 63",
+};
+
+static const char *space_names[] = {
+	[0]		= "Space 0",
+	[USER_DATA]	= "User Data",
+	[USER_PROGRAM]	= "User Program",
+#ifndef CONFIG_SUN3
+	[3]		= "Space 3",
+#else
+	[FC_CONTROL]	= "Control",
+#endif
+	[4]		= "Space 4",
+	[SUPER_DATA]	= "Super Data",
+	[SUPER_PROGRAM]	= "Super Program",
+	[CPU_SPACE]	= "CPU"
+};
+
+void die_if_kernel(char *,struct pt_regs *,int);
+asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
+                             unsigned long error_code);
+int send_fault_sig(struct pt_regs *regs);
+
+asmlinkage void trap_c(struct frame *fp);
+
+#if defined (CONFIG_M68060)
+static inline void access_error060 (struct frame *fp)
+{
+	unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
+
+	pr_debug("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
+
+	if (fslw & MMU060_BPE) {
+		/* branch prediction error -> clear branch cache */
+		__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
+				      "orl   #0x00400000,%/d0\n\t"
+				      "movec %/d0,%/cacr"
+				      : : : "d0" );
+		/* return if there's no other error */
+		if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
+			return;
+	}
+
+	if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
+		unsigned long errorcode;
+		unsigned long addr = fp->un.fmt4.effaddr;
+
+		if (fslw & MMU060_MA)
+			addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
+
+		errorcode = 1;
+		if (fslw & MMU060_DESC_ERR) {
+			__flush_tlb040_one(addr);
+			errorcode = 0;
+		}
+		if (fslw & MMU060_W)
+			errorcode |= 2;
+		pr_debug("errorcode = %ld\n", errorcode);
+		do_page_fault(&fp->ptregs, addr, errorcode);
+	} else if (fslw & MMU060_SEE) {
+		/* Software Emulation Error.
+		 * fault during mem_read/mem_write in ifpsp060/os.S
+		 */
+		send_fault_sig(&fp->ptregs);
+	} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
+		   send_fault_sig(&fp->ptregs) > 0) {
+		pr_err("pc=%#lx, fa=%#lx\n", fp->ptregs.pc,
+		       fp->un.fmt4.effaddr);
+		pr_err("68060 access error, fslw=%lx\n", fslw);
+		trap_c( fp );
+	}
+}
+#endif /* CONFIG_M68060 */
+
+#if defined (CONFIG_M68040)
+static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
+{
+	unsigned long mmusr;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(MAKE_MM_SEG(wbs));
+
+	if (iswrite)
+		asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
+	else
+		asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
+
+	asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
+
+	set_fs(old_fs);
+
+	return mmusr;
+}
+
+static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
+				   unsigned long wbd)
+{
+	int res = 0;
+	mm_segment_t old_fs = get_fs();
+
+	/* set_fs can not be moved, otherwise put_user() may oops */
+	set_fs(MAKE_MM_SEG(wbs));
+
+	switch (wbs & WBSIZ_040) {
+	case BA_SIZE_BYTE:
+		res = put_user(wbd & 0xff, (char __user *)wba);
+		break;
+	case BA_SIZE_WORD:
+		res = put_user(wbd & 0xffff, (short __user *)wba);
+		break;
+	case BA_SIZE_LONG:
+		res = put_user(wbd, (int __user *)wba);
+		break;
+	}
+
+	/* set_fs can not be moved, otherwise put_user() may oops */
+	set_fs(old_fs);
+
+
+	pr_debug("do_040writeback1, res=%d\n", res);
+
+	return res;
+}
+
+/* After an exception in a writeback the stack frame corresponding
+ * to that exception is discarded; set a few bits in the old frame
+ * to simulate what it should look like.
+ */
+static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
+{
+	fp->un.fmt7.faddr = wba;
+	fp->un.fmt7.ssw = wbs & 0xff;
+	if (wba != current->thread.faddr)
+	    fp->un.fmt7.ssw |= MA_040;
+}
+
+static inline void do_040writebacks(struct frame *fp)
+{
+	int res = 0;
+#if 0
+	if (fp->un.fmt7.wb1s & WBV_040)
+		pr_err("access_error040: cannot handle 1st writeback. oops.\n");
+#endif
+
+	if ((fp->un.fmt7.wb2s & WBV_040) &&
+	    !(fp->un.fmt7.wb2s & WBTT_040)) {
+		res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
+				       fp->un.fmt7.wb2d);
+		if (res)
+			fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
+		else
+			fp->un.fmt7.wb2s = 0;
+	}
+
+	/* do the 2nd wb only if the first one was successful (except for a kernel wb) */
+	if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
+		res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
+				       fp->un.fmt7.wb3d);
+		if (res) {
+			fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
+
+			fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
+			fp->un.fmt7.wb3s &= ~WBV_040;
+			fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
+			fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
+		} else
+			fp->un.fmt7.wb3s = 0;
+	}
+
+	if (res)
+		send_fault_sig(&fp->ptregs);
+}
+
+/*
+ * Called from sigreturn(); we must ensure that userspace code didn't
+ * manipulate the exception frame to circumvent protection, then
+ * complete the pending writebacks.
+ * We just clear TM2 to turn it into a userspace access.
+ */
+asmlinkage void berr_040cleanup(struct frame *fp)
+{
+	fp->un.fmt7.wb2s &= ~4;
+	fp->un.fmt7.wb3s &= ~4;
+
+	do_040writebacks(fp);
+}
+
+static inline void access_error040(struct frame *fp)
+{
+	unsigned short ssw = fp->un.fmt7.ssw;
+	unsigned long mmusr;
+
+	pr_debug("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
+	pr_debug("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
+		fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
+	pr_debug("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
+		fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
+		fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
+
+	if (ssw & ATC_040) {
+		unsigned long addr = fp->un.fmt7.faddr;
+		unsigned long errorcode;
+
+		/*
+		 * The MMU status has to be determined AFTER the address
+		 * has been corrected if there was a misaligned access (MA).
+		 */
+		if (ssw & MA_040)
+			addr = (addr + 7) & -8;
+
+		/* MMU error, get the MMUSR info for this access */
+		mmusr = probe040(!(ssw & RW_040), addr, ssw);
+		pr_debug("mmusr = %lx\n", mmusr);
+		errorcode = 1;
+		if (!(mmusr & MMU_R_040)) {
+			/* clear the invalid atc entry */
+			__flush_tlb040_one(addr);
+			errorcode = 0;
+		}
+
+		/* despite what the documentation seems to say, RMW
+		 * accesses always have both the LK and RW bits set */
+		if (!(ssw & RW_040) || (ssw & LK_040))
+			errorcode |= 2;
+
+		if (do_page_fault(&fp->ptregs, addr, errorcode)) {
+			pr_debug("do_page_fault() !=0\n");
+			if (user_mode(&fp->ptregs)){
+				/* delay writebacks after signal delivery */
+				pr_debug(".. was usermode - return\n");
+				return;
+			}
+			/* disable writeback into user space from kernel
+			 * (if do_page_fault didn't fix the mapping,
+			 * the writeback won't do any good)
+			 */
+disable_wb:
+			pr_debug(".. disabling wb2\n");
+			if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
+				fp->un.fmt7.wb2s &= ~WBV_040;
+			if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
+				fp->un.fmt7.wb3s &= ~WBV_040;
+		}
+	} else {
+		/* In case of a bus error we either kill the process or expect
+		 * the kernel to catch the fault, which then is also responsible
+		 * for cleaning up the mess.
+		 */
+		current->thread.signo = SIGBUS;
+		current->thread.faddr = fp->un.fmt7.faddr;
+		if (send_fault_sig(&fp->ptregs) >= 0)
+			pr_err("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
+			       fp->un.fmt7.faddr);
+		goto disable_wb;
+	}
+
+	do_040writebacks(fp);
+}
+#endif /* CONFIG_M68040 */
+
+#if defined(CONFIG_SUN3)
+#include <asm/sun3mmu.h>
+
+extern int mmu_emu_handle_fault (unsigned long, int, int);
+
+/* sun3 version of bus_error030 */
+
+static inline void bus_error030 (struct frame *fp)
+{
+	unsigned char buserr_type = sun3_get_buserr ();
+	unsigned long addr, errorcode;
+	unsigned short ssw = fp->un.fmtb.ssw;
+	extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
+
+	if (ssw & (FC | FB))
+		pr_debug("Instruction fault at %#010lx\n",
+			ssw & FC ?
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+			:
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+	if (ssw & DF)
+		pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+			ssw & RW ? "read" : "write",
+			fp->un.fmtb.daddr,
+			space_names[ssw & DFC], fp->ptregs.pc);
+
+	/*
+	 * Check if this page should be demand-mapped. This needs to go before
+	 * the testing for a bad kernel-space access (demand-mapping applies
+	 * to kernel accesses too).
+	 */
+
+	if ((ssw & DF)
+	    && (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
+		if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
+			return;
+	}
+
+	/* Check for kernel-space pagefault (BAD). */
+	if (fp->ptregs.sr & PS_S) {
+		/* kernel fault must be a data fault to user space */
+		if (!((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
+			/* try checking the kernel mappings before surrendering */
+			if (mmu_emu_handle_fault(fp->un.fmtb.daddr, ssw & RW, 1))
+				return;
+			/* instruction fault or kernel data fault! */
+			if (ssw & (FC | FB))
+				pr_err("Instruction fault at %#010lx\n",
+					fp->ptregs.pc);
+			if (ssw & DF) {
+				/* was this fault incurred testing bus mappings? */
+				if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
+				   (fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
+					send_fault_sig(&fp->ptregs);
+					return;
+				}
+
+				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+					ssw & RW ? "read" : "write",
+					fp->un.fmtb.daddr,
+					space_names[ssw & DFC], fp->ptregs.pc);
+			}
+			pr_err("BAD KERNEL BUSERR\n");
+
+			die_if_kernel("Oops", &fp->ptregs,0);
+			force_sig(SIGKILL, current);
+			return;
+		}
+	} else {
+		/* user fault */
+		if (!(ssw & (FC | FB)) && !(ssw & DF))
+			/* not an instruction fault or data fault! BAD */
+			panic ("USER BUSERR w/o instruction or data fault");
+	}
+
+
+	/* First handle the data fault, if any.  */
+	if (ssw & DF) {
+		addr = fp->un.fmtb.daddr;
+
+		/*
+		 * errorcode bit 0:	0 -> no page		1 -> protection fault
+		 * errorcode bit 1:	0 -> read fault		1 -> write fault
+		 *
+		 * (buserr_type & SUN3_BUSERR_PROTERR)	-> protection fault
+		 * (buserr_type & SUN3_BUSERR_INVALID)	-> invalid page fault
+		 */
+
+		if (buserr_type & SUN3_BUSERR_PROTERR)
+			errorcode = 0x01;
+		else if (buserr_type & SUN3_BUSERR_INVALID)
+			errorcode = 0x00;
+		else {
+			pr_debug("*** unexpected busfault type=%#04x\n",
+				 buserr_type);
+			pr_debug("invalid %s access at %#lx from pc %#lx\n",
+				 !(ssw & RW) ? "write" : "read", addr,
+				 fp->ptregs.pc);
+			die_if_kernel ("Oops", &fp->ptregs, buserr_type);
+			force_sig (SIGBUS, current);
+			return;
+		}
+
+		/* TODO: what exactly does the RM bit indicate? --m */
+		if (!(ssw & RW) || ssw & RM)
+			errorcode |= 0x02;
+
+		/* Handle page fault. */
+		do_page_fault (&fp->ptregs, addr, errorcode);
+
+		/* Retry the data fault now. */
+		return;
+	}
+
+	/* Now handle the instruction fault. */
+
+	/* Get the fault address. */
+	if (fp->ptregs.format == 0xA)
+		addr = fp->ptregs.pc + 4;
+	else
+		addr = fp->un.fmtb.baddr;
+	if (ssw & FC)
+		addr -= 2;
+
+	if (buserr_type & SUN3_BUSERR_INVALID) {
+		if (!mmu_emu_handle_fault(addr, 1, 0))
+			do_page_fault (&fp->ptregs, addr, 0);
+	} else {
+		pr_debug("protection fault on insn access (segv).\n");
+		force_sig(SIGSEGV, current);
+	}
+}
+#else
+#if defined(CPU_M68020_OR_M68030)
+static inline void bus_error030 (struct frame *fp)
+{
+	volatile unsigned short temp;
+	unsigned short mmusr;
+	unsigned long addr, errorcode;
+	unsigned short ssw = fp->un.fmtb.ssw;
+#ifdef DEBUG
+	unsigned long desc;
+#endif
+
+	pr_debug("pid = %x  ", current->pid);
+	pr_debug("SSW=%#06x  ", ssw);
+
+	if (ssw & (FC | FB))
+		pr_debug("Instruction fault at %#010lx\n",
+			ssw & FC ?
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
+			:
+			fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+	if (ssw & DF)
+		pr_debug("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+			ssw & RW ? "read" : "write",
+			fp->un.fmtb.daddr,
+			space_names[ssw & DFC], fp->ptregs.pc);
+
+	/* ++andreas: If a data fault and an instruction fault happen
+	   at the same time map in both pages.  */
+
+	/* First handle the data fault, if any.  */
+	if (ssw & DF) {
+		addr = fp->un.fmtb.daddr;
+
+#ifdef DEBUG
+		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
+			      "pmove %%psr,%1"
+			      : "=a&" (desc), "=m" (temp)
+			      : "a" (addr), "d" (ssw));
+		pr_debug("mmusr is %#x for addr %#lx in task %p\n",
+			 temp, addr, current);
+		pr_debug("descriptor address is 0x%p, contents %#lx\n",
+			 __va(desc), *(unsigned long *)__va(desc));
+#else
+		asm volatile ("ptestr %2,%1@,#7\n\t"
+			      "pmove %%psr,%0"
+			      : "=m" (temp) : "a" (addr), "d" (ssw));
+#endif
+		mmusr = temp;
+		errorcode = (mmusr & MMU_I) ? 0 : 1;
+		if (!(ssw & RW) || (ssw & RM))
+			errorcode |= 2;
+
+		if (mmusr & (MMU_I | MMU_WP)) {
+			if (ssw & 4) {
+				pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+				       ssw & RW ? "read" : "write",
+				       fp->un.fmtb.daddr,
+				       space_names[ssw & DFC], fp->ptregs.pc);
+				goto buserr;
+			}
+			/* Don't try to do anything further if an exception was
+			   handled. */
+			if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
+				return;
+		} else if (!(mmusr & MMU_I)) {
+			/* probably a 020 cas fault */
+			if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
+				pr_err("unexpected bus error (%#x,%#x)\n", ssw,
+				       mmusr);
+		} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+			pr_err("invalid %s access at %#lx from pc %#lx\n",
+			       !(ssw & RW) ? "write" : "read", addr,
+			       fp->ptregs.pc);
+			die_if_kernel("Oops",&fp->ptregs,mmusr);
+			force_sig(SIGSEGV, current);
+			return;
+		} else {
+#if 0
+			static volatile long tlong;
+#endif
+
+			pr_err("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
+			       !(ssw & RW) ? "write" : "read", addr,
+			       fp->ptregs.pc, ssw);
+			asm volatile ("ptestr #1,%1@,#0\n\t"
+				      "pmove %%psr,%0"
+				      : "=m" (temp)
+				      : "a" (addr));
+			mmusr = temp;
+
+			pr_err("level 0 mmusr is %#x\n", mmusr);
+#if 0
+			asm volatile ("pmove %%tt0,%0"
+				      : "=m" (tlong));
+			pr_debug("tt0 is %#lx, ", tlong);
+			asm volatile ("pmove %%tt1,%0"
+				      : "=m" (tlong));
+			pr_debug("tt1 is %#lx\n", tlong);
+#endif
+			pr_debug("Unknown SIGSEGV - 1\n");
+			die_if_kernel("Oops",&fp->ptregs,mmusr);
+			force_sig(SIGSEGV, current);
+			return;
+		}
+
+		/* setup an ATC entry for the access about to be retried */
+		if (!(ssw & RW) || (ssw & RM))
+			asm volatile ("ploadw %1,%0@" : /* no outputs */
+				      : "a" (addr), "d" (ssw));
+		else
+			asm volatile ("ploadr %1,%0@" : /* no outputs */
+				      : "a" (addr), "d" (ssw));
+	}
+
+	/* Now handle the instruction fault. */
+
+	if (!(ssw & (FC|FB)))
+		return;
+
+	if (fp->ptregs.sr & PS_S) {
+		pr_err("Instruction fault at %#010lx\n", fp->ptregs.pc);
+	buserr:
+		pr_err("BAD KERNEL BUSERR\n");
+		die_if_kernel("Oops",&fp->ptregs,0);
+		force_sig(SIGKILL, current);
+		return;
+	}
+
+	/* get the fault address */
+	if (fp->ptregs.format == 10)
+		addr = fp->ptregs.pc + 4;
+	else
+		addr = fp->un.fmtb.baddr;
+	if (ssw & FC)
+		addr -= 2;
+
+	if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
+		/* Insn fault on same page as data fault.  But we
+		   should still create the ATC entry.  */
+		goto create_atc_entry;
+
+#ifdef DEBUG
+	asm volatile ("ptestr #1,%2@,#7,%0\n\t"
+		      "pmove %%psr,%1"
+		      : "=a&" (desc), "=m" (temp)
+		      : "a" (addr));
+	pr_debug("mmusr is %#x for addr %#lx in task %p\n",
+		temp, addr, current);
+	pr_debug("descriptor address is 0x%p, contents %#lx\n",
+		__va(desc), *(unsigned long *)__va(desc));
+#else
+	asm volatile ("ptestr #1,%1@,#7\n\t"
+		      "pmove %%psr,%0"
+		      : "=m" (temp) : "a" (addr));
+#endif
+	mmusr = temp;
+	if (mmusr & MMU_I)
+		do_page_fault (&fp->ptregs, addr, 0);
+	else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
+		pr_err("invalid insn access at %#lx from pc %#lx\n",
+			addr, fp->ptregs.pc);
+		pr_debug("Unknown SIGSEGV - 2\n");
+		die_if_kernel("Oops",&fp->ptregs,mmusr);
+		force_sig(SIGSEGV, current);
+		return;
+	}
+
+create_atc_entry:
+	/* setup an ATC entry for the access about to be retried */
+	asm volatile ("ploadr #2,%0@" : /* no outputs */
+		      : "a" (addr));
+}
+#endif /* CPU_M68020_OR_M68030 */
+#endif /* !CONFIG_SUN3 */
+
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+#include <asm/mcfmmu.h>
+
+/*
+ *	The following table converts the FS encoding of a ColdFire
+ *	exception stack frame into the error_code value needed by
+ *	do_page_fault().
+ */
+static const unsigned char fs_err_code[] = {
+	0,  /* 0000 */
+	0,  /* 0001 */
+	0,  /* 0010 */
+	0,  /* 0011 */
+	1,  /* 0100 */
+	0,  /* 0101 */
+	0,  /* 0110 */
+	0,  /* 0111 */
+	2,  /* 1000 */
+	3,  /* 1001 */
+	2,  /* 1010 */
+	0,  /* 1011 */
+	1,  /* 1100 */
+	1,  /* 1101 */
+	0,  /* 1110 */
+	0   /* 1111 */
+};
+
+static inline void access_errorcf(unsigned int fs, struct frame *fp)
+{
+	unsigned long mmusr, addr;
+	unsigned int err_code;
+	int need_page_fault;
+
+	mmusr = mmu_read(MMUSR);
+	addr = mmu_read(MMUAR);
+
+	/*
+	 * error_code:
+	 *	bit 0 == 0 means no page found, 1 means protection fault
+	 *	bit 1 == 0 means read, 1 means write
+	 */
+	switch (fs) {
+	case  5:  /* 0101 TLB opword X miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 0);
+		addr = fp->ptregs.pc;
+		break;
+	case  6:  /* 0110 TLB extension word X miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 0, 1);
+		addr = fp->ptregs.pc + sizeof(long);
+		break;
+	case 10:  /* 1010 TLB W miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 1, 1, 0);
+		break;
+	case 14: /* 1110 TLB R miss */
+		need_page_fault = cf_tlb_miss(&fp->ptregs, 0, 1, 0);
+		break;
+	default:
+		/* 0000 Normal  */
+		/* 0001 Reserved */
+		/* 0010 Interrupt during debug service routine */
+		/* 0011 Reserved */
+		/* 0100 X Protection */
+		/* 0111 IFP in emulator mode */
+		/* 1000 W Protection*/
+		/* 1001 Write error*/
+		/* 1011 Reserved*/
+		/* 1100 R Protection*/
+		/* 1101 R Protection*/
+		/* 1111 OEP in emulator mode*/
+		need_page_fault = 1;
+		break;
+	}
+
+	if (need_page_fault) {
+		err_code = fs_err_code[fs];
+		if ((fs == 13) && (mmusr & MMUSR_WF)) /* rd-mod-wr access */
+			err_code |= 2; /* bit1 - write, bit0 - protection */
+		do_page_fault(&fp->ptregs, addr, err_code);
+	}
+}
+#endif /* CONFIG_COLDFIRE CONFIG_MMU */
+
+asmlinkage void buserr_c(struct frame *fp)
+{
+	/* Only set esp0 if coming from user mode */
+	if (user_mode(&fp->ptregs))
+		current->thread.esp0 = (unsigned long) fp;
+
+	pr_debug("*** Bus Error *** Format is %x\n", fp->ptregs.format);
+
+#if defined(CONFIG_COLDFIRE) && defined(CONFIG_MMU)
+	if (CPU_IS_COLDFIRE) {
+		unsigned int fs;
+		fs = (fp->ptregs.vector & 0x3) |
+			((fp->ptregs.vector & 0xc00) >> 8);
+		switch (fs) {
+		case 0x5:
+		case 0x6:
+		case 0x7:
+		case 0x9:
+		case 0xa:
+		case 0xd:
+		case 0xe:
+		case 0xf:
+			access_errorcf(fs, fp);
+			return;
+		default:
+			break;
+		}
+	}
+#endif /* CONFIG_COLDFIRE && CONFIG_MMU */
+
+	switch (fp->ptregs.format) {
+#if defined (CONFIG_M68060)
+	case 4:				/* 68060 access error */
+	  access_error060 (fp);
+	  break;
+#endif
+#if defined (CONFIG_M68040)
+	case 0x7:			/* 68040 access error */
+	  access_error040 (fp);
+	  break;
+#endif
+#if defined (CPU_M68020_OR_M68030)
+	case 0xa:
+	case 0xb:
+	  bus_error030 (fp);
+	  break;
+#endif
+	default:
+	  die_if_kernel("bad frame format",&fp->ptregs,0);
+	  pr_debug("Unknown SIGSEGV - 4\n");
+	  force_sig(SIGSEGV, current);
+	}
+}
+
+
+static int kstack_depth_to_print = 48;
+
+void show_trace(unsigned long *stack)
+{
+	unsigned long *endstack;
+	unsigned long addr;
+	int i;
+
+	pr_info("Call Trace:");
+	addr = (unsigned long)stack + THREAD_SIZE - 1;
+	endstack = (unsigned long *)(addr & -THREAD_SIZE);
+	i = 0;
+	while (stack + 1 <= endstack) {
+		addr = *stack++;
+		/*
+		 * If the address is either in the text segment of the
+		 * kernel, or in the region which contains vmalloc'ed
+		 * memory, it *may* be the address of a calling
+		 * routine; if so, print it so that someone tracing
+		 * down the cause of the crash will be able to figure
+		 * out the call path that was taken.
+		 */
+		if (__kernel_text_address(addr)) {
+#ifndef CONFIG_KALLSYMS
+			if (i % 5 == 0)
+				pr_cont("\n       ");
+#endif
+			pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
+			i++;
+		}
+	}
+	pr_cont("\n");
+}
+
+void show_registers(struct pt_regs *regs)
+{
+	struct frame *fp = (struct frame *)regs;
+	mm_segment_t old_fs = get_fs();
+	u16 c, *cp;
+	unsigned long addr;
+	int i;
+
+	print_modules();
+	pr_info("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
+	pr_info("SR: %04x  SP: %p  a2: %08lx\n", regs->sr, regs, regs->a2);
+	pr_info("d0: %08lx    d1: %08lx    d2: %08lx    d3: %08lx\n",
+	       regs->d0, regs->d1, regs->d2, regs->d3);
+	pr_info("d4: %08lx    d5: %08lx    a0: %08lx    a1: %08lx\n",
+	       regs->d4, regs->d5, regs->a0, regs->a1);
+
+	pr_info("Process %s (pid: %d, task=%p)\n",
+		current->comm, task_pid_nr(current), current);
+	addr = (unsigned long)&fp->un;
+	pr_info("Frame format=%X ", regs->format);
+	switch (regs->format) {
+	case 0x2:
+		pr_cont("instr addr=%08lx\n", fp->un.fmt2.iaddr);
+		addr += sizeof(fp->un.fmt2);
+		break;
+	case 0x3:
+		pr_cont("eff addr=%08lx\n", fp->un.fmt3.effaddr);
+		addr += sizeof(fp->un.fmt3);
+		break;
+	case 0x4:
+		if (CPU_IS_060)
+			pr_cont("fault addr=%08lx fslw=%08lx\n",
+				fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+		else
+			pr_cont("eff addr=%08lx pc=%08lx\n",
+				fp->un.fmt4.effaddr, fp->un.fmt4.pc);
+		addr += sizeof(fp->un.fmt4);
+		break;
+	case 0x7:
+		pr_cont("eff addr=%08lx ssw=%04x faddr=%08lx\n",
+			fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
+		pr_info("wb 1 stat/addr/data: %04x %08lx %08lx\n",
+			fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
+		pr_info("wb 2 stat/addr/data: %04x %08lx %08lx\n",
+			fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
+		pr_info("wb 3 stat/addr/data: %04x %08lx %08lx\n",
+			fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
+		pr_info("push data: %08lx %08lx %08lx %08lx\n",
+			fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
+			fp->un.fmt7.pd3);
+		addr += sizeof(fp->un.fmt7);
+		break;
+	case 0x9:
+		pr_cont("instr addr=%08lx\n", fp->un.fmt9.iaddr);
+		addr += sizeof(fp->un.fmt9);
+		break;
+	case 0xa:
+		pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+			fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
+			fp->un.fmta.daddr, fp->un.fmta.dobuf);
+		addr += sizeof(fp->un.fmta);
+		break;
+	case 0xb:
+		pr_cont("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
+			fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
+			fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
+		pr_info("baddr=%08lx dibuf=%08lx ver=%x\n",
+			fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
+		addr += sizeof(fp->un.fmtb);
+		break;
+	default:
+		pr_cont("\n");
+	}
+	show_stack(NULL, (unsigned long *)addr);
+
+	pr_info("Code:");
+	set_fs(KERNEL_DS);
+	cp = (u16 *)regs->pc;
+	for (i = -8; i < 16; i++) {
+		if (get_user(c, cp + i) && i >= 0) {
+			pr_cont(" Bad PC value.");
+			break;
+		}
+		if (i)
+			pr_cont(" %04x", c);
+		else
+			pr_cont(" <%04x>", c);
+	}
+	set_fs(old_fs);
+	pr_cont("\n");
+}
+
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+	unsigned long *p;
+	unsigned long *endstack;
+	int i;
+
+	if (!stack) {
+		if (task)
+			stack = (unsigned long *)task->thread.esp0;
+		else
+			stack = (unsigned long *)&stack;
+	}
+	endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
+
+	pr_info("Stack from %08lx:", (unsigned long)stack);
+	p = stack;
+	for (i = 0; i < kstack_depth_to_print; i++) {
+		if (p + 1 > endstack)
+			break;
+		if (i % 8 == 0)
+			pr_cont("\n       ");
+		pr_cont(" %08lx", *p++);
+	}
+	pr_cont("\n");
+	show_trace(stack);
+}
+
+/*
+ * The vector number stored in the exception frame may also contain
+ * the "fs" (Fault Status) bits on ColdFire. These occupy the bottom
+ * 2 bits and the upper 2 bits of the field, so we need to mask out
+ * the real vector number before using it in comparisons. You don't
+ * need to do this on real 68k parts, but it won't hurt either.
+ */
+
+void bad_super_trap (struct frame *fp)
+{
+	int vector = (fp->ptregs.vector >> 2) & 0xff;
+
+	console_verbose();
+	if (vector < ARRAY_SIZE(vec_names))
+		pr_err("*** %s ***   FORMAT=%X\n",
+			vec_names[vector],
+			fp->ptregs.format);
+	else
+		pr_err("*** Exception %d ***   FORMAT=%X\n",
+			vector, fp->ptregs.format);
+	if (vector == VEC_ADDRERR && CPU_IS_020_OR_030) {
+		unsigned short ssw = fp->un.fmtb.ssw;
+
+		pr_err("SSW=%#06x  ", ssw);
+
+		if (ssw & RC)
+			pr_err("Pipe stage C instruction fault at %#010lx\n",
+				(fp->ptregs.format) == 0xA ?
+				fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
+		if (ssw & RB)
+			pr_err("Pipe stage B instruction fault at %#010lx\n",
+				(fp->ptregs.format) == 0xA ?
+				fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
+		if (ssw & DF)
+			pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
+				ssw & RW ? "read" : "write",
+				fp->un.fmtb.daddr, space_names[ssw & DFC],
+				fp->ptregs.pc);
+	}
+	pr_err("Current process id is %d\n", task_pid_nr(current));
+	die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
+}
+
+asmlinkage void trap_c(struct frame *fp)
+{
+	int sig, si_code;
+	void __user *addr;
+	int vector = (fp->ptregs.vector >> 2) & 0xff;
+
+	if (fp->ptregs.sr & PS_S) {
+		if (vector == VEC_TRACE) {
+			/* traced a trapping instruction on a 68020/30,
+			 * real exception will be executed afterwards.
+			 */
+			return;
+		}
+#ifdef CONFIG_MMU
+		if (fixup_exception(&fp->ptregs))
+			return;
+#endif
+		bad_super_trap(fp);
+		return;
+	}
+
+	/* send the appropriate signal to the user program */
+	switch (vector) {
+	    case VEC_ADDRERR:
+		si_code = BUS_ADRALN;
+		sig = SIGBUS;
+		break;
+	    case VEC_ILLEGAL:
+	    case VEC_LINE10:
+	    case VEC_LINE11:
+		si_code = ILL_ILLOPC;
+		sig = SIGILL;
+		break;
+	    case VEC_PRIV:
+		si_code = ILL_PRVOPC;
+		sig = SIGILL;
+		break;
+	    case VEC_COPROC:
+		si_code = ILL_COPROC;
+		sig = SIGILL;
+		break;
+	    case VEC_TRAP1:
+	    case VEC_TRAP2:
+	    case VEC_TRAP3:
+	    case VEC_TRAP4:
+	    case VEC_TRAP5:
+	    case VEC_TRAP6:
+	    case VEC_TRAP7:
+	    case VEC_TRAP8:
+	    case VEC_TRAP9:
+	    case VEC_TRAP10:
+	    case VEC_TRAP11:
+	    case VEC_TRAP12:
+	    case VEC_TRAP13:
+	    case VEC_TRAP14:
+		si_code = ILL_ILLTRP;
+		sig = SIGILL;
+		break;
+	    case VEC_FPBRUC:
+	    case VEC_FPOE:
+	    case VEC_FPNAN:
+		si_code = FPE_FLTINV;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPIR:
+		si_code = FPE_FLTRES;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPDIVZ:
+		si_code = FPE_FLTDIV;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPUNDER:
+		si_code = FPE_FLTUND;
+		sig = SIGFPE;
+		break;
+	    case VEC_FPOVER:
+		si_code = FPE_FLTOVF;
+		sig = SIGFPE;
+		break;
+	    case VEC_ZERODIV:
+		si_code = FPE_INTDIV;
+		sig = SIGFPE;
+		break;
+	    case VEC_CHK:
+	    case VEC_TRAP:
+		si_code = FPE_INTOVF;
+		sig = SIGFPE;
+		break;
+	    case VEC_TRACE:		/* ptrace single step */
+		si_code = TRAP_TRACE;
+		sig = SIGTRAP;
+		break;
+	    case VEC_TRAP15:		/* breakpoint */
+		si_code = TRAP_BRKPT;
+		sig = SIGTRAP;
+		break;
+	    default:
+		si_code = ILL_ILLOPC;
+		sig = SIGILL;
+		break;
+	}
+	switch (fp->ptregs.format) {
+	    default:
+		addr = (void __user *) fp->ptregs.pc;
+		break;
+	    case 2:
+		addr = (void __user *) fp->un.fmt2.iaddr;
+		break;
+	    case 7:
+		addr = (void __user *) fp->un.fmt7.effaddr;
+		break;
+	    case 9:
+		addr = (void __user *) fp->un.fmt9.iaddr;
+		break;
+	    case 10:
+		addr = (void __user *) fp->un.fmta.daddr;
+		break;
+	    case 11:
+		addr = (void __user*) fp->un.fmtb.daddr;
+		break;
+	}
+	force_sig_fault(sig, si_code, addr, current);
+}
+
+void die_if_kernel (char *str, struct pt_regs *fp, int nr)
+{
+	if (!(fp->sr & PS_S))
+		return;
+
+	console_verbose();
+	pr_crit("%s: %08x\n", str, nr);
+	show_registers(fp);
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+	do_exit(SIGSEGV);
+}
+
+asmlinkage void set_esp0(unsigned long ssp)
+{
+	current->thread.esp0 = ssp;
+}
+
+/*
+ * This function is called if an error occurs while accessing
+ * user-space from the fpsp040 code.
+ */
+asmlinkage void fpsp040_die(void)
+{
+	do_exit(SIGSEGV);
+}
+
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpemu_signal(int signal, int code, void *addr)
+{
+	force_sig_fault(signal, code, addr, current);
+}
+#endif
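
The comment ahead of bad_super_trap() above explains that on ColdFire the vector field of the exception frame also carries the fault-status (FS) bits: trap_c() and bad_super_trap() recover the vector number with (vector >> 2) & 0xff, while buserr_c() reassembles the FS nibble from the bottom two and the upper two bits. A small standalone sketch of that decoding, applied to a made-up frame value:

/* Sketch of the vector/FS decoding used in buserr_c() and bad_super_trap()
 * above; the raw value is a made-up example, not taken from real hardware. */
#include <stdio.h>

int main(void)
{
	/* vector offset 8 (bus error, vector 2) with FS = 0b1010 (TLB write
	 * miss) folded into bits 1:0 and 11:10, as on ColdFire */
	unsigned int raw = 0x80a;
	unsigned int vector = (raw >> 2) & 0xff;
	unsigned int fs = (raw & 0x3) | ((raw & 0xc00) >> 8);

	printf("vector=%u fs=%#x\n", vector, fs);	/* vector=2 fs=0xa */
	return 0;
}
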
diff --git a/arch/m68k/kernel/uboot.c b/arch/m68k/kernel/uboot.c
new file mode 100644
index 0000000..b29c3b2
--- /dev/null
+++ b/arch/m68k/kernel/uboot.c
@@ -0,0 +1,106 @@
+/*
+ * uboot.c -- uboot arguments support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/bootmem.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/rtc.h>
+
+#include <asm/setup.h>
+#include <asm/irq.h>
+#include <asm/machdep.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
+
+/*
+ * parse_uboot_commandline
+ *
+ * Copies the U-Boot command line arguments and stores them in the proper
+ * Linux variables.
+ *
+ * Assumes:
+ *	the _init_sp global contains the stack pointer value at kernel
+ *	entry (see head.S::_start)
+ *
+ *	U-Boot calling convention:
+ *	(*kernel) (kbd, initrd_start, initrd_end, cmd_start, cmd_end);
+ *
+ *	_init_sp can be parsed as follows:
+ *
+ *	_init_sp+00 = u-boot cmd after jsr into kernel (skip)
+ *	_init_sp+04 = &kernel board_info (residual data)
+ *	_init_sp+08 = &initrd_start
+ *	_init_sp+12 = &initrd_end
+ *	_init_sp+16 = &cmd_start
+ *	_init_sp+20 = &cmd_end
+ *
+ *	This also assumes that the memory locations pointed to are still
+ *	unmodified. U-boot places them near the end of external SDRAM.
+ *
+ * Argument(s):
+ *	commandp = the linux commandline arg container to fill.
+ *	size     = the sizeof commandp.
+ *
+ * Returns:
+ */
+static void __init parse_uboot_commandline(char *commandp, int size)
+{
+	extern unsigned long _init_sp;
+	unsigned long *sp;
+	unsigned long uboot_kbd;
+	unsigned long uboot_initrd_start, uboot_initrd_end;
+	unsigned long uboot_cmd_start, uboot_cmd_end;
+
+	sp = (unsigned long *)_init_sp;
+	uboot_kbd = sp[1];
+	uboot_initrd_start = sp[2];
+	uboot_initrd_end = sp[3];
+	uboot_cmd_start = sp[4];
+	uboot_cmd_end = sp[5];
+
+	if (uboot_cmd_start && uboot_cmd_end)
+		strncpy(commandp, (const char *)uboot_cmd_start, size);
+#if defined(CONFIG_BLK_DEV_INITRD)
+	if (uboot_initrd_start && uboot_initrd_end &&
+	    (uboot_initrd_end > uboot_initrd_start)) {
+		initrd_start = uboot_initrd_start;
+		initrd_end = uboot_initrd_end;
+		ROOT_DEV = Root_RAM0;
+		pr_info("initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);
+	}
+#endif /* if defined(CONFIG_BLK_DEV_INITRD) */
+}
+
+__init void process_uboot_commandline(char *commandp, int size)
+{
+	int len, n;
+
+	n = strnlen(commandp, size);
+	commandp += n;
+	len = size - n;
+	if (len) {
+		/* Add the whitespace separator */
+		*commandp++ = ' ';
+		len--;
+	}
+
+	parse_uboot_commandline(commandp, len);
+	commandp[len - 1] = 0;	/* commandp has been advanced; terminate inside the original buffer */
+}
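
process_uboot_commandline() above appends whatever U-Boot passed to the end of the built-in command line, separated by a single space. A standalone mock of that append logic follows; the buffer size and strings are made up for illustration, and uboot_cmdline stands in for what parse_uboot_commandline() copies out of the cmd_start/cmd_end region.

/* Standalone mock of the command-line append logic above; illustration only. */
#include <stdio.h>
#include <string.h>

#define CL_SIZE 64	/* stand-in for the kernel command-line buffer size */

int main(void)
{
	char commandp[CL_SIZE] = "console=ttyS0";	/* built-in command line */
	const char *uboot_cmdline = "root=/dev/ram0";	/* from the cmd_start..cmd_end region */
	int n = strlen(commandp);	/* the kernel uses strnlen(commandp, size) */
	int len = CL_SIZE - n;

	if (len) {
		commandp[n++] = ' ';	/* whitespace separator, as in the kernel code */
		len--;
	}
	strncpy(commandp + n, uboot_cmdline, len);
	commandp[CL_SIZE - 1] = 0;

	printf("%s\n", commandp);	/* console=ttyS0 root=/dev/ram0 */
	return 0;
}
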
diff --git a/arch/m68k/kernel/vectors.c b/arch/m68k/kernel/vectors.c
new file mode 100644
index 0000000..322c977
--- /dev/null
+++ b/arch/m68k/kernel/vectors.c
@@ -0,0 +1,144 @@
+/*
+ *  vectors.c
+ *
+ *  Copyright (C) 1993, 1994 by Hamish Macdonald
+ *
+ *  68040 fixes by Michael Rausch
+ *  68040 fixes by Martin Apel
+ *  68040 fixes and writeback by Richard Zidlicky
+ *  68060 fixes by Roman Hodek
+ *  68060 fixes by Jesper Skov
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+/*
+ * Sets up all exception vectors
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+
+#include <asm/setup.h>
+#include <asm/fpu.h>
+#include <asm/traps.h>
+
+/* assembler routines */
+asmlinkage void system_call(void);
+asmlinkage void buserr(void);
+asmlinkage void trap(void);
+asmlinkage void nmihandler(void);
+#ifdef CONFIG_M68KFPU_EMU
+asmlinkage void fpu_emu(void);
+#endif
+
+e_vector vectors[256];
+
+/* nmi handler for the Amiga */
+asm(".text\n"
+    __ALIGN_STR "\n"
+    "nmihandler: rte");
+
+/*
+ * This must be called very early, as the kernel might
+ * use some instructions that are emulated on the 68060,
+ * so we have to be prepared for early probe attempts (e.g. nf_init).
+ */
+void __init base_trap_init(void)
+{
+	if (MACH_IS_SUN3X) {
+		extern e_vector *sun3x_prom_vbr;
+
+		__asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
+	}
+
+	/* setup the exception vector table */
+	__asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
+
+	if (CPU_IS_060) {
+		/* set up ISP entry points */
+		asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
+
+		vectors[VEC_UNIMPII] = unimp_vec;
+	}
+
+	vectors[VEC_BUSERR] = buserr;
+	vectors[VEC_ILLEGAL] = trap;
+	vectors[VEC_SYS] = system_call;
+}
+
+void __init trap_init (void)
+{
+	int i;
+
+	for (i = VEC_SPUR; i <= VEC_INT7; i++)
+		vectors[i] = bad_inthandler;
+
+	for (i = 0; i < VEC_USER; i++)
+		if (!vectors[i])
+			vectors[i] = trap;
+
+	for (i = VEC_USER; i < 256; i++)
+		vectors[i] = bad_inthandler;
+
+#ifdef CONFIG_M68KFPU_EMU
+	if (FPU_IS_EMU)
+		vectors[VEC_LINE11] = fpu_emu;
+#endif
+
+	if (CPU_IS_040 && !FPU_IS_EMU) {
+		/* set up FPSP entry points */
+		asmlinkage void dz_vec(void) asm ("dz");
+		asmlinkage void inex_vec(void) asm ("inex");
+		asmlinkage void ovfl_vec(void) asm ("ovfl");
+		asmlinkage void unfl_vec(void) asm ("unfl");
+		asmlinkage void snan_vec(void) asm ("snan");
+		asmlinkage void operr_vec(void) asm ("operr");
+		asmlinkage void bsun_vec(void) asm ("bsun");
+		asmlinkage void fline_vec(void) asm ("fline");
+		asmlinkage void unsupp_vec(void) asm ("unsupp");
+
+		vectors[VEC_FPDIVZ] = dz_vec;
+		vectors[VEC_FPIR] = inex_vec;
+		vectors[VEC_FPOVER] = ovfl_vec;
+		vectors[VEC_FPUNDER] = unfl_vec;
+		vectors[VEC_FPNAN] = snan_vec;
+		vectors[VEC_FPOE] = operr_vec;
+		vectors[VEC_FPBRUC] = bsun_vec;
+		vectors[VEC_LINE11] = fline_vec;
+		vectors[VEC_FPUNSUP] = unsupp_vec;
+	}
+
+	if (CPU_IS_060 && !FPU_IS_EMU) {
+		/* set up IFPSP entry points */
+		asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
+		asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
+		asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
+		asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
+		asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
+		asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
+		asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
+		asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
+		asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
+
+		vectors[VEC_FPNAN] = snan_vec6;
+		vectors[VEC_FPOE] = operr_vec6;
+		vectors[VEC_FPOVER] = ovfl_vec6;
+		vectors[VEC_FPUNDER] = unfl_vec6;
+		vectors[VEC_FPDIVZ] = dz_vec6;
+		vectors[VEC_FPIR] = inex_vec6;
+		vectors[VEC_LINE11] = fline_vec6;
+		vectors[VEC_FPUNSUP] = unsupp_vec6;
+		vectors[VEC_UNIMPEA] = effadd_vec6;
+	}
+
+	/* if running on an Amiga, make the NMI interrupt do nothing */
+	if (MACH_IS_AMIGA) {
+		vectors[VEC_INT7] = nmihandler;
+	}
+}
+
diff --git a/arch/m68k/kernel/vmlinux-nommu.lds b/arch/m68k/kernel/vmlinux-nommu.lds
new file mode 100644
index 0000000..cf6edda
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-nommu.lds
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *	vmlinux.lds.S -- master linker script for m68knommu arch
+ *
+ *	(C) Copyright 2002-2012, Greg Ungerer <gerg@snapgear.com>
+ *
+ *	This linker script is equipped to build either ROM loaded or RAM
+ *	run kernels.
+ */
+
+#if defined(CONFIG_RAMKERNEL)
+#define	KTEXT_ADDR	CONFIG_KERNELBASE
+#endif
+#if defined(CONFIG_ROMKERNEL)
+#define	KTEXT_ADDR	CONFIG_ROMSTART
+#define	KDATA_ADDR	CONFIG_KERNELBASE
+#define	LOAD_OFFSET	KDATA_ADDR + (ADDR(.text) + SIZEOF(.text))
+#endif
+
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm-generic/vmlinux.lds.h>
+
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+
+jiffies = jiffies_64 + 4;
+
+SECTIONS {
+
+#ifdef CONFIG_ROMVEC
+	. = CONFIG_ROMVEC;
+	.romvec : {
+		__rom_start = .;
+		_romvec = .;
+		*(.romvec)
+		*(.data..initvect)
+	}
+#endif
+
+	. = KTEXT_ADDR;
+
+	_text = .;
+	_stext = .;
+	.text : {
+		HEAD_TEXT
+		TEXT_TEXT
+		IRQENTRY_TEXT
+		SOFTIRQENTRY_TEXT
+		SCHED_TEXT
+		CPUIDLE_TEXT
+		LOCK_TEXT
+		*(.fixup)
+		. = ALIGN(16);
+	}
+	_etext = .;
+
+#ifdef KDATA_ADDR
+	. = KDATA_ADDR;
+#endif
+
+	_sdata = .;
+	RO_DATA_SECTION(PAGE_SIZE)
+	RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
+	_edata = .;
+
+	EXCEPTION_TABLE(16)
+	NOTES
+
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	PERCPU_SECTION(16)
+	.m68k_fixup : {
+		__start_fixup = .;
+		*(.m68k_fixup)
+		__stop_fixup = .;
+	}
+	.init.data : {
+		. = ALIGN(PAGE_SIZE);
+		__init_end = .;
+	}
+
+	BSS_SECTION(0, 0, 0)
+
+	_end = .;
+
+	STABS_DEBUG
+	.comment 0 : { *(.comment) }
+
+	/* Sections to be discarded */
+	DISCARDS
+}
+
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
new file mode 100644
index 0000000..625a578
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* ld script to make m68k Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  . = 0x1000;
+  _text = .;			/* Text and read-only data */
+  .text : {
+	HEAD_TEXT
+	TEXT_TEXT
+	IRQENTRY_TEXT
+	SOFTIRQENTRY_TEXT
+	SCHED_TEXT
+	CPUIDLE_TEXT
+	LOCK_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} :text = 0x4e75
+
+  _etext = .;			/* End of text section */
+
+  EXCEPTION_TABLE(16)
+
+  _sdata = .;			/* Start of data section */
+
+  RODATA
+
+  RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE)
+
+  BSS_SECTION(0, 0, 0)
+
+  _edata = .;			/* End of data section */
+
+  /* will be freed after init */
+  . = ALIGN(PAGE_SIZE);		/* Init code and data */
+  __init_begin = .;
+  INIT_TEXT_SECTION(PAGE_SIZE) :data
+  INIT_DATA_SECTION(16)
+  .m68k_fixup : {
+	__start_fixup = .;
+	*(.m68k_fixup)
+	__stop_fixup = .;
+  }
+  NOTES
+  .init_end : {
+	/* This ALIGN must be in a section so that _end is at the end of
+	   the load segment. */
+	. = ALIGN(PAGE_SIZE);
+	__init_end = .;
+  }
+
+  _end = . ;
+
+  STABS_DEBUG
+  .comment 0 : { *(.comment) }
+
+  /* Sections to be discarded */
+  DISCARDS
+}
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
new file mode 100644
index 0000000..9868270
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* ld script to make m68k Linux kernel */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-m68k", "elf32-m68k", "elf32-m68k")
+OUTPUT_ARCH(m68k)
+ENTRY(_start)
+jiffies = jiffies_64 + 4;
+SECTIONS
+{
+  . = 0xE002000;
+  _text = .;			/* Text and read-only data */
+  .text : {
+	HEAD_TEXT
+	TEXT_TEXT
+	IRQENTRY_TEXT
+	SOFTIRQENTRY_TEXT
+	SCHED_TEXT
+	CPUIDLE_TEXT
+	LOCK_TEXT
+	*(.fixup)
+	*(.gnu.warning)
+	} :text = 0x4e75
+	RODATA
+
+  _etext = .;			/* End of text section */
+
+  EXCEPTION_TABLE(16) :data
+  _sdata = .;			/* Start of rw data section */
+  RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) :data
+  /* End of data goes *here* so that freeing init code works properly. */
+  _edata = .;
+  NOTES
+
+  /* will be freed after init */
+  . = ALIGN(PAGE_SIZE);	/* Init code and data */
+__init_begin = .;
+	INIT_TEXT_SECTION(PAGE_SIZE)
+	INIT_DATA_SECTION(16)
+	.m68k_fixup : {
+		__start_fixup = .;
+		*(.m68k_fixup)
+		__stop_fixup = .;
+	}
+	. = ALIGN(PAGE_SIZE);
+	__init_end = .;
+
+  BSS_SECTION(0, 0, 0)
+
+  _end = . ;
+
+  STABS_DEBUG
+
+  /* Sections to be discarded */
+  DISCARDS
+}
diff --git a/arch/m68k/kernel/vmlinux.lds.S b/arch/m68k/kernel/vmlinux.lds.S
new file mode 100644
index 0000000..d3d3c30
--- /dev/null
+++ b/arch/m68k/kernel/vmlinux.lds.S
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
+PHDRS
+{
+  text PT_LOAD FILEHDR PHDRS FLAGS (7);
+  data PT_LOAD FLAGS (7);
+}
+#ifdef CONFIG_SUN3
+#include "vmlinux-sun3.lds"
+#else
+#include "vmlinux-std.lds"
+#endif
+#else
+#include "vmlinux-nommu.lds"
+#endif