v4.19.13 snapshot.
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
new file mode 100644
index 0000000..eb87cd8
--- /dev/null
+++ b/arch/openrisc/include/asm/Kbuild
@@ -0,0 +1,44 @@
+generic-y += barrier.h
+generic-y += bug.h
+generic-y += bugs.h
+generic-y += checksum.h
+generic-y += compat.h
+generic-y += current.h
+generic-y += device.h
+generic-y += div64.h
+generic-y += dma.h
+generic-y += dma-mapping.h
+generic-y += emergency-restart.h
+generic-y += exec.h
+generic-y += extable.h
+generic-y += fb.h
+generic-y += ftrace.h
+generic-y += hardirq.h
+generic-y += hw_irq.h
+generic-y += irq.h
+generic-y += irq_regs.h
+generic-y += irq_work.h
+generic-y += kdebug.h
+generic-y += kmap_types.h
+generic-y += kprobes.h
+generic-y += local.h
+generic-y += mcs_spinlock.h
+generic-y += mm-arch-hooks.h
+generic-y += module.h
+generic-y += pci.h
+generic-y += percpu.h
+generic-y += preempt.h
+generic-y += qspinlock_types.h
+generic-y += qspinlock.h
+generic-y += qrwlock_types.h
+generic-y += qrwlock.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += string.h
+generic-y += switch_to.h
+generic-y += topology.h
+generic-y += trace_clock.h
+generic-y += user.h
+generic-y += vga.h
+generic-y += word-at-a-time.h
+generic-y += xor.h
diff --git a/arch/openrisc/include/asm/asm-offsets.h b/arch/openrisc/include/asm/asm-offsets.h
new file mode 100644
index 0000000..d370ee3
--- /dev/null
+++ b/arch/openrisc/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/openrisc/include/asm/atomic.h b/arch/openrisc/include/asm/atomic.h
new file mode 100644
index 0000000..b589fac
--- /dev/null
+++ b/arch/openrisc/include/asm/atomic.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_ATOMIC_H
+#define __ASM_OPENRISC_ATOMIC_H
+
+#include <linux/types.h>
+
+/* Atomically perform op with v->counter and i */
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	int tmp;							\
+									\
+	__asm__ __volatile__(						\
+		"1:	l.lwa	%0,0(%1)	\n"			\
+		"	l." #op " %0,%0,%2	\n"			\
+		"	l.swa	0(%1),%0	\n"			\
+		"	l.bnf	1b		\n"			\
+		"	 l.nop			\n"			\
+		: "=&r"(tmp)						\
+		: "r"(&v->counter), "r"(i)				\
+		: "cc", "memory");					\
+}
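+
+/*
+ * The l.lwa/l.swa pair above forms a load-linked/store-conditional
+ * loop: l.swa succeeds (and sets the flag) only if nothing else has
+ * written the reserved location since the l.lwa; otherwise l.bnf
+ * branches back to label 1 and the operation is retried.  For example,
+ * ATOMIC_OP(and) below expands to atomic_and(i, v), which atomically
+ * ANDs i into v->counter.
+ */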
+
+/* Atomically perform op with v->counter and i, return the result */
+#define ATOMIC_OP_RETURN(op)						\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	int tmp;							\
+									\
+	__asm__ __volatile__(						\
+		"1:	l.lwa	%0,0(%1)	\n"			\
+		"	l." #op " %0,%0,%2	\n"			\
+		"	l.swa	0(%1),%0	\n"			\
+		"	l.bnf	1b		\n"			\
+		"	 l.nop			\n"			\
+		: "=&r"(tmp)						\
+		: "r"(&v->counter), "r"(i)				\
+		: "cc", "memory");					\
+									\
+	return tmp;							\
+}
+
+/* Atomically perform op with v->counter and i, return orig v->counter */
+#define ATOMIC_FETCH_OP(op)						\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	int tmp, old;							\
+									\
+	__asm__ __volatile__(						\
+		"1:	l.lwa	%0,0(%2)	\n"			\
+		"	l." #op " %1,%0,%3	\n"			\
+		"	l.swa	0(%2),%1	\n"			\
+		"	l.bnf	1b		\n"			\
+		"	 l.nop			\n"			\
+		: "=&r"(old), "=&r"(tmp)				\
+		: "r"(&v->counter), "r"(i)				\
+		: "cc", "memory");					\
+									\
+	return old;							\
+}
+
+ATOMIC_OP_RETURN(add)
+ATOMIC_OP_RETURN(sub)
+
+ATOMIC_FETCH_OP(add)
+ATOMIC_FETCH_OP(sub)
+ATOMIC_FETCH_OP(and)
+ATOMIC_FETCH_OP(or)
+ATOMIC_FETCH_OP(xor)
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+#undef ATOMIC_OP
+
+#define atomic_add_return	atomic_add_return
+#define atomic_sub_return	atomic_sub_return
+#define atomic_fetch_add	atomic_fetch_add
+#define atomic_fetch_sub	atomic_fetch_sub
+#define atomic_fetch_and	atomic_fetch_and
+#define atomic_fetch_or		atomic_fetch_or
+#define atomic_fetch_xor	atomic_fetch_xor
+#define atomic_and	atomic_and
+#define atomic_or	atomic_or
+#define atomic_xor	atomic_xor
+
+/*
+ * Atomically add a to v->counter as long as v->counter is not already u.
+ * Returns the original value at v->counter.
+ *
+ * This is often used through atomic_inc_not_zero()
+ */
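+/*
+ * Implementation note: the l.sfeq/l.bf pair below skips straight to
+ * label 2 when the value already equals u, so old + a is only written
+ * back (via l.swa, with retry on failure) in the "not yet u" case.
+ */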
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+	int old, tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa %0, 0(%2)		\n"
+		"	l.sfeq %0, %4		\n"
+		"	l.bf 2f			\n"
+		"	 l.add %1, %0, %3	\n"
+		"	l.swa 0(%2), %1		\n"
+		"	l.bnf 1b		\n"
+		"	 l.nop			\n"
+		"2:				\n"
+		: "=&r"(old), "=&r" (tmp)
+		: "r"(&v->counter), "r"(a), "r"(u)
+		: "cc", "memory");
+
+	return old;
+}
+#define atomic_fetch_add_unless	atomic_fetch_add_unless
+
+#include <asm-generic/atomic.h>
+
+#endif /* __ASM_OPENRISC_ATOMIC_H */
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
new file mode 100644
index 0000000..689f568
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops.h
@@ -0,0 +1,53 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_BITOPS_H
+#define __ASM_OPENRISC_BITOPS_H
+
+/*
+ * Where we haven't written assembly versions yet, we fall back to the
+ * generic implementations.  Otherwise, we pull in our (hopefully)
+ * optimized versions.
+ */
+
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#include <asm/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm/bitops/fls.h>
+#include <asm/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* __ASM_OPENRISC_BITOPS_H */
diff --git a/arch/openrisc/include/asm/bitops/__ffs.h b/arch/openrisc/include/asm/bitops/__ffs.h
new file mode 100644
index 0000000..6c8368a
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__ffs.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FFS_H
+#define __ASM_OPENRISC___FFS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline unsigned long __ffs(unsigned long x)
+{
+	int ret;
+
+	__asm__ ("l.ff1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/__fls.h b/arch/openrisc/include/asm/bitops/__fls.h
new file mode 100644
index 0000000..c4ecdb4
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/__fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC___FLS_H
+#define __ASM_OPENRISC___FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline unsigned long __fls(unsigned long x)
+{
+	int ret;
+
+	__asm__ ("l.fl1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret-1;
+}
+
+#else
+#include <asm-generic/bitops/__fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC___FLS_H */
diff --git a/arch/openrisc/include/asm/bitops/atomic.h b/arch/openrisc/include/asm/bitops/atomic.h
new file mode 100644
index 0000000..35fb85f
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/atomic.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_BITOPS_ATOMIC_H
+#define __ASM_OPENRISC_BITOPS_ATOMIC_H
+
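+/*
+ * Each operation below follows the same pattern: l.lwa loads the word
+ * and establishes a reservation, the bit is set/cleared/toggled in a
+ * register, and l.swa writes the word back only if the reservation is
+ * still intact.  If the store fails (flag not set), l.bnf retries from
+ * label 1.  The test_and_* variants additionally keep the pre-update
+ * value so the old state of the bit can be returned.
+ */
+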
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0,0(%1)	\n"
+		"	l.or	%0,%0,%2	\n"
+		"	l.swa	0(%1),%0	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(tmp)
+		: "r"(p), "r"(mask)
+		: "cc", "memory");
+}
+
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0,0(%1)	\n"
+		"	l.and	%0,%0,%2	\n"
+		"	l.swa	0(%1),%0	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(tmp)
+		: "r"(p), "r"(~mask)
+		: "cc", "memory");
+}
+
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0,0(%1)	\n"
+		"	l.xor	%0,%0,%2	\n"
+		"	l.swa	0(%1),%0	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(tmp)
+		: "r"(p), "r"(mask)
+		: "cc", "memory");
+}
+
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0,0(%2)	\n"
+		"	l.or	%1,%0,%3	\n"
+		"	l.swa	0(%2),%1	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(old), "=&r"(tmp)
+		: "r"(p), "r"(mask)
+		: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0,0(%2)	\n"
+		"	l.and	%1,%0,%3	\n"
+		"	l.swa	0(%2),%1	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(old), "=&r"(tmp)
+		: "r"(p), "r"(~mask)
+		: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+		"1:	l.lwa	%0,0(%2)	\n"
+		"	l.xor	%1,%0,%3	\n"
+		"	l.swa	0(%2),%1	\n"
+		"	l.bnf	1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(old), "=&r"(tmp)
+		: "r"(p), "r"(mask)
+		: "cc", "memory");
+
+	return (old & mask) != 0;
+}
+
+#endif /* __ASM_OPENRISC_BITOPS_ATOMIC_H */
diff --git a/arch/openrisc/include/asm/bitops/ffs.h b/arch/openrisc/include/asm/bitops/ffs.h
new file mode 100644
index 0000000..9de4624
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/ffs.h
@@ -0,0 +1,32 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FFS_H
+#define __ASM_OPENRISC_FFS_H
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FF1
+
+static inline int ffs(int x)
+{
+	int ret;
+
+	__asm__ ("l.ff1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret;
+}
+
+#else
+#include <asm-generic/bitops/ffs.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FFS_H */
diff --git a/arch/openrisc/include/asm/bitops/fls.h b/arch/openrisc/include/asm/bitops/fls.h
new file mode 100644
index 0000000..9efbf9a
--- /dev/null
+++ b/arch/openrisc/include/asm/bitops/fls.h
@@ -0,0 +1,33 @@
+/*
+ * OpenRISC Linux
+ *
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FLS_H
+#define __ASM_OPENRISC_FLS_H
+
+
+#ifdef CONFIG_OPENRISC_HAVE_INST_FL1
+
+static inline int fls(int x)
+{
+	int ret;
+
+	__asm__ ("l.fl1 %0,%1"
+		 : "=r" (ret)
+		 : "r" (x));
+
+	return ret;
+}
+
+#else
+#include <asm-generic/bitops/fls.h>
+#endif
+
+#endif /* __ASM_OPENRISC_FLS_H */
diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
new file mode 100644
index 0000000..5f55da9
--- /dev/null
+++ b/arch/openrisc/include/asm/cache.h
@@ -0,0 +1,31 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CACHE_H
+#define __ASM_OPENRISC_CACHE_H
+
+/* FIXME: How can we replace these with values from the CPU...
+ * they shouldn't be hard-coded!
+ */
+
+#define __ro_after_init __read_mostly
+
+#define L1_CACHE_BYTES 16
+#define L1_CACHE_SHIFT 4
+
+#endif /* __ASM_OPENRISC_CACHE_H */
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
new file mode 100644
index 0000000..70f46fd
--- /dev/null
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -0,0 +1,96 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) Jan Henrik Weinstock <jan.weinstock@rwth-aachen.de>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+/*
+ * Helper functions for flushing or invalidating entire pages from the
+ * data and instruction caches. SMP needs a little extra work, since we
+ * need to flush the pages on all cpus.
+ */
+extern void local_dcache_page_flush(struct page *page);
+extern void local_icache_page_inv(struct page *page);
+
+/*
+ * Data cache flushing always happens on the local cpu. Instruction cache
+ * invalidations need to be broadcast to all other cpus in the system in
+ * SMP configurations.
+ */
+#ifndef CONFIG_SMP
+#define dcache_page_flush(page)      local_dcache_page_flush(page)
+#define icache_page_inv(page)        local_icache_page_inv(page)
+#else  /* CONFIG_SMP */
+#define dcache_page_flush(page)      local_dcache_page_flush(page)
+#define icache_page_inv(page)        smp_icache_page_inv(page)
+extern void smp_icache_page_inv(struct page *page);
+#endif /* CONFIG_SMP */
+
+/*
+ * Synchronizes caches. Whenever a cpu writes executable code to memory, this
+ * should be called to make sure the processor sees the newly written code.
+ */
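+/*
+ * When the data cache is write-through (CONFIG_DCACHE_WRITETHROUGH)
+ * memory is already up to date, so only the instruction cache needs to
+ * be invalidated; otherwise the data cache is flushed first.
+ */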
+static inline void sync_icache_dcache(struct page *page)
+{
+	if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))
+		dcache_page_flush(page);
+	icache_page_inv(page);
+}
+
+/*
+ * Pages with this bit set need not be flushed/invalidated, since
+ * they have not changed since last flush. New pages start with
+ * PG_arch_1 not set and are therefore dirty by default.
+ */
+#define PG_dc_clean                  PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+	clear_bit(PG_dc_clean, &page->flags);
+}
+
+/*
+ * Other interfaces are not required since we do not have virtually
+ * indexed or tagged caches. So we can use the default here.
+ */
+#define flush_cache_all()				do { } while (0)
+#define flush_cache_mm(mm)				do { } while (0)
+#define flush_cache_dup_mm(mm)				do { } while (0)
+#define flush_cache_range(vma, start, end)		do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)		do { } while (0)
+#define flush_dcache_mmap_lock(mapping)			do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)		do { } while (0)
+#define flush_icache_range(start, end)			do { } while (0)
+#define flush_icache_page(vma, pg)			do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)
+#define flush_cache_vmap(start, end)			do { } while (0)
+#define flush_cache_vunmap(start, end)			do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)           \
+	do {                                                         \
+		memcpy(dst, src, len);                               \
+		if (vma->vm_flags & VM_EXEC)                         \
+			sync_icache_dcache(page);                    \
+	} while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)         \
+	memcpy(dst, src, len)
+
+#endif /* __ASM_CACHEFLUSH_H */
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
new file mode 100644
index 0000000..f9cd43a
--- /dev/null
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -0,0 +1,171 @@
+/*
+ * 1,2 and 4 byte cmpxchg and xchg implementations for OpenRISC.
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * Note:
+ * The portable implementations of 1 and 2 byte xchg and cmpxchg using a
+ * 4 byte cmpxchg are sourced heavily from the sh and mips implementations.
+ */
+
+#ifndef __ASM_OPENRISC_CMPXCHG_H
+#define __ASM_OPENRISC_CMPXCHG_H
+
+#include  <linux/bits.h>
+#include  <linux/compiler.h>
+#include  <linux/types.h>
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+static inline unsigned long cmpxchg_u32(volatile void *ptr,
+		unsigned long old, unsigned long new)
+{
+	__asm__ __volatile__(
+		"1:	l.lwa %0, 0(%1)		\n"
+		"	l.sfeq %0, %2		\n"
+		"	l.bnf 2f		\n"
+		"	 l.nop			\n"
+		"	l.swa 0(%1), %3		\n"
+		"	l.bnf 1b		\n"
+		"	 l.nop			\n"
+		"2:				\n"
+		: "=&r"(old)
+		: "r"(ptr), "r"(old), "r"(new)
+		: "cc", "memory");
+
+	return old;
+}
+
+static inline unsigned long xchg_u32(volatile void *ptr,
+		unsigned long val)
+{
+	__asm__ __volatile__(
+		"1:	l.lwa %0, 0(%1)		\n"
+		"	l.swa 0(%1), %2		\n"
+		"	l.bnf 1b		\n"
+		"	 l.nop			\n"
+		: "=&r"(val)
+		: "r"(ptr), "r"(val)
+		: "cc", "memory");
+
+	return val;
+}
+
+static inline u32 cmpxchg_small(volatile void *ptr, u32 old, u32 new,
+				int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 load32, old32, new32;
+	u32 ret;
+
+	load32 = READ_ONCE(*p);
+
+	while (true) {
+		ret = (load32 & bitmask) >> bitoff;
+		if (old != ret)
+			return ret;
+
+		old32 = (load32 & ~bitmask) | (old << bitoff);
+		new32 = (load32 & ~bitmask) | (new << bitoff);
+
+		/* Do 32 bit cmpxchg */
+		load32 = cmpxchg_u32(p, old32, new32);
+		if (load32 == old32)
+			return old;
+	}
+}
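+
+/*
+ * Illustrative example: cmpxchg() on a u16 lands here with size == 2.
+ * The value is located inside its aligned 32-bit word (bitoff/bitmask),
+ * old and new are spliced into a copy of that word, and the update is
+ * retried with cmpxchg_u32() until either the comparison fails or the
+ * word is swapped without the surrounding bytes having changed.
+ */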
+
+/* xchg */
+
+static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern unsigned long __cmpxchg_called_with_bad_pointer(void)
+	__compiletime_error("Bad argument size for cmpxchg");
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+		unsigned long new, int size)
+{
+	switch (size) {
+	case 1:
+	case 2:
+		return cmpxchg_small(ptr, old, new, size);
+	case 4:
+		return cmpxchg_u32(ptr, old, new);
+	default:
+		return __cmpxchg_called_with_bad_pointer();
+	}
+}
+
+#define cmpxchg(ptr, o, n)						\
+	({								\
+		(__typeof__(*(ptr))) __cmpxchg((ptr),			\
+					       (unsigned long)(o),	\
+					       (unsigned long)(n),	\
+					       sizeof(*(ptr)));		\
+	})
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalidly-sized xchg().
+ */
+extern unsigned long __xchg_called_with_bad_pointer(void)
+	__compiletime_error("Bad argument size for xchg");
+
+static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
+		int size)
+{
+	switch (size) {
+	case 1:
+	case 2:
+		return xchg_small(ptr, with, size);
+	case 4:
+		return xchg_u32(ptr, with);
+	default:
+		return __xchg_called_with_bad_pointer();
+	}
+}
+
+#define xchg(ptr, with) 						\
+	({								\
+		(__typeof__(*(ptr))) __xchg((ptr),			\
+					    (unsigned long)(with),	\
+					    sizeof(*(ptr)));		\
+	})
+
+#endif /* __ASM_OPENRISC_CMPXCHG_H */
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
new file mode 100644
index 0000000..4ea0a33
--- /dev/null
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -0,0 +1,39 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_CPUINFO_H
+#define __ASM_OPENRISC_CPUINFO_H
+
+struct cpuinfo_or1k {
+	u32 clock_frequency;
+
+	u32 icache_size;
+	u32 icache_block_size;
+	u32 icache_ways;
+
+	u32 dcache_size;
+	u32 dcache_block_size;
+	u32 dcache_ways;
+
+	u16 coreid;
+};
+
+extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
+extern void setup_cpuinfo(void);
+
+#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/delay.h b/arch/openrisc/include/asm/delay.h
new file mode 100644
index 0000000..17f8bf5
--- /dev/null
+++ b/arch/openrisc/include/asm/delay.h
@@ -0,0 +1,24 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_DELAY_H
+#define __ASM_OPENRISC_DELAY_H
+
+#include <asm-generic/delay.h>
+
+extern unsigned long loops_per_jiffy;
+
+#endif
diff --git a/arch/openrisc/include/asm/elf.h b/arch/openrisc/include/asm/elf.h
new file mode 100644
index 0000000..d334e20
--- /dev/null
+++ b/arch/openrisc/include/asm/elf.h
@@ -0,0 +1,65 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_OPENRISC_ELF_H
+#define __ASM_OPENRISC_ELF_H
+
+
+#include <linux/types.h>
+#include <uapi/asm/elf.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+
+#define elf_check_arch(x) \
+	(((x)->e_machine == EM_OR32) || ((x)->e_machine == EM_OPENRISC))
+
+/* This is the location that an ET_DYN program is loaded if exec'ed.  Typical
+   use of this is to invoke "./ld.so someprog" to test out a new version of
+   the loader.  We need to make sure that it is out of the way of the program
+   that it will "exec", and that there is sufficient room for the brk.  */
+
+#define ELF_ET_DYN_BASE         (0x08000000)
+
+/*
+ * Enable dump using regset.
+ * This covers all of general/DSP/FPU regs.
+ */
+#define CORE_DUMP_USE_REGSET
+
+#define ELF_EXEC_PAGESIZE	8192
+
+extern void dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt);
+#define ELF_CORE_COPY_REGS(dest, regs) dump_elf_thread(dest, regs);
+
+/* This yields a mask that user programs can use to figure out what
+   instruction set this cpu supports.  This could be done in userspace,
+   but it's not easy, and we've already done it here.  */
+
+#define ELF_HWCAP	(0)
+
+/* This yields a string that ld.so will use to load implementation
+   specific libraries for optimization.  This is more specific in
+   intent than poking at uname or /proc/cpuinfo.
+
+   For the moment, we have only optimizations for the Intel generations,
+   but that could change... */
+
+#define ELF_PLATFORM	(NULL)
+
+#endif
diff --git a/arch/openrisc/include/asm/fixmap.h b/arch/openrisc/include/asm/fixmap.h
new file mode 100644
index 0000000..5a01595
--- /dev/null
+++ b/arch/openrisc/include/asm/fixmap.h
@@ -0,0 +1,88 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_FIXMAP_H
+#define __ASM_OPENRISC_FIXMAP_H
+
+/* Why exactly do we need 2 empty pages between the top of the fixed
+ * addresses and the top of virtual memory?  Something is using that
+ * memory space, but we're not sure what right now... If you find it,
+ * leave a comment here.
+ */
+#define FIXADDR_TOP	((unsigned long) (-2*PAGE_SIZE))
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <asm/page.h>
+
+/*
+ * On OpenRISC we use these special fixed_addresses for doing ioremap
+ * early in the boot process before memory initialization is complete.
+ * This is used, in particular, by the early serial console code.
+ *
+ * It's not really 'fixmap', per se, but fits loosely into the same
+ * paradigm.
+ */
+enum fixed_addresses {
+	/*
+	 * FIX_IOREMAP entries are useful for mapping physical address
+	 * space before ioremap() is useable, e.g. really early in boot
+	 * before kmalloc() is working.
+	 */
+#define FIX_N_IOREMAPS  32
+	FIX_IOREMAP_BEGIN,
+	FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS - 1,
+	__end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE		(__end_of_fixed_addresses << PAGE_SHIFT)
+/* FIXADDR_BOTTOM might be a better name here... */
+#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-dereference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+	/*
+	 * This branch gets completely eliminated after inlining,
+	 * except when someone tries to use fixaddr indices in an
+	 * illegal way (such as mixing up address types or using
+	 * out-of-range indices).
+	 *
+	 * If it doesn't get removed, the linker will complain
+	 * loudly with a reasonably clear error message.
+	 */
+	if (idx >= __end_of_fixed_addresses)
+		BUG();
+
+	return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+	BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+	return __virt_to_fix(vaddr);
+}
+
+#endif
diff --git a/arch/openrisc/include/asm/futex.h b/arch/openrisc/include/asm/futex.h
new file mode 100644
index 0000000..618da4a
--- /dev/null
+++ b/arch/openrisc/include/asm/futex.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_OPENRISC_FUTEX_H
+#define __ASM_OPENRISC_FUTEX_H
+
+#ifdef __KERNEL__
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+({								\
+	__asm__ __volatile__ (					\
+		"1:	l.lwa	%0, %2			\n"	\
+			insn				"\n"	\
+		"2:	l.swa	%2, %1			\n"	\
+		"	l.bnf	1b			\n"	\
+		"	 l.ori	%1, r0, 0		\n"	\
+		"3:					\n"	\
+		".section .fixup,\"ax\"			\n"	\
+		"4:	l.j	3b			\n"	\
+		"	 l.addi	%1, r0, %3		\n"	\
+		".previous				\n"	\
+		".section __ex_table,\"a\"		\n"	\
+		".word	1b,4b,2b,4b			\n"	\
+		".previous				\n"	\
+		: "=&r" (oldval), "=&r" (ret), "+m" (*uaddr)	\
+		: "i" (-EFAULT), "r" (oparg)			\
+		: "cc", "memory"				\
+		);						\
+})
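+
+/*
+ * The .fixup/__ex_table entries above pair the user accesses at labels
+ * 1 and 2 with the handler at label 4: a fault on either instruction
+ * jumps there and -EFAULT is returned in the 'ret' operand instead of
+ * taking a kernel oops.
+ */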
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+	int oldval = 0, ret;
+
+	pagefault_disable();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("l.or %1,%4,%4", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("l.add %1,%0,%4", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("l.or %1,%0,%4", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("l.and %1,%0,%4", ret, oldval, uaddr, ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("l.xor %1,%0,%4", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	pagefault_enable();
+
+	if (!ret)
+		*oval = oldval;
+
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+			      u32 oldval, u32 newval)
+{
+	int ret = 0;
+	u32 prev;
+
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+		return -EFAULT;
+
+	__asm__ __volatile__ (				\
+		"1:	l.lwa	%1, %2		\n"	\
+		"	l.sfeq	%1, %3		\n"	\
+		"	l.bnf	3f		\n"	\
+		"	 l.nop			\n"	\
+		"2:	l.swa	%2, %4		\n"	\
+		"	l.bnf	1b		\n"	\
+		"	 l.nop			\n"	\
+		"3:				\n"	\
+		".section .fixup,\"ax\"		\n"	\
+		"4:	l.j	3b		\n"	\
+		"	 l.addi	%0, r0, %5	\n"	\
+		".previous			\n"	\
+		".section __ex_table,\"a\"	\n"	\
+		".word	1b,4b,2b,4b		\n"	\
+		".previous			\n"	\
+		: "+r" (ret), "=&r" (prev), "+m" (*uaddr) \
+		: "r" (oldval), "r" (newval), "i" (-EFAULT) \
+		: "cc",	"memory"			\
+		);
+
+	*uval = prev;
+	return ret;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_FUTEX_H */
diff --git a/arch/openrisc/include/asm/io.h b/arch/openrisc/include/asm/io.h
new file mode 100644
index 0000000..6709b28
--- /dev/null
+++ b/arch/openrisc/include/asm/io.h
@@ -0,0 +1,53 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IO_H
+#define __ASM_OPENRISC_IO_H
+
+/*
+ * PCI: can we really do 0 here if we have no port IO?
+ */
+#define IO_SPACE_LIMIT		0
+
+/* OpenRISC has no port IO */
+#define HAVE_ARCH_PIO_SIZE	1
+#define PIO_RESERVED		0x0UL
+#define PIO_OFFSET		0
+#define PIO_MASK		0
+
+#define ioremap_nocache ioremap_nocache
+#include <asm-generic/io.h>
+#include <asm/pgtable.h>
+
+extern void __iomem *__ioremap(phys_addr_t offset, unsigned long size,
+				pgprot_t prot);
+
+static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
+{
+	return __ioremap(offset, size, PAGE_KERNEL);
+}
+
+/* #define _PAGE_CI       0x002 */
+static inline void __iomem *ioremap_nocache(phys_addr_t offset,
+					     unsigned long size)
+{
+	return __ioremap(offset, size,
+			 __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_CI));
+}
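+
+/*
+ * Note: ioremap_nocache() differs from ioremap() only in adding
+ * _PAGE_CI (cache inhibit, see asm/pgtable.h), so accesses through the
+ * mapping bypass the data cache, as is normally wanted for device
+ * registers.
+ */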
+
+extern void iounmap(void *addr);
+#endif
diff --git a/arch/openrisc/include/asm/irq.h b/arch/openrisc/include/asm/irq.h
new file mode 100644
index 0000000..eb612b1
--- /dev/null
+++ b/arch/openrisc/include/asm/irq.h
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_IRQ_H__
+#define __ASM_OPENRISC_IRQ_H__
+
+#define	NR_IRQS		32
+#include <asm-generic/irq.h>
+
+#define NO_IRQ		(-1)
+
+#endif /* __ASM_OPENRISC_IRQ_H__ */
diff --git a/arch/openrisc/include/asm/irqflags.h b/arch/openrisc/include/asm/irqflags.h
new file mode 100644
index 0000000..dc86c65
--- /dev/null
+++ b/arch/openrisc/include/asm/irqflags.h
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef ___ASM_OPENRISC_IRQFLAGS_H
+#define ___ASM_OPENRISC_IRQFLAGS_H
+
+#include <asm/spr_defs.h>
+
+#define ARCH_IRQ_DISABLED        0x00
+#define ARCH_IRQ_ENABLED         (SPR_SR_IEE|SPR_SR_TEE)
+
+#include <asm-generic/irqflags.h>
+
+#endif /* ___ASM_OPENRISC_IRQFLAGS_H */
diff --git a/arch/openrisc/include/asm/linkage.h b/arch/openrisc/include/asm/linkage.h
new file mode 100644
index 0000000..e263875
--- /dev/null
+++ b/arch/openrisc/include/asm/linkage.h
@@ -0,0 +1,25 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_LINKAGE_H
+#define __ASM_OPENRISC_LINKAGE_H
+
+#define __ALIGN      .align 0
+#define __ALIGN_STR ".align 0"
+
+#endif /* __ASM_OPENRISC_LINKAGE_H */
diff --git a/arch/openrisc/include/asm/mmu.h b/arch/openrisc/include/asm/mmu.h
new file mode 100644
index 0000000..d069bc2
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu.h
@@ -0,0 +1,26 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_H
+#define __ASM_OPENRISC_MMU_H
+
+#ifndef __ASSEMBLY__
+typedef unsigned long mm_context_t;
+#endif
+
+#endif
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
new file mode 100644
index 0000000..c380d8c
--- /dev/null
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -0,0 +1,43 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_MMU_CONTEXT_H
+#define __ASM_OPENRISC_MMU_CONTEXT_H
+
+#include <asm-generic/mm_hooks.h>
+
+extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+extern void destroy_context(struct mm_struct *mm);
+extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+		      struct task_struct *tsk);
+
+#define deactivate_mm(tsk, mm)	do { } while (0)
+
+#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
+
+/* currently active pgd - this is similar to other processors' pgd
+ * registers, like cr3 on the i386
+ */
+
+extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */
+
+static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
+{
+}
+
+#endif
diff --git a/arch/openrisc/include/asm/page.h b/arch/openrisc/include/asm/page.h
new file mode 100644
index 0000000..35bcb7c
--- /dev/null
+++ b/arch/openrisc/include/asm/page.h
@@ -0,0 +1,101 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PAGE_H
+#define __ASM_OPENRISC_PAGE_H
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT      13
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE       (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE       (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK       (~(PAGE_SIZE-1))
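+
+/* A PAGE_SHIFT of 13 gives 8 KiB pages (cf. ELF_EXEC_PAGESIZE in asm/elf.h). */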
+
+#define PAGE_OFFSET	0xc0000000
+#define KERNELBASE	PAGE_OFFSET
+
+/* This is not necessarily the right place for this, but it's needed by
+ * drivers/of/fdt.c
+ */
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define clear_page(page)	memset((page), 0, PAGE_SIZE)
+#define copy_page(to, from)	memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg)        clear_page(page)
+#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+	unsigned long pte;
+} pte_t;
+typedef struct {
+	unsigned long pgd;
+} pgd_t;
+typedef struct {
+	unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x)	((x).pte)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) })
+#define __pgd(x)	((pgd_t) { (x) })
+#define __pgprot(x)	((pgprot_t) { (x) })
+
+#endif /* !__ASSEMBLY__ */
+
+
+#ifndef __ASSEMBLY__
+
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+
+#define virt_to_pfn(kaddr)      (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn)        __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) \
+	(mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
+
+#define page_to_phys(page)      ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+
+#define pfn_valid(pfn)          ((pfn) < max_mapnr)
+
+#define virt_addr_valid(kaddr)	(pfn_valid(virt_to_pfn(kaddr)))
+
+#endif /* __ASSEMBLY__ */
+
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* __ASM_OPENRISC_PAGE_H */
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
new file mode 100644
index 0000000..8999b92
--- /dev/null
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -0,0 +1,111 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PGALLOC_H
+#define __ASM_OPENRISC_PGALLOC_H
+
+#include <asm/page.h>
+#include <linux/threads.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+
+extern int mem_init_done;
+
+#define pmd_populate_kernel(mm, pmd, pte) \
+	set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
+				struct page *pte)
+{
+	set_pmd(pmd, __pmd(_KERNPG_TABLE +
+		     ((unsigned long)page_to_pfn(pte) <<
+		     (unsigned long) PAGE_SHIFT)));
+}
+
+/*
+ * Allocate and free page tables.
+ */
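+/*
+ * A freshly allocated pgd has its user half cleared and its kernel half
+ * copied from swapper_pg_dir, so kernel mappings resolve identically in
+ * every address space.
+ */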
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
+
+	if (ret) {
+		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+		memcpy(ret + USER_PTRS_PER_PGD,
+		       swapper_pg_dir + USER_PTRS_PER_PGD,
+		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+
+	}
+	return ret;
+}
+
+#if 0
+/* FIXME: This seems to be the preferred style, but we are using
+ * current_pgd (from mm->pgd) to load kernel pages so we need it
+ * initialized.  This needs to be looked into.
+ */
+extern inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	return (pgd_t *)get_zeroed_page(GFP_KERNEL);
+}
+#endif
+
+static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
+
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
+{
+	struct page *pte;
+	pte = alloc_pages(GFP_KERNEL, 0);
+	if (!pte)
+		return NULL;
+	clear_page(page_address(pte));
+	if (!pgtable_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
+	return pte;
+}
+
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+static inline void pte_free(struct mm_struct *mm, struct page *pte)
+{
+	pgtable_page_dtor(pte);
+	__free_page(pte);
+}
+
+#define __pte_free_tlb(tlb, pte, addr)	\
+do {					\
+	pgtable_page_dtor(pte);		\
+	tlb_remove_page((tlb), (pte));	\
+} while (0)
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+#define check_pgt_cache()          do { } while (0)
+
+#endif
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
new file mode 100644
index 0000000..21c7130
--- /dev/null
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -0,0 +1,458 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/* or32 pgtable.h - macros and functions to manipulate page tables
+ *
+ * Based on:
+ * include/asm-cris/pgtable.h
+ */
+
+#ifndef __ASM_OPENRISC_PGTABLE_H
+#define __ASM_OPENRISC_PGTABLE_H
+
+#define __ARCH_USE_5LEVEL_HACK
+#include <asm-generic/pgtable-nopmd.h>
+
+#ifndef __ASSEMBLY__
+#include <asm/mmu.h>
+#include <asm/fixmap.h>
+
+/*
+ * The Linux memory management assumes a three-level page table setup. On
+ * or32, we use that, but "fold" the mid level into the top-level page
+ * table. Since the MMU TLB is software loaded through an interrupt, it
+ * supports any page table structure, so we could have used a three-level
+ * setup, but for the amounts of memory we normally use, a two-level is
+ * probably more efficient.
+ *
+ * This file contains the functions and defines necessary to modify and use
+ * the or32 page table tree.
+ */
+
+extern void paging_init(void);
+
+/* Certain architectures need to do special things when pte's
+ * within a page table are directly modified.  Thus, the following
+ * hook is made available.
+ */
+#define set_pte(pteptr, pteval) ((*(pteptr)) = (pteval))
+#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
+/*
+ * (pmds are folded into pgds so this doesn't get actually called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
+
+#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT-2))
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+/*
+ * entries per page directory level: we use a two-level, so
+ * we don't really have any PMD directory physically.
+ * pointers are 4 bytes so we can use the page size and
+ * divide it by 4 (shift by 2).
+ */
+#define PTRS_PER_PTE	(1UL << (PAGE_SHIFT-2))
+
+#define PTRS_PER_PGD	(1UL << (32-PGDIR_SHIFT))
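+
+/*
+ * With the 8 KiB pages from asm/page.h (PAGE_SHIFT == 13) this works
+ * out to PTRS_PER_PTE == 2048 and PGDIR_SHIFT == 24, so one pgd entry
+ * covers 16 MiB and PTRS_PER_PGD == 256 entries span the whole 32-bit
+ * address space.
+ */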
+
+/* calculate how many PGD entries a user-level program can use;
+ * the first mappable virtual address is 0
+ * (TASK_SIZE is the maximum virtual address space)
+ */
+
+#define USER_PTRS_PER_PGD       (TASK_SIZE/PGDIR_SIZE)
+#define FIRST_USER_ADDRESS      0UL
+
+/*
+ * Kernel's own virtual memory area.
+ */
+
+/*
+ * The size and location of the vmalloc area are chosen so that modules
+ * placed in this area aren't more than a 28-bit signed offset from any
+ * kernel functions that they may need.  This greatly simplifies handling
+ * of the relocations for l.j and l.jal instructions as we don't need to
+ * introduce any trampolines for reaching "distant" code.
+ *
+ * 64 MB of vmalloc area is comparable to what's available on other arches.
+ */
+
+#define VMALLOC_START	(PAGE_OFFSET-0x04000000UL)
+#define VMALLOC_END	(PAGE_OFFSET)
+#define VMALLOC_VMADDR(x) ((unsigned long)(x))
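+
+/*
+ * Arithmetic check: VMALLOC_START = 0xc0000000 - 0x04000000 = 0xbc000000,
+ * i.e. the 64 MB area sits directly below PAGE_OFFSET, well within the
+ * +/-128 MB (28-bit signed) reach of l.j/l.jal.
+ */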
+
+/* Define some higher level generic page attributes.
+ *
+ * If you change _PAGE_CI definition be sure to change it in
+ * io.h for ioremap_nocache() too.
+ */
+
+/*
+ * An OR32 PTE looks like this:
+ *
+ * |  31 ... 10 |  9  |  8 ... 6  |  5  |  4  |  3  |  2  |  1  |  0  |
+ *  Phys pg.num    L     PP Index    D     A    WOM   WBC   CI    CC
+ *
+ *  L  : link
+ *  PPI: Page protection index
+ *  D  : Dirty
+ *  A  : Accessed
+ *  WOM: Weakly ordered memory
+ *  WBC: Write-back cache
+ *  CI : Cache inhibit
+ *  CC : Cache coherent
+ *
+ * The protection bits below should correspond to the layout of the actual
+ * PTE as per above
+ */
+
+#define _PAGE_CC       0x001 /* software: pte contains a translation */
+#define _PAGE_CI       0x002 /* cache inhibit          */
+#define _PAGE_WBC      0x004 /* write back cache       */
+#define _PAGE_WOM      0x008 /* weakly ordered memory  */
+
+#define _PAGE_A        0x010 /* accessed               */
+#define _PAGE_D        0x020 /* dirty                  */
+#define _PAGE_URE      0x040 /* user read enable       */
+#define _PAGE_UWE      0x080 /* user write enable      */
+
+#define _PAGE_SRE      0x100 /* superuser read enable  */
+#define _PAGE_SWE      0x200 /* superuser write enable */
+#define _PAGE_EXEC     0x400 /* software: page is executable */
+#define _PAGE_U_SHARED 0x800 /* software: page is shared in user space */
+
+/* 0x001 is the cache coherency bit, which should always be set to
+ *       1 - for SMP (when we support it)
+ *       0 - otherwise
+ *
+ * We just reuse this bit in software for _PAGE_PRESENT and
+ * force it to 0 when loading it into the TLB.
+ */
+#define _PAGE_PRESENT  _PAGE_CC
+#define _PAGE_USER     _PAGE_URE
+#define _PAGE_WRITE    (_PAGE_UWE | _PAGE_SWE)
+#define _PAGE_DIRTY    _PAGE_D
+#define _PAGE_ACCESSED _PAGE_A
+#define _PAGE_NO_CACHE _PAGE_CI
+#define _PAGE_SHARED   _PAGE_U_SHARED
+#define _PAGE_READ     (_PAGE_URE | _PAGE_SRE)
+
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_BASE     (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _PAGE_ALL      (_PAGE_PRESENT | _PAGE_ACCESSED)
+#define _KERNPG_TABLE \
+	(_PAGE_BASE | _PAGE_SRE | _PAGE_SWE | _PAGE_ACCESSED | _PAGE_DIRTY)
+
+#define PAGE_NONE       __pgprot(_PAGE_ALL)
+#define PAGE_READONLY   __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_READONLY_X __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+#define PAGE_SHARED \
+	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+		 | _PAGE_SHARED)
+#define PAGE_SHARED_X \
+	__pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_UWE | _PAGE_SWE \
+		 | _PAGE_SHARED | _PAGE_EXEC)
+#define PAGE_COPY       __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE)
+#define PAGE_COPY_X     __pgprot(_PAGE_ALL | _PAGE_URE | _PAGE_SRE | _PAGE_EXEC)
+
+#define PAGE_KERNEL \
+	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_RO \
+	__pgprot(_PAGE_ALL | _PAGE_SRE \
+		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC)
+#define PAGE_KERNEL_NOCACHE \
+	__pgprot(_PAGE_ALL | _PAGE_SRE | _PAGE_SWE \
+		 | _PAGE_SHARED | _PAGE_DIRTY | _PAGE_EXEC | _PAGE_CI)
+
+#define __P000	PAGE_NONE
+#define __P001	PAGE_READONLY_X
+#define __P010	PAGE_COPY
+#define __P011	PAGE_COPY_X
+#define __P100	PAGE_READONLY
+#define __P101	PAGE_READONLY_X
+#define __P110	PAGE_COPY
+#define __P111	PAGE_COPY_X
+
+#define __S000	PAGE_NONE
+#define __S001	PAGE_READONLY_X
+#define __S010	PAGE_SHARED
+#define __S011	PAGE_SHARED_X
+#define __S100	PAGE_READONLY
+#define __S101	PAGE_READONLY_X
+#define __S110	PAGE_SHARED
+#define __S111	PAGE_SHARED_X
+
+/* zero page used for uninitialized stuff */
+extern unsigned long empty_zero_page[2048];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+/* number of bits that fit into a memory pointer */
+#define BITS_PER_PTR			(8*sizeof(unsigned long))
+
+/* to align the pointer to a pointer address */
+#define PTR_MASK			(~(sizeof(void *)-1))
+
+/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
+/* 64-bit machines, beware!  SRB. */
+#define SIZEOF_PTR_LOG2			2
+
+/* to find an entry in a page-table */
+#define PAGE_PTR(address) \
+((unsigned long)(address)>>(PAGE_SHIFT-SIZEOF_PTR_LOG2)&PTR_MASK&~PAGE_MASK)
+
+/* to set the page-dir */
+#define SET_PAGE_DIR(tsk, pgdir)
+
+#define pte_none(x)	(!pte_val(x))
+#define pte_present(x)	(pte_val(x) & _PAGE_PRESENT)
+#define pte_clear(mm, addr, xp)	do { pte_val(*(xp)) = 0; } while (0)
+
+#define pmd_none(x)	(!pmd_val(x))
+#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK)) != _KERNPG_TABLE)
+#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
+#define pmd_clear(xp)	do { pmd_val(*(xp)) = 0; } while (0)
+
+/*
+ * The following only work if pte_present() is true.
+ * Undefined behaviour if not..
+ */
+
+static inline int pte_read(pte_t pte)  { return pte_val(pte) & _PAGE_READ; }
+static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_exec(pte_t pte)  { return pte_val(pte) & _PAGE_EXEC; }
+static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte) { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_WRITE);
+	return pte;
+}
+
+static inline pte_t pte_rdprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_READ);
+	return pte;
+}
+
+static inline pte_t pte_exprotect(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_EXEC);
+	return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_DIRTY);
+	return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+	pte_val(pte) &= ~(_PAGE_ACCESSED);
+	return pte;
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_WRITE;
+	return pte;
+}
+
+static inline pte_t pte_mkread(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_READ;
+	return pte;
+}
+
+static inline pte_t pte_mkexec(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_EXEC;
+	return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_DIRTY;
+	return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+	pte_val(pte) |= _PAGE_ACCESSED;
+	return pte;
+}
+
+/*
+ * Conversion functions: convert a page and protection to a page entry,
+ * and a page entry and page directory to the page they refer to.
+ */
+
+/* What actually goes as arguments to the various functions is less than
+ * obvious, but a rule of thumb is that struct pages go as struct page *,
+ * real physical DRAM addresses are unsigned long's, and DRAM "virtual"
+ * addresses (the 0xc0xxxxxx's) go as void *'s.
+ */
+
+static inline pte_t __mk_pte(void *page, pgprot_t pgprot)
+{
+	pte_t pte;
+	/* the PTE needs a physical address */
+	pte_val(pte) = __pa(page) | pgprot_val(pgprot);
+	return pte;
+}
+
+#define mk_pte(page, pgprot) __mk_pte(page_address(page), (pgprot))
+
+#define mk_pte_phys(physpage, pgprot) \
+({                                                                      \
+	pte_t __pte;                                                    \
+									\
+	pte_val(__pte) = (physpage) + pgprot_val(pgprot);               \
+	__pte;                                                          \
+})
+
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+	return pte;
+}
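
A minimal sketch of how the accessors above combine, assuming PAGE_KERNEL is defined earlier in this header; the helper name is hypothetical:

	static inline pte_t example_kernel_pte(struct page *page)
	{
		pte_t pte = mk_pte(page, PAGE_KERNEL);	/* physical address | protection bits */

		pte = pte_mkdirty(pte);		/* set _PAGE_DIRTY */
		pte = pte_mkyoung(pte);		/* set _PAGE_ACCESSED */
		pte = pte_wrprotect(pte);	/* clear _PAGE_WRITE again */
		return pte;
	}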
+
+
+/*
+ * pte_val refers to a page in the 0x0xxxxxxx physical DRAM interval
+ * __pte_page(pte_val) refers to the "virtual" DRAM interval
+ * pte_pagenr refers to the page-number counted starting from the virtual
+ * DRAM start
+ */
+
+static inline unsigned long __pte_page(pte_t pte)
+{
+	/* the PTE contains a physical address */
+	return (unsigned long)__va(pte_val(pte) & PAGE_MASK);
+}
+
+#define pte_pagenr(pte)         ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)
+
+/* permanent address of a page */
+
+#define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT))
+#define pte_page(pte)		(mem_map+pte_pagenr(pte))
+
+/*
+ * only the pte's themselves need to point to physical DRAM (see above)
+ * the pagetable links are purely handled within the kernel SW and thus
+ * don't need the __pa and __va transformations.
+ */
+static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
+{
+	pmd_val(*pmdp) = _KERNPG_TABLE | (unsigned long) ptep;
+}
+
+#define pmd_page(pmd)		(pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
+#define pmd_page_kernel(pmd)    ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+/* to find an entry in a page-table-directory. */
+#define pgd_index(address)      ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+#define __pgd_offset(address)   pgd_index(address)
+
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+
+/* to find an entry in a kernel page-table-directory */
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+
+#define __pmd_offset(address) \
+	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
+/*
+ * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
+ *
+ * this macro returns the index of the entry in the pte page which would
+ * control the given virtual address
+ */
+#define __pte_offset(address)                   \
+	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_offset_kernel(dir, address)         \
+	((pte_t *) pmd_page_kernel(*(dir)) +  __pte_offset(address))
+#define pte_offset_map(dir, address)	        \
+	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
+#define pte_offset_map_nested(dir, address)     \
+	pte_offset_map(dir, address)
+
+#define pte_unmap(pte)          do { } while (0)
+#define pte_unmap_nested(pte)   do { } while (0)
+#define pte_pfn(x)		((unsigned long)(((x).pte)) >> PAGE_SHIFT)
+#define pfn_pte(pfn, prot)  __pte((((pfn) << PAGE_SHIFT)) | pgprot_val(prot))
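
A sketch of a full lookup with the macros above, assuming the two-level layout where the pmd is folded into the pgd (as asm-generic/pgtable-nopmd.h arranges); example_walk is a hypothetical name:

	static inline pte_t *example_walk(unsigned long vaddr)
	{
		pgd_t *pgd = pgd_offset_k(vaddr);	/* index into swapper_pg_dir */
		pmd_t *pmd = (pmd_t *)pgd;		/* pmd level is folded away here */

		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_kernel(pmd, vaddr);	/* entry controlling vaddr */
	}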
+
+#define pte_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pte %p(%08lx).\n", \
+	       __FILE__, __LINE__, &(e), pte_val(e))
+#define pgd_ERROR(e) \
+	printk(KERN_ERR "%s:%d: bad pgd %p(%08lx).\n", \
+	       __FILE__, __LINE__, &(e), pgd_val(e))
+
+extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* defined in head.S */
+
+struct vm_area_struct;
+
+static inline void update_tlb(struct vm_area_struct *vma,
+	unsigned long address, pte_t *pte)
+{
+}
+
+extern void update_cache(struct vm_area_struct *vma,
+	unsigned long address, pte_t *pte);
+
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+	unsigned long address, pte_t *pte)
+{
+	update_tlb(vma, address, pte);
+	update_cache(vma, address, pte);
+}
+
+/* __PHX__ FIXME, SWAP, this probably doesn't work */
+
+/* Encode and de-code a swap entry (must be !pte_none(e) && !pte_present(e)) */
+/* Since the PAGE_PRESENT bit is bit 4, we can use the bits above */
+
+#define __swp_type(x)			(((x).val >> 5) & 0x7f)
+#define __swp_offset(x)			((x).val >> 12)
+#define __swp_entry(type, offset) \
+	((swp_entry_t) { ((type) << 5) | ((offset) << 12) })
+#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
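
As a worked example of the encoding above, __swp_entry(3, 0x10) evaluates to (3 << 5) | (0x10 << 12) = 0x10060: the type sits in bits 5-11, the offset from bit 12 up, and bit 4 (_PAGE_PRESENT) stays clear, so the entry satisfies !pte_none() && !pte_present() as required.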
+
+#define kern_addr_valid(addr)           (1)
+
+#include <asm-generic/pgtable.h>
+
+/*
+ * No page table caches to initialise
+ */
+#define pgtable_cache_init()		do { } while (0)
+
+typedef pte_t *pte_addr_t;
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PGTABLE_H */
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h
new file mode 100644
index 0000000..af31a9f
--- /dev/null
+++ b/arch/openrisc/include/asm/processor.h
@@ -0,0 +1,90 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_PROCESSOR_H
+#define __ASM_OPENRISC_PROCESSOR_H
+
+#include <asm/spr_defs.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+
+#define STACK_TOP       TASK_SIZE
+#define STACK_TOP_MAX	STACK_TOP
+/* Kernel and user SR register setting */
+#define KERNEL_SR (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+		   | SPR_SR_DCE | SPR_SR_SM)
+#define USER_SR   (SPR_SR_DME | SPR_SR_IME | SPR_SR_ICE \
+		   | SPR_SR_DCE | SPR_SR_IEE | SPR_SR_TEE)
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr() ({ __label__ _l; _l: &&_l; })
+
+/*
+ * User space process size. This is hardcoded into a few places,
+ * so don't change it unless you know what you are doing.
+ */
+
+#define TASK_SIZE       (0x80000000UL)
+
+/* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE      (TASK_SIZE / 8 * 3)
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+struct thread_struct {
+};
+
+/*
+ * At user->kernel entry, the pt_regs struct is stacked on the top of the
+ * kernel-stack.  This macro allows us to find those regs for a task.
+ * Notice that subsequent pt_regs stackings, like recursive interrupts
+ * occurring while we're in the kernel, won't affect this - only the first
+ * user->kernel transition registers are reached by this (i.e. not regs
+ * for running signal handler)
+ */
+#define user_regs(thread_info)  (((struct pt_regs *)((unsigned long)(thread_info) + THREAD_SIZE - STACK_FRAME_OVERHEAD)) - 1)
+
+/*
+ * Ditto, but for the currently running task
+ */
+
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+
+#define INIT_SP         (sizeof(init_stack) + (unsigned long) &init_stack)
+
+#define INIT_THREAD  { }
+
+
+#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)   (task_pt_regs(tsk)->sp)
+
+
+void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
+void release_thread(struct task_struct *);
+unsigned long get_wchan(struct task_struct *p);
+
+#define cpu_relax()     barrier()
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_OPENRISC_PROCESSOR_H */
diff --git a/arch/openrisc/include/asm/ptrace.h b/arch/openrisc/include/asm/ptrace.h
new file mode 100644
index 0000000..6ca1726
--- /dev/null
+++ b/arch/openrisc/include/asm/ptrace.h
@@ -0,0 +1,124 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __ASM_OPENRISC_PTRACE_H
+#define __ASM_OPENRISC_PTRACE_H
+
+
+#include <asm/spr_defs.h>
+#include <uapi/asm/ptrace.h>
+
+/*
+ * Make kernel PTrace/register structures opaque to userspace... userspace can
+ * access thread state via the regset mechanism.  This allows us a bit of
+ * flexibility in how we order the registers on the stack, permitting some
+ * optimizations like packing call-clobbered registers together so that
+ * they share a cacheline (not done yet, though... future optimization).
+ */
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct describes how the registers are laid out on the kernel stack
+ * during a syscall or other kernel entry.
+ *
+ * This structure should always be cacheline aligned on the stack.
+ * FIXME: I don't think that's the case right now.  The alignment is
+ * taken care of elsewhere... head.S, process.c, etc.
+ */
+
+struct pt_regs {
+	union {
+		struct {
+			/* Named registers */
+			long  sr;	/* Stored in place of r0 */
+			long  sp;	/* r1 */
+		};
+		struct {
+			/* Old style */
+			long offset[2];
+			long gprs[30];
+		};
+		struct {
+			/* New style */
+			long gpr[32];
+		};
+	};
+	long  pc;
+	/* For restarting system calls:
+	 * Set to syscall number for syscall exceptions,
+	 * -1 for all other exceptions.
+	 */
+	long  orig_gpr11;	/* For restarting system calls */
+	long dummy;		/* Cheap alignment fix */
+	long dummy2;		/* Cheap alignment fix */
+};
+
+/* TODO: Rename this to REDZONE because that's what it is */
+#define STACK_FRAME_OVERHEAD  128  /* size of minimum stack frame */
+
+#define instruction_pointer(regs)	((regs)->pc)
+#define user_mode(regs)			(((regs)->sr & SPR_SR_SM) == 0)
+#define user_stack_pointer(regs)	((unsigned long)(regs)->sp)
+#define profile_pc(regs)		instruction_pointer(regs)
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+	return regs->gpr[11];
+}
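
Because of the union above, the named fields alias the flat gpr[] view: sr overlays gpr[0], sp overlays gpr[1], and the syscall return value read by regs_return_value() is gpr[11]. A compile-time check along these lines would capture that (hypothetical helper, assumes <linux/build_bug.h>):

	static inline void pt_regs_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct pt_regs, sr) !=
			     offsetof(struct pt_regs, gpr[0]));
		BUILD_BUG_ON(offsetof(struct pt_regs, sp) !=
			     offsetof(struct pt_regs, gpr[1]));
	}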
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Offsets used by 'ptrace' system call interface.
+ */
+#define PT_SR         0
+#define PT_SP         4
+#define PT_GPR2       8
+#define PT_GPR3       12
+#define PT_GPR4       16
+#define PT_GPR5       20
+#define PT_GPR6       24
+#define PT_GPR7       28
+#define PT_GPR8       32
+#define PT_GPR9       36
+#define PT_GPR10      40
+#define PT_GPR11      44
+#define PT_GPR12      48
+#define PT_GPR13      52
+#define PT_GPR14      56
+#define PT_GPR15      60
+#define PT_GPR16      64
+#define PT_GPR17      68
+#define PT_GPR18      72
+#define PT_GPR19      76
+#define PT_GPR20      80
+#define PT_GPR21      84
+#define PT_GPR22      88
+#define PT_GPR23      92
+#define PT_GPR24      96
+#define PT_GPR25      100
+#define PT_GPR26      104
+#define PT_GPR27      108
+#define PT_GPR28      112
+#define PT_GPR29      116
+#define PT_GPR30      120
+#define PT_GPR31      124
+#define PT_PC	      128
+#define PT_ORIG_GPR11 132
+#define PT_SYSCALLNO  136
+
+#endif /* __ASM_OPENRISC_PTRACE_H */
diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h
new file mode 100644
index 0000000..cb5932f
--- /dev/null
+++ b/arch/openrisc/include/asm/serial.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SERIAL_H
+#define __ASM_OPENRISC_SERIAL_H
+
+#ifdef __KERNEL__
+
+#include <asm/cpuinfo.h>
+
+/* There's a generic version of this file, but it assumes a 1.8MHz UART clk...
+ * this, on the other hand, assumes the UART clock is tied to the system
+ * clock... 8250_early.c (early 8250 serial console) actually uses this, so
+ * it needs to be correct to get the early console working.
+ */
+
+#define BASE_BAUD (cpuinfo_or1k[smp_processor_id()].clock_frequency/16)
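
For example, with an assumed 50 MHz system clock this works out to 50000000 / 16 = 3125000, from which the 8250 driver then derives the divisor for the requested baud rate.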
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_OPENRISC_SERIAL_H */
diff --git a/arch/openrisc/include/asm/smp.h b/arch/openrisc/include/asm/smp.h
new file mode 100644
index 0000000..e21d2f1
--- /dev/null
+++ b/arch/openrisc/include/asm/smp.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_SMP_H
+#define __ASM_OPENRISC_SMP_H
+
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
+#define hard_smp_processor_id()	mfspr(SPR_COREID)
+
+extern void smp_init_cpus(void);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
+extern void handle_IPI(unsigned int ipi_msg);
+
+#endif /* __ASM_OPENRISC_SMP_H */
diff --git a/arch/openrisc/include/asm/spinlock.h b/arch/openrisc/include/asm/spinlock.h
new file mode 100644
index 0000000..9b761e0
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPINLOCK_H
+#define __ASM_OPENRISC_SPINLOCK_H
+
+#include <asm/qspinlock.h>
+
+#include <asm/qrwlock.h>
+
+#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define arch_spin_relax(lock)	cpu_relax()
+#define arch_read_relax(lock)	cpu_relax()
+#define arch_write_relax(lock)	cpu_relax()
+
+
+#endif
diff --git a/arch/openrisc/include/asm/spinlock_types.h b/arch/openrisc/include/asm/spinlock_types.h
new file mode 100644
index 0000000..7c6fb12
--- /dev/null
+++ b/arch/openrisc/include/asm/spinlock_types.h
@@ -0,0 +1,7 @@
+#ifndef _ASM_OPENRISC_SPINLOCK_TYPES_H
+#define _ASM_OPENRISC_SPINLOCK_TYPES_H
+
+#include <asm/qspinlock_types.h>
+#include <asm/qrwlock_types.h>
+
+#endif /* _ASM_OPENRISC_SPINLOCK_TYPES_H */
diff --git a/arch/openrisc/include/asm/spr.h b/arch/openrisc/include/asm/spr.h
new file mode 100644
index 0000000..1cccb42
--- /dev/null
+++ b/arch/openrisc/include/asm/spr.h
@@ -0,0 +1,42 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SPR_H
+#define __ASM_OPENRISC_SPR_H
+
+#define mtspr(_spr, _val) __asm__ __volatile__ (		\
+	"l.mtspr r0,%1,%0"					\
+	: : "K" (_spr), "r" (_val))
+#define mtspr_off(_spr, _off, _val) __asm__ __volatile__ (	\
+	"l.mtspr %0,%1,%2"					\
+	: : "r" (_off), "r" (_val), "K" (_spr))
+
+static inline unsigned long mfspr(unsigned long add)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("l.mfspr %0,r0,%1" : "=r" (ret) : "K" (add));
+	return ret;
+}
+
+static inline unsigned long mfspr_off(unsigned long add, unsigned long offset)
+{
+	unsigned long ret;
+	__asm__ __volatile__ ("l.mfspr %0,%1,%2" : "=r" (ret)
+						 : "r" (offset), "K" (add));
+	return ret;
+}
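
A usage sketch for the accessors above, assuming <asm/spr_defs.h> is also included for the register numbers and masks; the helper name is hypothetical:

	static inline int example_has_tick_timer(void)
	{
		unsigned long upr = mfspr(SPR_UPR);	/* unit present register */

		return (upr & SPR_UPR_TTP) != 0;	/* tick timer unit present? */
	}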
+
+#endif
diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h
new file mode 100644
index 0000000..154b5a1
--- /dev/null
+++ b/arch/openrisc/include/asm/spr_defs.h
@@ -0,0 +1,618 @@
+/*
+ * OpenRISC Linux
+ *
+ * SPR Definitions
+ *
+ * Copyright (C) 2000 Damjan Lampret
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2008, 2010 Embecosm Limited
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This file is part of OpenRISC 1000 Architectural Simulator.
+ */
+
+#ifndef SPR_DEFS__H
+#define SPR_DEFS__H
+
+/* Definition of special-purpose registers (SPRs). */
+
+#define MAX_GRPS (32)
+#define MAX_SPRS_PER_GRP_BITS (11)
+#define MAX_SPRS_PER_GRP (1 << MAX_SPRS_PER_GRP_BITS)
+#define MAX_SPRS (0x10000)
+
+/* Base addresses for the groups */
+#define SPRGROUP_SYS	(0 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DMMU	(1 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IMMU	(2 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_DC	(3 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_IC	(4 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_MAC	(5 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_D	(6 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PC	(7 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PM	(8 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_PIC	(9 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_TT	(10 << MAX_SPRS_PER_GRP_BITS)
+#define SPRGROUP_FP	(11 << MAX_SPRS_PER_GRP_BITS)
+
+/* System control and status group */
+#define SPR_VR		(SPRGROUP_SYS + 0)
+#define SPR_UPR		(SPRGROUP_SYS + 1)
+#define SPR_CPUCFGR	(SPRGROUP_SYS + 2)
+#define SPR_DMMUCFGR	(SPRGROUP_SYS + 3)
+#define SPR_IMMUCFGR	(SPRGROUP_SYS + 4)
+#define SPR_DCCFGR	(SPRGROUP_SYS + 5)
+#define SPR_ICCFGR	(SPRGROUP_SYS + 6)
+#define SPR_DCFGR	(SPRGROUP_SYS + 7)
+#define SPR_PCCFGR	(SPRGROUP_SYS + 8)
+#define SPR_VR2		(SPRGROUP_SYS + 9)
+#define SPR_AVR		(SPRGROUP_SYS + 10)
+#define SPR_EVBAR	(SPRGROUP_SYS + 11)
+#define SPR_AECR	(SPRGROUP_SYS + 12)
+#define SPR_AESR	(SPRGROUP_SYS + 13)
+#define SPR_NPC         (SPRGROUP_SYS + 16)  /* CZ 21/06/01 */
+#define SPR_SR		(SPRGROUP_SYS + 17)  /* CZ 21/06/01 */
+#define SPR_PPC         (SPRGROUP_SYS + 18)  /* CZ 21/06/01 */
+#define SPR_FPCSR       (SPRGROUP_SYS + 20)  /* CZ 21/06/01 */
+#define SPR_EPCR_BASE	(SPRGROUP_SYS + 32)  /* CZ 21/06/01 */
+#define SPR_EPCR_LAST	(SPRGROUP_SYS + 47)  /* CZ 21/06/01 */
+#define SPR_EEAR_BASE	(SPRGROUP_SYS + 48)
+#define SPR_EEAR_LAST	(SPRGROUP_SYS + 63)
+#define SPR_ESR_BASE	(SPRGROUP_SYS + 64)
+#define SPR_ESR_LAST	(SPRGROUP_SYS + 79)
+#define SPR_COREID	(SPRGROUP_SYS + 128)
+#define SPR_NUMCORES	(SPRGROUP_SYS + 129)
+#define SPR_GPR_BASE	(SPRGROUP_SYS + 1024)
+
+/* Data MMU group */
+#define SPR_DMMUCR	(SPRGROUP_DMMU + 0)
+#define SPR_DTLBEIR	(SPRGROUP_DMMU + 2)
+#define SPR_DTLBMR_BASE(WAY)	(SPRGROUP_DMMU + 0x200 + (WAY) * 0x100)
+#define SPR_DTLBMR_LAST(WAY)	(SPRGROUP_DMMU + 0x27f + (WAY) * 0x100)
+#define SPR_DTLBTR_BASE(WAY)	(SPRGROUP_DMMU + 0x280 + (WAY) * 0x100)
+#define SPR_DTLBTR_LAST(WAY)	(SPRGROUP_DMMU + 0x2ff + (WAY) * 0x100)
+
+/* Instruction MMU group */
+#define SPR_IMMUCR	(SPRGROUP_IMMU + 0)
+#define SPR_ITLBEIR	(SPRGROUP_IMMU + 2)
+#define SPR_ITLBMR_BASE(WAY)	(SPRGROUP_IMMU + 0x200 + (WAY) * 0x100)
+#define SPR_ITLBMR_LAST(WAY)	(SPRGROUP_IMMU + 0x27f + (WAY) * 0x100)
+#define SPR_ITLBTR_BASE(WAY)	(SPRGROUP_IMMU + 0x280 + (WAY) * 0x100)
+#define SPR_ITLBTR_LAST(WAY)	(SPRGROUP_IMMU + 0x2ff + (WAY) * 0x100)
+
+/* Data cache group */
+#define SPR_DCCR	(SPRGROUP_DC + 0)
+#define SPR_DCBPR	(SPRGROUP_DC + 1)
+#define SPR_DCBFR	(SPRGROUP_DC + 2)
+#define SPR_DCBIR	(SPRGROUP_DC + 3)
+#define SPR_DCBWR	(SPRGROUP_DC + 4)
+#define SPR_DCBLR	(SPRGROUP_DC + 5)
+#define SPR_DCR_BASE(WAY)	(SPRGROUP_DC + 0x200 + (WAY) * 0x200)
+#define SPR_DCR_LAST(WAY)	(SPRGROUP_DC + 0x3ff + (WAY) * 0x200)
+
+/* Instruction cache group */
+#define SPR_ICCR	(SPRGROUP_IC + 0)
+#define SPR_ICBPR	(SPRGROUP_IC + 1)
+#define SPR_ICBIR	(SPRGROUP_IC + 2)
+#define SPR_ICBLR	(SPRGROUP_IC + 3)
+#define SPR_ICR_BASE(WAY)	(SPRGROUP_IC + 0x200 + (WAY) * 0x200)
+#define SPR_ICR_LAST(WAY)	(SPRGROUP_IC + 0x3ff + (WAY) * 0x200)
+
+/* MAC group */
+#define SPR_MACLO	(SPRGROUP_MAC + 1)
+#define SPR_MACHI	(SPRGROUP_MAC + 2)
+
+/* Debug group */
+#define SPR_DVR(N)	(SPRGROUP_D + (N))
+#define SPR_DCR(N)	(SPRGROUP_D + 8 + (N))
+#define SPR_DMR1	(SPRGROUP_D + 16)
+#define SPR_DMR2	(SPRGROUP_D + 17)
+#define SPR_DWCR0	(SPRGROUP_D + 18)
+#define SPR_DWCR1	(SPRGROUP_D + 19)
+#define SPR_DSR		(SPRGROUP_D + 20)
+#define SPR_DRR		(SPRGROUP_D + 21)
+
+/* Performance counters group */
+#define SPR_PCCR(N)	(SPRGROUP_PC + (N))
+#define SPR_PCMR(N)	(SPRGROUP_PC + 8 + (N))
+
+/* Power management group */
+#define SPR_PMR (SPRGROUP_PM + 0)
+
+/* PIC group */
+#define SPR_PICMR (SPRGROUP_PIC + 0)
+#define SPR_PICPR (SPRGROUP_PIC + 1)
+#define SPR_PICSR (SPRGROUP_PIC + 2)
+
+/* Tick Timer group */
+#define SPR_TTMR (SPRGROUP_TT + 0)
+#define SPR_TTCR (SPRGROUP_TT + 1)
+
+/*
+ * Bit definitions for the Version Register
+ *
+ */
+#define SPR_VR_VER	0xff000000  /* Processor version */
+#define SPR_VR_CFG	0x00ff0000  /* Processor configuration */
+#define SPR_VR_RES	0x0000ffc0  /* Reserved */
+#define SPR_VR_REV	0x0000003f  /* Processor revision */
+#define SPR_VR_UVRP	0x00000040  /* Updated Version Registers Present */
+
+#define SPR_VR_VER_OFF	24
+#define SPR_VR_CFG_OFF	16
+#define SPR_VR_REV_OFF	0
+
+/*
+ * Bit definitions for the Version Register 2
+ */
+#define SPR_VR2_CPUID	0xff000000  /* Processor ID */
+#define SPR_VR2_VER	0x00ffffff  /* Processor version */
+
+/*
+ * Bit definitions for the Unit Present Register
+ *
+ */
+#define SPR_UPR_UP	   0x00000001  /* UPR present */
+#define SPR_UPR_DCP	   0x00000002  /* Data cache present */
+#define SPR_UPR_ICP	   0x00000004  /* Instruction cache present */
+#define SPR_UPR_DMP	   0x00000008  /* Data MMU present */
+#define SPR_UPR_IMP	   0x00000010  /* Instruction MMU present */
+#define SPR_UPR_MP	   0x00000020  /* MAC present */
+#define SPR_UPR_DUP	   0x00000040  /* Debug unit present */
+#define SPR_UPR_PCUP	   0x00000080  /* Performance counters unit present */
+#define SPR_UPR_PICP	   0x00000100  /* PIC present */
+#define SPR_UPR_PMP	   0x00000200  /* Power management present */
+#define SPR_UPR_TTP	   0x00000400  /* Tick timer present */
+#define SPR_UPR_RES	   0x00fe0000  /* Reserved */
+#define SPR_UPR_CUP	   0xff000000  /* Context units present */
+
+/*
+ * JPB: Bit definitions for the CPU configuration register
+ *
+ */
+#define SPR_CPUCFGR_NSGF   0x0000000f  /* Number of shadow GPR files */
+#define SPR_CPUCFGR_CGF	   0x00000010  /* Custom GPR file */
+#define SPR_CPUCFGR_OB32S  0x00000020  /* ORBIS32 supported */
+#define SPR_CPUCFGR_OB64S  0x00000040  /* ORBIS64 supported */
+#define SPR_CPUCFGR_OF32S  0x00000080  /* ORFPX32 supported */
+#define SPR_CPUCFGR_OF64S  0x00000100  /* ORFPX64 supported */
+#define SPR_CPUCFGR_OV64S  0x00000200  /* ORVDX64 supported */
+#define SPR_CPUCFGR_RES	   0xfffffc00  /* Reserved */
+
+/*
+ * JPB: Bit definitions for the Debug configuration register and other
+ * constants.
+ *
+ */
+
+#define SPR_DCFGR_NDP      0x00000007  /* Number of matchpoints mask */
+#define SPR_DCFGR_NDP1     0x00000000  /* One matchpoint supported */
+#define SPR_DCFGR_NDP2     0x00000001  /* Two matchpoints supported */
+#define SPR_DCFGR_NDP3     0x00000002  /* Three matchpoints supported */
+#define SPR_DCFGR_NDP4     0x00000003  /* Four matchpoints supported */
+#define SPR_DCFGR_NDP5     0x00000004  /* Five matchpoints supported */
+#define SPR_DCFGR_NDP6     0x00000005  /* Six matchpoints supported */
+#define SPR_DCFGR_NDP7     0x00000006  /* Seven matchpoints supported */
+#define SPR_DCFGR_NDP8     0x00000007  /* Eight matchpoints supported */
+#define SPR_DCFGR_WPCI     0x00000008  /* Watchpoint counters implemented */
+
+#define MATCHPOINTS_TO_NDP(n) (1 == n ? SPR_DCFGR_NDP1 : \
+                               2 == n ? SPR_DCFGR_NDP2 : \
+                               3 == n ? SPR_DCFGR_NDP3 : \
+                               4 == n ? SPR_DCFGR_NDP4 : \
+                               5 == n ? SPR_DCFGR_NDP5 : \
+                               6 == n ? SPR_DCFGR_NDP6 : \
+                               7 == n ? SPR_DCFGR_NDP7 : SPR_DCFGR_NDP8)
+#define MAX_MATCHPOINTS  8
+#define MAX_WATCHPOINTS  (MAX_MATCHPOINTS + 2)
+
+/*
+ * Bit definitions for the Supervision Register
+ *
+ */
+#define SPR_SR_SM          0x00000001  /* Supervisor Mode */
+#define SPR_SR_TEE         0x00000002  /* Tick timer Exception Enable */
+#define SPR_SR_IEE         0x00000004  /* Interrupt Exception Enable */
+#define SPR_SR_DCE         0x00000008  /* Data Cache Enable */
+#define SPR_SR_ICE         0x00000010  /* Instruction Cache Enable */
+#define SPR_SR_DME         0x00000020  /* Data MMU Enable */
+#define SPR_SR_IME         0x00000040  /* Instruction MMU Enable */
+#define SPR_SR_LEE         0x00000080  /* Little Endian Enable */
+#define SPR_SR_CE          0x00000100  /* CID Enable */
+#define SPR_SR_F           0x00000200  /* Condition Flag */
+#define SPR_SR_CY          0x00000400  /* Carry flag */
+#define SPR_SR_OV          0x00000800  /* Overflow flag */
+#define SPR_SR_OVE         0x00001000  /* Overflow flag Exception */
+#define SPR_SR_DSX         0x00002000  /* Delay Slot Exception */
+#define SPR_SR_EPH         0x00004000  /* Exception Prefix High */
+#define SPR_SR_FO          0x00008000  /* Fixed one */
+#define SPR_SR_SUMRA       0x00010000  /* Supervisor SPR read access */
+#define SPR_SR_RES         0x0ffe0000  /* Reserved */
+#define SPR_SR_CID         0xf0000000  /* Context ID */
+
+/*
+ * Bit definitions for the Data MMU Control Register
+ *
+ */
+#define SPR_DMMUCR_P2S	   0x0000003e  /* Level 2 Page Size */
+#define SPR_DMMUCR_P1S	   0x000007c0  /* Level 1 Page Size */
+#define SPR_DMMUCR_VADDR_WIDTH	0x0000f800  /* Virtual ADDR Width */
+#define SPR_DMMUCR_PADDR_WIDTH	0x000f0000  /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Instruction MMU Control Register
+ *
+ */
+#define SPR_IMMUCR_P2S	   0x0000003e  /* Level 2 Page Size */
+#define SPR_IMMUCR_P1S	   0x000007c0  /* Level 1 Page Size */
+#define SPR_IMMUCR_VADDR_WIDTH	0x0000f800  /* Virtual ADDR Width */
+#define SPR_IMMUCR_PADDR_WIDTH	0x000f0000  /* Physical ADDR Width */
+
+/*
+ * Bit definitions for the Data TLB Match Register
+ *
+ */
+#define SPR_DTLBMR_V	   0x00000001  /* Valid */
+#define SPR_DTLBMR_PL1	   0x00000002  /* Page Level 1 (if 0 then PL2) */
+#define SPR_DTLBMR_CID	   0x0000003c  /* Context ID */
+#define SPR_DTLBMR_LRU	   0x000000c0  /* Least Recently Used */
+#define SPR_DTLBMR_VPN	   0xfffff000  /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Data TLB Translate Register
+ *
+ */
+#define SPR_DTLBTR_CC	   0x00000001  /* Cache Coherency */
+#define SPR_DTLBTR_CI	   0x00000002  /* Cache Inhibit */
+#define SPR_DTLBTR_WBC	   0x00000004  /* Write-Back Cache */
+#define SPR_DTLBTR_WOM	   0x00000008  /* Weakly-Ordered Memory */
+#define SPR_DTLBTR_A	   0x00000010  /* Accessed */
+#define SPR_DTLBTR_D	   0x00000020  /* Dirty */
+#define SPR_DTLBTR_URE	   0x00000040  /* User Read Enable */
+#define SPR_DTLBTR_UWE	   0x00000080  /* User Write Enable */
+#define SPR_DTLBTR_SRE	   0x00000100  /* Supervisor Read Enable */
+#define SPR_DTLBTR_SWE	   0x00000200  /* Supervisor Write Enable */
+#define SPR_DTLBTR_PPN	   0xfffff000  /* Physical Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Match Register
+ *
+ */
+#define SPR_ITLBMR_V	   0x00000001  /* Valid */
+#define SPR_ITLBMR_PL1	   0x00000002  /* Page Level 1 (if 0 then PL2) */
+#define SPR_ITLBMR_CID	   0x0000003c  /* Context ID */
+#define SPR_ITLBMR_LRU	   0x000000c0  /* Least Recently Used */
+#define SPR_ITLBMR_VPN	   0xfffff000  /* Virtual Page Number */
+
+/*
+ * Bit definitions for the Instruction TLB Translate Register
+ *
+ */
+#define SPR_ITLBTR_CC	   0x00000001  /* Cache Coherency */
+#define SPR_ITLBTR_CI	   0x00000002  /* Cache Inhibit */
+#define SPR_ITLBTR_WBC	   0x00000004  /* Write-Back Cache */
+#define SPR_ITLBTR_WOM	   0x00000008  /* Weakly-Ordered Memory */
+#define SPR_ITLBTR_A	   0x00000010  /* Accessed */
+#define SPR_ITLBTR_D	   0x00000020  /* Dirty */
+#define SPR_ITLBTR_SXE	   0x00000040  /* Supervisor Execute Enable */
+#define SPR_ITLBTR_UXE	   0x00000080  /* User Execute Enable */
+#define SPR_ITLBTR_PPN	   0xfffff000  /* Physical Page Number */
+
+/*
+ * Bit definitions for Data Cache Control register
+ *
+ */
+#define SPR_DCCR_EW	   0x000000ff  /* Enable ways */
+
+/*
+ * Bit definitions for Insn Cache Control register
+ *
+ */
+#define SPR_ICCR_EW	   0x000000ff  /* Enable ways */
+
+/*
+ * Bit definitions for Data Cache Configuration Register
+ *
+ */
+
+#define SPR_DCCFGR_NCW		0x00000007
+#define SPR_DCCFGR_NCS		0x00000078
+#define SPR_DCCFGR_CBS		0x00000080
+#define SPR_DCCFGR_CWS		0x00000100
+#define SPR_DCCFGR_CCRI		0x00000200
+#define SPR_DCCFGR_CBIRI	0x00000400
+#define SPR_DCCFGR_CBPRI	0x00000800
+#define SPR_DCCFGR_CBLRI	0x00001000
+#define SPR_DCCFGR_CBFRI	0x00002000
+#define SPR_DCCFGR_CBWBRI	0x00004000
+
+#define SPR_DCCFGR_NCW_OFF      0
+#define SPR_DCCFGR_NCS_OFF      3
+#define SPR_DCCFGR_CBS_OFF	7
+
+/*
+ * Bit definitions for Instruction Cache Configuration Register
+ *
+ */
+#define SPR_ICCFGR_NCW		0x00000007
+#define SPR_ICCFGR_NCS		0x00000078
+#define SPR_ICCFGR_CBS		0x00000080
+#define SPR_ICCFGR_CCRI		0x00000200
+#define SPR_ICCFGR_CBIRI	0x00000400
+#define SPR_ICCFGR_CBPRI	0x00000800
+#define SPR_ICCFGR_CBLRI	0x00001000
+
+#define SPR_ICCFGR_NCW_OFF      0
+#define SPR_ICCFGR_NCS_OFF      3
+#define SPR_ICCFGR_CBS_OFF	7
+
+/*
+ * Bit definitions for Data MMU Configuration Register
+ *
+ */
+
+#define SPR_DMMUCFGR_NTW	0x00000003
+#define SPR_DMMUCFGR_NTS	0x0000001C
+#define SPR_DMMUCFGR_NAE	0x000000E0
+#define SPR_DMMUCFGR_CRI	0x00000100
+#define SPR_DMMUCFGR_PRI        0x00000200
+#define SPR_DMMUCFGR_TEIRI	0x00000400
+#define SPR_DMMUCFGR_HTR	0x00000800
+
+#define SPR_DMMUCFGR_NTW_OFF	0
+#define SPR_DMMUCFGR_NTS_OFF	2
+
+/*
+ * Bit definitions for Instruction MMU Configuration Register
+ *
+ */
+
+#define SPR_IMMUCFGR_NTW	0x00000003
+#define SPR_IMMUCFGR_NTS	0x0000001C
+#define SPR_IMMUCFGR_NAE	0x000000E0
+#define SPR_IMMUCFGR_CRI	0x00000100
+#define SPR_IMMUCFGR_PRI	0x00000200
+#define SPR_IMMUCFGR_TEIRI	0x00000400
+#define SPR_IMMUCFGR_HTR	0x00000800
+
+#define SPR_IMMUCFGR_NTW_OFF	0
+#define SPR_IMMUCFGR_NTS_OFF	2
+
+/*
+ * Bit definitions for Debug Control registers
+ *
+ */
+#define SPR_DCR_DP	0x00000001  /* DVR/DCR present */
+#define SPR_DCR_CC	0x0000000e  /* Compare condition */
+#define SPR_DCR_SC	0x00000010  /* Signed compare */
+#define SPR_DCR_CT	0x000000e0  /* Compare to */
+
+/* Bit results with SPR_DCR_CC mask */
+#define SPR_DCR_CC_MASKED 0x00000000
+#define SPR_DCR_CC_EQUAL  0x00000002
+#define SPR_DCR_CC_LESS   0x00000004
+#define SPR_DCR_CC_LESSE  0x00000006
+#define SPR_DCR_CC_GREAT  0x00000008
+#define SPR_DCR_CC_GREATE 0x0000000a
+#define SPR_DCR_CC_NEQUAL 0x0000000c
+
+/* Bit results with SPR_DCR_CT mask */
+#define SPR_DCR_CT_DISABLED 0x00000000
+#define SPR_DCR_CT_IFEA     0x00000020
+#define SPR_DCR_CT_LEA      0x00000040
+#define SPR_DCR_CT_SEA      0x00000060
+#define SPR_DCR_CT_LD       0x00000080
+#define SPR_DCR_CT_SD       0x000000a0
+#define SPR_DCR_CT_LSEA     0x000000c0
+#define SPR_DCR_CT_LSD	    0x000000e0
+/* SPR_DCR_CT_LSD doesn't seem to be implemented anywhere in or1ksim. 2004-1-30 HP */
+
+/*
+ * Bit definitions for Debug Mode 1 register
+ *
+ */
+#define SPR_DMR1_CW       0x000fffff  /* Chain register pair data */
+#define SPR_DMR1_CW0_AND  0x00000001
+#define SPR_DMR1_CW0_OR   0x00000002
+#define SPR_DMR1_CW0      (SPR_DMR1_CW0_AND | SPR_DMR1_CW0_OR)
+#define SPR_DMR1_CW1_AND  0x00000004
+#define SPR_DMR1_CW1_OR   0x00000008
+#define SPR_DMR1_CW1      (SPR_DMR1_CW1_AND | SPR_DMR1_CW1_OR)
+#define SPR_DMR1_CW2_AND  0x00000010
+#define SPR_DMR1_CW2_OR   0x00000020
+#define SPR_DMR1_CW2      (SPR_DMR1_CW2_AND | SPR_DMR1_CW2_OR)
+#define SPR_DMR1_CW3_AND  0x00000040
+#define SPR_DMR1_CW3_OR   0x00000080
+#define SPR_DMR1_CW3      (SPR_DMR1_CW3_AND | SPR_DMR1_CW3_OR)
+#define SPR_DMR1_CW4_AND  0x00000100
+#define SPR_DMR1_CW4_OR   0x00000200
+#define SPR_DMR1_CW4      (SPR_DMR1_CW4_AND | SPR_DMR1_CW4_OR)
+#define SPR_DMR1_CW5_AND  0x00000400
+#define SPR_DMR1_CW5_OR   0x00000800
+#define SPR_DMR1_CW5      (SPR_DMR1_CW5_AND | SPR_DMR1_CW5_OR)
+#define SPR_DMR1_CW6_AND  0x00001000
+#define SPR_DMR1_CW6_OR   0x00002000
+#define SPR_DMR1_CW6      (SPR_DMR1_CW6_AND | SPR_DMR1_CW6_OR)
+#define SPR_DMR1_CW7_AND  0x00004000
+#define SPR_DMR1_CW7_OR   0x00008000
+#define SPR_DMR1_CW7      (SPR_DMR1_CW7_AND | SPR_DMR1_CW7_OR)
+#define SPR_DMR1_CW8_AND  0x00010000
+#define SPR_DMR1_CW8_OR   0x00020000
+#define SPR_DMR1_CW8      (SPR_DMR1_CW8_AND | SPR_DMR1_CW8_OR)
+#define SPR_DMR1_CW9_AND  0x00040000
+#define SPR_DMR1_CW9_OR   0x00080000
+#define SPR_DMR1_CW9      (SPR_DMR1_CW9_AND | SPR_DMR1_CW9_OR)
+#define SPR_DMR1_RES1      0x00300000  /* Reserved */
+#define SPR_DMR1_ST	  0x00400000  /* Single-step trace*/
+#define SPR_DMR1_BT	  0x00800000  /* Branch trace */
+#define SPR_DMR1_RES2	  0xff000000  /* Reserved */
+
+/*
+ * Bit definitions for Debug Mode 2 register. AWTC and WGB corrected by JPB
+ *
+ */
+#define SPR_DMR2_WCE0	   0x00000001  /* Watchpoint counter 0 enable */
+#define SPR_DMR2_WCE1	   0x00000002  /* Watchpoint counter 1 enable */
+#define SPR_DMR2_AWTC	   0x00000ffc  /* Assign watchpoints to counters */
+#define SPR_DMR2_AWTC_OFF           2  /* Bit offset to AWTC field */
+#define SPR_DMR2_WGB	   0x003ff000  /* Watchpoints generating breakpoint */
+#define SPR_DMR2_WGB_OFF           12  /* Bit offset to WGB field */
+#define SPR_DMR2_WBS	   0xffc00000  /* JPB: Watchpoint status */
+#define SPR_DMR2_WBS_OFF           22  /* Bit offset to WBS field */
+
+/*
+ * Bit definitions for Debug watchpoint counter registers
+ *
+ */
+#define SPR_DWCR_COUNT	    0x0000ffff  /* Count */
+#define SPR_DWCR_MATCH	    0xffff0000  /* Match */
+#define SPR_DWCR_MATCH_OFF          16  /* Match bit offset */
+
+/*
+ * Bit definitions for Debug stop register
+ *
+ */
+#define SPR_DSR_RSTE	0x00000001  /* Reset exception */
+#define SPR_DSR_BUSEE	0x00000002  /* Bus error exception */
+#define SPR_DSR_DPFE	0x00000004  /* Data Page Fault exception */
+#define SPR_DSR_IPFE	0x00000008  /* Insn Page Fault exception */
+#define SPR_DSR_TTE	0x00000010  /* Tick Timer exception */
+#define SPR_DSR_AE	0x00000020  /* Alignment exception */
+#define SPR_DSR_IIE	0x00000040  /* Illegal Instruction exception */
+#define SPR_DSR_IE	0x00000080  /* Interrupt exception */
+#define SPR_DSR_DME	0x00000100  /* DTLB miss exception */
+#define SPR_DSR_IME	0x00000200  /* ITLB miss exception */
+#define SPR_DSR_RE	0x00000400  /* Range exception */
+#define SPR_DSR_SCE	0x00000800  /* System call exception */
+#define SPR_DSR_FPE     0x00001000  /* Floating Point Exception */
+#define SPR_DSR_TE	0x00002000  /* Trap exception */
+
+/*
+ * Bit definitions for Debug reason register
+ *
+ */
+#define SPR_DRR_RSTE	0x00000001  /* Reset exception */
+#define SPR_DRR_BUSEE	0x00000002  /* Bus error exception */
+#define SPR_DRR_DPFE	0x00000004  /* Data Page Fault exception */
+#define SPR_DRR_IPFE	0x00000008  /* Insn Page Fault exception */
+#define SPR_DRR_TTE	0x00000010  /* Tick Timer exception */
+#define SPR_DRR_AE	0x00000020  /* Alignment exception */
+#define SPR_DRR_IIE	0x00000040  /* Illegal Instruction exception */
+#define SPR_DRR_IE	0x00000080  /* Interrupt exception */
+#define SPR_DRR_DME	0x00000100  /* DTLB miss exception */
+#define SPR_DRR_IME	0x00000200  /* ITLB miss exception */
+#define SPR_DRR_RE	0x00000400  /* Range exception */
+#define SPR_DRR_SCE	0x00000800  /* System call exception */
+#define SPR_DRR_FPE     0x00001000  /* Floating Point Exception */
+#define SPR_DRR_TE	0x00002000  /* Trap exception */
+
+/*
+ * Bit definitions for Performance counters mode registers
+ *
+ */
+#define SPR_PCMR_CP	0x00000001  /* Counter present */
+#define SPR_PCMR_UMRA	0x00000002  /* User mode read access */
+#define SPR_PCMR_CISM	0x00000004  /* Count in supervisor mode */
+#define SPR_PCMR_CIUM	0x00000008  /* Count in user mode */
+#define SPR_PCMR_LA	0x00000010  /* Load access event */
+#define SPR_PCMR_SA	0x00000020  /* Store access event */
+#define SPR_PCMR_IF	0x00000040  /* Instruction fetch event*/
+#define SPR_PCMR_DCM	0x00000080  /* Data cache miss event */
+#define SPR_PCMR_ICM	0x00000100  /* Insn cache miss event */
+#define SPR_PCMR_IFS	0x00000200  /* Insn fetch stall event */
+#define SPR_PCMR_LSUS	0x00000400  /* LSU stall event */
+#define SPR_PCMR_BS	0x00000800  /* Branch stall event */
+#define SPR_PCMR_DTLBM	0x00001000  /* DTLB miss event */
+#define SPR_PCMR_ITLBM	0x00002000  /* ITLB miss event */
+#define SPR_PCMR_DDS	0x00004000  /* Data dependency stall event */
+#define SPR_PCMR_WPE	0x03ff8000  /* Watchpoint events */
+
+/*
+ * Bit definitions for the Power management register
+ *
+ */
+#define SPR_PMR_SDF	0x0000000f  /* Slow down factor */
+#define SPR_PMR_DME	0x00000010  /* Doze mode enable */
+#define SPR_PMR_SME	0x00000020  /* Sleep mode enable */
+#define SPR_PMR_DCGE	0x00000040  /* Dynamic clock gating enable */
+#define SPR_PMR_SUME	0x00000080  /* Suspend mode enable */
+
+/*
+ * Bit definitions for PICMR
+ *
+ */
+#define SPR_PICMR_IUM	0xfffffffc  /* Interrupt unmask */
+
+/*
+ * Bit definitions for PICPR
+ *
+ */
+#define SPR_PICPR_IPRIO	0xfffffffc  /* Interrupt priority */
+
+/*
+ * Bit definitions for PICSR
+ *
+ */
+#define SPR_PICSR_IS	0xffffffff  /* Interrupt status */
+
+/*
+ * Bit definitions for Tick Timer Control Register
+ *
+ */
+
+#define SPR_TTCR_CNT	0xffffffff  /* Count, time period */
+#define SPR_TTMR_TP	0x0fffffff  /* Time period */
+#define SPR_TTMR_IP	0x10000000  /* Interrupt Pending */
+#define SPR_TTMR_IE	0x20000000  /* Interrupt Enable */
+#define SPR_TTMR_DI	0x00000000  /* Disabled */
+#define SPR_TTMR_RT	0x40000000  /* Restart tick */
+#define SPR_TTMR_SR     0x80000000  /* Single run */
+#define SPR_TTMR_CR     0xc0000000  /* Continuous run */
+#define SPR_TTMR_M      0xc0000000  /* Tick mode */
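
A sketch of how the tick timer bits combine, roughly what a clockevent driver might do; mtspr() comes from <asm/spr.h> and the helper name is hypothetical:

	static inline void example_timer_arm(unsigned long delta)
	{
		mtspr(SPR_TTCR, 0);				/* restart the counter */
		mtspr(SPR_TTMR, SPR_TTMR_CR | SPR_TTMR_IE |	/* continuous run + IRQ */
				(delta & SPR_TTMR_TP));		/* match after 'delta' ticks */
	}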
+
+/*
+ * Bit definitions for the FP Control Status Register
+ *
+ */
+#define SPR_FPCSR_FPEE  0x00000001  /* Floating Point Exception Enable */
+#define SPR_FPCSR_RM    0x00000006  /* Rounding Mode */
+#define SPR_FPCSR_OVF   0x00000008  /* Overflow Flag */
+#define SPR_FPCSR_UNF   0x00000010  /* Underflow Flag */
+#define SPR_FPCSR_SNF   0x00000020  /* SNAN Flag */
+#define SPR_FPCSR_QNF   0x00000040  /* QNAN Flag */
+#define SPR_FPCSR_ZF    0x00000080  /* Zero Flag */
+#define SPR_FPCSR_IXF   0x00000100  /* Inexact Flag */
+#define SPR_FPCSR_IVF   0x00000200  /* Invalid Flag */
+#define SPR_FPCSR_INF   0x00000400  /* Infinity Flag */
+#define SPR_FPCSR_DZF   0x00000800  /* Divide By Zero Flag */
+#define SPR_FPCSR_ALLF (SPR_FPCSR_OVF | SPR_FPCSR_UNF | SPR_FPCSR_SNF | \
+			SPR_FPCSR_QNF | SPR_FPCSR_ZF | SPR_FPCSR_IXF |  \
+			SPR_FPCSR_IVF | SPR_FPCSR_INF | SPR_FPCSR_DZF)
+
+#define FPCSR_RM_RN (0<<1)
+#define FPCSR_RM_RZ (1<<1)
+#define FPCSR_RM_RIP (2<<1)
+#define FPCSR_RM_RIN (3<<1)
+
+/*
+ * l.nop constants
+ *
+ */
+#define NOP_NOP          0x0000      /* Normal nop instruction */
+#define NOP_EXIT         0x0001      /* End of simulation */
+#define NOP_REPORT       0x0002      /* Simple report */
+/*#define NOP_PRINTF       0x0003       Simprintf instruction (obsolete)*/
+#define NOP_PUTC         0x0004      /* JPB: Simputc instruction */
+#define NOP_CNT_RESET    0x0005	     /* Reset statistics counters */
+#define NOP_GET_TICKS    0x0006	     /* JPB: Get # ticks running */
+#define NOP_GET_PS       0x0007      /* JPB: Get picosecs/cycle */
+#define NOP_REPORT_FIRST 0x0400      /* Report with number */
+#define NOP_REPORT_LAST  0x03ff      /* Report with number */
+
+#endif	/* SPR_DEFS__H */
diff --git a/arch/openrisc/include/asm/string.h b/arch/openrisc/include/asm/string.h
new file mode 100644
index 0000000..69b9754
--- /dev/null
+++ b/arch/openrisc/include/asm/string.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_OPENRISC_STRING_H
+#define __ASM_OPENRISC_STRING_H
+
+#define __HAVE_ARCH_MEMSET
+extern void *memset(void *s, int c, __kernel_size_t n);
+
+#define __HAVE_ARCH_MEMCPY
+extern void *memcpy(void *dest, __const void *src, __kernel_size_t n);
+
+#endif /* __ASM_OPENRISC_STRING_H */
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
new file mode 100644
index 0000000..2db9f1c
--- /dev/null
+++ b/arch/openrisc/include/asm/syscall.h
@@ -0,0 +1,79 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALL_H__
+#define __ASM_OPENRISC_SYSCALL_H__
+
+#include <uapi/linux/audit.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+
+static inline int
+syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
+{
+	return regs->orig_gpr11;
+}
+
+static inline void
+syscall_rollback(struct task_struct *task, struct pt_regs *regs)
+{
+	regs->gpr[11] = regs->orig_gpr11;
+}
+
+static inline long
+syscall_get_error(struct task_struct *task, struct pt_regs *regs)
+{
+	return IS_ERR_VALUE(regs->gpr[11]) ? regs->gpr[11] : 0;
+}
+
+static inline long
+syscall_get_return_value(struct task_struct *task, struct pt_regs *regs)
+{
+	return regs->gpr[11];
+}
+
+static inline void
+syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+			 int error, long val)
+{
+	regs->gpr[11] = (long) error ?: val;
+}
+
+static inline void
+syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n, unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+	memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
+}
+
+static inline void
+syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+		      unsigned int i, unsigned int n, const unsigned long *args)
+{
+	BUG_ON(i + n > 6);
+
+	memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
+}
+
+static inline int syscall_get_arch(void)
+{
+	return AUDIT_ARCH_OPENRISC;
+}
+#endif
diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h
new file mode 100644
index 0000000..8ee8168
--- /dev/null
+++ b/arch/openrisc/include/asm/syscalls.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_SYSCALLS_H
+#define __ASM_OPENRISC_SYSCALLS_H
+
+asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
+				unsigned long *v2);
+
+#include <asm-generic/syscalls.h>
+
+asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
+			void __user *parent_tid, void __user *child_tid, int tls);
+asmlinkage long __sys_fork(void);
+
+#define sys_clone __sys_clone
+#define sys_fork __sys_fork
+
+#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/include/asm/thread_info.h b/arch/openrisc/include/asm/thread_info.h
new file mode 100644
index 0000000..5c15dfa
--- /dev/null
+++ b/arch/openrisc/include/asm/thread_info.h
@@ -0,0 +1,125 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _ASM_THREAD_INFO_H
+#define _ASM_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+#include <asm/processor.h>
+#endif
+
+
+/* THREAD_SIZE is the size of the task_struct/kernel_stack combo.
+ * Normally, the stack is found by doing something like p + THREAD_SIZE;
+ * in or32, a page is 8192 bytes, which seems like a sane size.
+ */
+
+#define THREAD_SIZE_ORDER 0
+#define THREAD_SIZE       (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - this struct shares the supervisor stack pages
+ * - if the contents of this structure are changed, the assembly constants
+ *   must also be changed
+ */
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_segment_t;
+
+struct thread_info {
+	struct task_struct	*task;		/* main task structure */
+	unsigned long		flags;		/* low level flags */
+	__u32			cpu;		/* current CPU */
+	__s32			preempt_count; /* 0 => preemptable, <0 => BUG */
+
+	mm_segment_t		addr_limit; /* thread address space:
+					       0-0x7FFFFFFF for user-thread
+					       0-0xFFFFFFFF for kernel-thread
+					     */
+	__u8			supervisor_stack[0];
+
+	/* saved context data */
+	unsigned long           ksp;
+};
+#endif
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#ifndef __ASSEMBLY__
+#define INIT_THREAD_INFO(tsk)				\
+{							\
+	.task		= &tsk,				\
+	.flags		= 0,				\
+	.cpu		= 0,				\
+	.preempt_count	= INIT_PREEMPT_COUNT,		\
+	.addr_limit	= KERNEL_DS,			\
+	.ksp            = 0,                            \
+}
+
+/* how to get the thread information struct from C */
+register struct thread_info *current_thread_info_reg asm("r10");
+#define current_thread_info()   (current_thread_info_reg)
+
+#define get_thread_info(ti) get_task_struct((ti)->task)
+#define put_thread_info(ti) put_task_struct((ti)->task)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ *   these are process state flags that various assembly files may need to
+ *   access
+ *   - pending work-to-be-done flags are in LSW
+ *   - other flags in MSW
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* resumption notification requested */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SINGLESTEP		4	/* restore singlestep on return to user
+					 * mode
+					 */
+#define TIF_SYSCALL_TRACEPOINT  8       /* for ftrace syscall instrumentation */
+#define TIF_RESTORE_SIGMASK     9
+#define TIF_POLLING_NRFLAG	16	/* true if poll_idle() is polling
+					 * TIF_NEED_RESCHED
+					 */
+#define TIF_MEMDIE              17
+
+#define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1<<TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
+#define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_POLLING_NRFLAG	(1<<TIF_POLLING_NRFLAG)
+
+
+/* Work to do when returning from interrupt/exception */
+/* For OpenRISC, this is anything in the LSW other than syscall trace */
+#define _TIF_WORK_MASK (0xff & ~(_TIF_SYSCALL_TRACE|_TIF_SINGLESTEP))
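
With the flag values above this expands to 0xff & ~(0x01 | 0x10) = 0xee, i.e. every low-byte work flag except the two trace-related ones.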
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/openrisc/include/asm/time.h b/arch/openrisc/include/asm/time.h
new file mode 100644
index 0000000..313ee97
--- /dev/null
+++ b/arch/openrisc/include/asm/time.h
@@ -0,0 +1,23 @@
+/*
+ * OpenRISC timer API
+ *
+ * Copyright (C) 2017 by Stafford Horne (shorne@gmail.com)
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_OR1K_TIME_H
+#define __ASM_OR1K_TIME_H
+
+extern void openrisc_clockevent_init(void);
+
+extern void openrisc_timer_set(unsigned long count);
+extern void openrisc_timer_set_next(unsigned long delta);
+
+#ifdef CONFIG_SMP
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
+#endif
+
+#endif /* __ASM_OR1K_TIME_H */
diff --git a/arch/openrisc/include/asm/timex.h b/arch/openrisc/include/asm/timex.h
new file mode 100644
index 0000000..9935cad
--- /dev/null
+++ b/arch/openrisc/include/asm/timex.h
@@ -0,0 +1,36 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TIMEX_H
+#define __ASM_OPENRISC_TIMEX_H
+
+#define get_cycles get_cycles
+
+#include <asm-generic/timex.h>
+#include <asm/spr.h>
+#include <asm/spr_defs.h>
+
+static inline cycles_t get_cycles(void)
+{
+	return mfspr(SPR_TTCR);
+}
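
A minimal sketch of using get_cycles() to measure an interval in timer ticks; the tick counter must be running for the value to advance, and the helper name is hypothetical:

	static inline cycles_t example_elapsed(cycles_t start)
	{
		return get_cycles() - start;	/* SPR_TTCR delta; wraps at 32 bits */
	}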
+
+/* This isn't really used any more */
+#define CLOCK_TICK_RATE 1000
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+
+#endif
diff --git a/arch/openrisc/include/asm/tlb.h b/arch/openrisc/include/asm/tlb.h
new file mode 100644
index 0000000..fa4376a
--- /dev/null
+++ b/arch/openrisc/include/asm/tlb.h
@@ -0,0 +1,34 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLB_H__
+#define __ASM_OPENRISC_TLB_H__
+
+/*
+ * or32 doesn't need any special per-pte or
+ * per-vma handling..
+ */
+#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_end_vma(tlb, vma) do { } while (0)
+#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+#include <linux/pagemap.h>
+#include <asm-generic/tlb.h>
+
+#endif /* __ASM_OPENRISC_TLB_H__ */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
new file mode 100644
index 0000000..94227f0
--- /dev/null
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -0,0 +1,68 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_TLBFLUSH_H
+#define __ASM_OPENRISC_TLBFLUSH_H
+
+#include <linux/mm.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/current.h>
+#include <linux/sched.h>
+
+/*
+ *  - flush_tlb() flushes the current mm struct TLBs
+ *  - flush_tlb_all() flushes all processes TLBs
+ *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ *  - flush_tlb_page(vma, vmaddr) flushes one page
+ *  - flush_tlb_range(mm, start, end) flushes a range of pages
+ */
+extern void local_flush_tlb_all(void);
+extern void local_flush_tlb_mm(struct mm_struct *mm);
+extern void local_flush_tlb_page(struct vm_area_struct *vma,
+				 unsigned long addr);
+extern void local_flush_tlb_range(struct vm_area_struct *vma,
+				  unsigned long start,
+				  unsigned long end);
+
+#ifndef CONFIG_SMP
+#define flush_tlb_all	local_flush_tlb_all
+#define flush_tlb_mm	local_flush_tlb_mm
+#define flush_tlb_page	local_flush_tlb_page
+#define flush_tlb_range	local_flush_tlb_range
+#else
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+#endif
+
+static inline void flush_tlb(void)
+{
+	flush_tlb_mm(current->mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	flush_tlb_range(NULL, start, end);
+}
+
+#endif /* __ASM_OPENRISC_TLBFLUSH_H */
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
new file mode 100644
index 0000000..bbf5c79
--- /dev/null
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -0,0 +1,269 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UACCESS_H
+#define __ASM_OPENRISC_UACCESS_H
+
+/*
+ * User space memory access functions
+ */
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/page.h>
+#include <asm/extable.h>
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+/* addr_limit is the maximum accessible address for the task. we misuse
+ * the KERNEL_DS and USER_DS values to both assign and compare the
+ * addr_limit values through the equally misnamed get/set_fs macros.
+ * (see above)
+ */
+
+#define KERNEL_DS	(~0UL)
+#define get_ds()	(KERNEL_DS)
+
+#define USER_DS		(TASK_SIZE)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a) == (b))
+
+/* Ensure that the range from addr to addr+size is all within the process'
+ * address space
+ */
+#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs()-size))
+
+/* Ensure that addr is below task's addr_limit */
+#define __addr_ok(addr) ((unsigned long) addr < get_fs())
+
+#define access_ok(type, addr, size) \
+	__range_ok((unsigned long)addr, (unsigned long)size)
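
As a worked example, with USER_DS = TASK_SIZE = 0x80000000, a request for addr = 0x7ffffffc and size = 8 is rejected because addr exceeds get_fs() - size (0x7ffffff8). Writing the test this way, rather than as addr + size <= limit, also keeps a huge size value from wrapping the sum around and slipping past the check.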
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ *
+ * As we use the same address space for kernel and user data on
+ * OpenRISC, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ */
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+extern long __put_user_bad(void);
+
+#define __put_user_nocheck(x, ptr, size)		\
+({							\
+	long __pu_err;					\
+	__put_user_size((x), (ptr), (size), __pu_err);	\
+	__pu_err;					\
+})
+
+#define __put_user_check(x, ptr, size)					\
+({									\
+	long __pu_err = -EFAULT;					\
+	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
+		__put_user_size((x), __pu_addr, (size), __pu_err);	\
+	__pu_err;							\
+})
+
+#define __put_user_size(x, ptr, size, retval)				\
+do {									\
+	retval = 0;							\
+	switch (size) {							\
+	case 1: __put_user_asm(x, ptr, retval, "l.sb"); break;		\
+	case 2: __put_user_asm(x, ptr, retval, "l.sh"); break;		\
+	case 4: __put_user_asm(x, ptr, retval, "l.sw"); break;		\
+	case 8: __put_user_asm2(x, ptr, retval); break;			\
+	default: __put_user_bad();					\
+	}								\
+} while (0)
+
+struct __large_struct {
+	unsigned long buf[100];
+};
+#define __m(x) (*(struct __large_struct *)(x))
+
+/*
+ * We don't tell gcc that we are accessing memory, but this is OK
+ * because we do not write to any memory gcc knows about, so there
+ * are no aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, op)			\
+	__asm__ __volatile__(					\
+		"1:	"op" 0(%2),%1\n"			\
+		"2:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"3:	l.addi %0,r0,%3\n"			\
+		"	l.j 2b\n"				\
+		"	l.nop\n"				\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align 2\n"				\
+		"	.long 1b,3b\n"				\
+		".previous"					\
+		: "=r"(err)					\
+		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __put_user_asm2(x, addr, err)				\
+	__asm__ __volatile__(					\
+		"1:	l.sw 0(%2),%1\n"			\
+		"2:	l.sw 4(%2),%H1\n"			\
+		"3:\n"						\
+		".section .fixup,\"ax\"\n"			\
+		"4:	l.addi %0,r0,%3\n"			\
+		"	l.j 3b\n"				\
+		"	l.nop\n"				\
+		".previous\n"					\
+		".section __ex_table,\"a\"\n"			\
+		"	.align 2\n"				\
+		"	.long 1b,4b\n"				\
+		"	.long 2b,4b\n"				\
+		".previous"					\
+		: "=r"(err)					\
+		: "r"(x), "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_nocheck(x, ptr, size)			\
+({								\
+	long __gu_err, __gu_val;				\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+	__gu_err;						\
+})
+
+#define __get_user_check(x, ptr, size)					\
+({									\
+	long __gu_err = -EFAULT, __gu_val = 0;				\
+	const __typeof__(*(ptr)) * __gu_addr = (ptr);			\
+	if (access_ok(VERIFY_READ, __gu_addr, size))			\
+		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	__gu_err;							\
+})
+
+extern long __get_user_bad(void);
+
+#define __get_user_size(x, ptr, size, retval)				\
+do {									\
+	retval = 0;							\
+	switch (size) {							\
+	case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break;		\
+	case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break;		\
+	case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break;		\
+	case 8: __get_user_asm2(x, ptr, retval); break;			\
+	default: (x) = __get_user_bad();				\
+	}								\
+} while (0)
+
+#define __get_user_asm(x, addr, err, op)		\
+	__asm__ __volatile__(				\
+		"1:	"op" %1,0(%2)\n"		\
+		"2:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"3:	l.addi %0,r0,%3\n"		\
+		"	l.addi %1,r0,0\n"		\
+		"	l.j 2b\n"			\
+		"	l.nop\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+		"	.align 2\n"			\
+		"	.long 1b,3b\n"			\
+		".previous"				\
+		: "=r"(err), "=r"(x)			\
+		: "r"(addr), "i"(-EFAULT), "0"(err))
+
+#define __get_user_asm2(x, addr, err)			\
+	__asm__ __volatile__(				\
+		"1:	l.lwz %1,0(%2)\n"		\
+		"2:	l.lwz %H1,4(%2)\n"		\
+		"3:\n"					\
+		".section .fixup,\"ax\"\n"		\
+		"4:	l.addi %0,r0,%3\n"		\
+		"	l.addi %1,r0,0\n"		\
+		"	l.addi %H1,r0,0\n"		\
+		"	l.j 3b\n"			\
+		"	l.nop\n"			\
+		".previous\n"				\
+		".section __ex_table,\"a\"\n"		\
+		"	.align 2\n"			\
+		"	.long 1b,4b\n"			\
+		"	.long 2b,4b\n"			\
+		".previous"				\
+		: "=r"(err), "=&r"(x)			\
+		: "r"(addr), "i"(-EFAULT), "0"(err))
+
+/* more complex routines */
+
+extern unsigned long __must_check
+__copy_tofrom_user(void *to, const void *from, unsigned long size);
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long size)
+{
+	return __copy_tofrom_user(to, (__force const void *)from, size);
+}
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long size)
+{
+	return __copy_tofrom_user((__force void *)to, from, size);
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+
+extern unsigned long __clear_user(void *addr, unsigned long size);
+
+static inline __must_check unsigned long
+clear_user(void *addr, unsigned long size)
+{
+	if (likely(access_ok(VERIFY_WRITE, addr, size)))
+		size = __clear_user(addr, size);
+	return size;
+}
+
+#define user_addr_max() \
+	(uaccess_kernel() ? ~0UL : TASK_SIZE)
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strnlen_user(const char __user *str, long n);
+
+#endif /* __ASM_OPENRISC_UACCESS_H */
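A hedged sketch of how these primitives are consumed; copy_reg_to_user(),
regval and uaddr are hypothetical names, and <linux/uaccess.h> plus
<linux/types.h> are assumed to be included. Note that in this 4.19-era API
access_ok() still takes a VERIFY_* argument, and __range_ok() also rejects
size values that would wrap past addr_limit:

	static int copy_reg_to_user(u32 regval, u32 __user *uaddr)
	{
		u32 tmp;

		/* put_user() bundles the access_ok() range check with the
		 * fault-handled store generated by __put_user_asm(). */
		if (put_user(regval, uaddr))
			return -EFAULT;

		/* The __ variants skip the range check, so it must be done
		 * explicitly beforehand. */
		if (!access_ok(VERIFY_READ, uaddr, sizeof(*uaddr)))
			return -EFAULT;
		if (__get_user(tmp, uaddr))
			return -EFAULT;

		return tmp == regval ? 0 : -EIO;
	}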
diff --git a/arch/openrisc/include/asm/unaligned.h b/arch/openrisc/include/asm/unaligned.h
new file mode 100644
index 0000000..1141cbd
--- /dev/null
+++ b/arch/openrisc/include/asm/unaligned.h
@@ -0,0 +1,51 @@
+/*
+ * OpenRISC Linux
+ *
+ * Linux architectural port borrowing liberally from similar works of
+ * others.  All original copyrights apply as per the original source
+ * declaration.
+ *
+ * OpenRISC implementation:
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * et al.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __ASM_OPENRISC_UNALIGNED_H
+#define __ASM_OPENRISC_UNALIGNED_H
+
+/*
+ * This is copied from the generic implementation, with the C-struct
+ * variant replaced by the memmove variant.  The GCC compiler
+ * for the OR32 arch optimizes too aggressively for the C-struct
+ * variant to work, so use the memmove variant instead.
+ *
+ * It may be worth considering implementing the unaligned access
+ * exception handler and allowing unaligned accesses (access_ok.h)...
+ * not sure if it would be much of a performance win without further
+ * investigation.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_le
+# define put_unaligned	__put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned	__get_unaligned_be
+# define put_unaligned	__put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_OPENRISC_UNALIGNED_H */
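A small sketch of the helpers this header selects, assuming a hypothetical
byte buffer filled by driver code (buf and the offsets are illustrative only):

	u8 buf[16];
	u32 v;

	/* Safe for any alignment of buf + 1; on this port it expands to the
	 * memmove-based accessor rather than a C-struct dereference. */
	v = get_unaligned((u32 *)(buf + 1));
	put_unaligned(v, (u32 *)(buf + 5));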
diff --git a/arch/openrisc/include/asm/unwinder.h b/arch/openrisc/include/asm/unwinder.h
new file mode 100644
index 0000000..165ec6f
--- /dev/null
+++ b/arch/openrisc/include/asm/unwinder.h
@@ -0,0 +1,20 @@
+/*
+ * OpenRISC unwinder.h
+ *
+ * Architecture API for unwinding stacks.
+ *
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef __ASM_OPENRISC_UNWINDER_H
+#define __ASM_OPENRISC_UNWINDER_H
+
+void unwind_stack(void *data, unsigned long *stack,
+		  void (*trace)(void *data, unsigned long addr,
+				int reliable));
+
+#endif /* __ASM_OPENRISC_UNWINDER_H */
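A hypothetical caller of the callback API above; print_entry() and
dump_current_stack() are illustrative names, not part of the patch:

	static void print_entry(void *data, unsigned long addr, int reliable)
	{
		/* Frames the unwinder could not verify are flagged with '?' */
		printk(KERN_INFO "%s[<%px>] %pS\n", reliable ? "" : "? ",
		       (void *)addr, (void *)addr);
	}

	static void dump_current_stack(void)
	{
		unsigned long stack;

		/* Start walking from the current stack position */
		unwind_stack(NULL, &stack, print_entry);
	}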