Update Linux to v5.10.109

Sourced from [1]

[1] https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.10.109.tar.xz
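
Note for reviewers: most of the xtensa header changes below apply a single
upstream convention. Inline assembly moves from positional operands
(%0, %1, ...) to named operands (%[result], %[tmp], ...), and where
possible the target word is passed as a "+m" memory operand instead of
a raw address register, so the compiler can track the memory access
itself. A minimal sketch of the new style follows; it is illustrative
only, not code from this patch, and it uses a plain (non-atomic)
load/store pair where the real implementations use the l32ex/s32c1i
primitives:

/* Illustrative sketch of the named-operand style; not from the patch. */
static inline int example_add_return(int i, int *v)
{
	int result;

	__asm__ __volatile__(
			/* named operands instead of %0/%1/%2 */
			"       l32i    %[result], %[mem]\n"
			"       add     %[result], %[result], %[i]\n"
			"       s32i    %[result], %[mem]\n"
			/* "+m" (*v) lets the compiler see the access */
			: [result] "=&a" (result), [mem] "+m" (*v)
			: [i] "a" (i)
			: "memory");

	return result;
}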

Change-Id: I19bca9fc6762d4e63bcf3e4cba88bbe560d9c76c
Signed-off-by: Olivier Deprez <olivier.deprez@arm.com>
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index ffa0cf7..adefb16 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1,35 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 generated-y += syscall_table.h
-generic-y += bug.h
-generic-y += compat.h
-generic-y += device.h
-generic-y += div64.h
-generic-y += dma-contiguous.h
-generic-y += dma-mapping.h
-generic-y += emergency-restart.h
-generic-y += exec.h
 generic-y += extable.h
-generic-y += fb.h
-generic-y += hardirq.h
-generic-y += irq_regs.h
-generic-y += irq_work.h
-generic-y += kdebug.h
-generic-y += kmap_types.h
-generic-y += kprobes.h
 generic-y += kvm_para.h
-generic-y += local.h
-generic-y += local64.h
 generic-y += mcs_spinlock.h
-generic-y += mm-arch-hooks.h
-generic-y += mmiowb.h
 generic-y += param.h
-generic-y += percpu.h
-generic-y += preempt.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
-generic-y += sections.h
-generic-y += topology.h
-generic-y += trace_clock.h
-generic-y += vga.h
-generic-y += word-at-a-time.h
-generic-y += xor.h
+generic-y += seccomp.h
+generic-y += user.h
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 71a7e84..bfc89e1 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -237,4 +237,6 @@
 #error Unsupported Xtensa ABI
 #endif
 
+#define __XTENSA_HANDLER	.section ".exception.text", "ax"
+
 #endif /* _XTENSA_ASMMACRO_H */
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7b00d26..744c2f4 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -19,8 +19,6 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define ATOMIC_INIT(i)	{ (i) }
-
 /*
  * This Xtensa implementation assumes that the right mechanism
  * for exclusion is for locking interrupts to level EXCM_LEVEL.
@@ -64,13 +62,13 @@
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32ex   %1, %3\n"			\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32ex   %0, %3\n"			\
-			"       getex   %0\n"				\
-			"       beqz    %0, 1b\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32ex   %[result], %[addr]\n"		\
+			"       getex   %[result]\n"			\
+			"       beqz    %[result], 1b\n"		\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
+			: [i] "a" (i), [addr] "a" (v)			\
 			: "memory"					\
 			);						\
 }									\
@@ -82,14 +80,14 @@
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32ex   %1, %3\n"			\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32ex   %0, %3\n"			\
-			"       getex   %0\n"				\
-			"       beqz    %0, 1b\n"			\
-			"       " #op " %0, %1, %2\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32ex   %[result], %[addr]\n"		\
+			"       getex   %[result]\n"			\
+			"       beqz    %[result], 1b\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
+			: [i] "a" (i), [addr] "a" (v)			\
 			: "memory"					\
 			);						\
 									\
@@ -103,13 +101,13 @@
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32ex   %1, %3\n"			\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32ex   %0, %3\n"			\
-			"       getex   %0\n"				\
-			"       beqz    %0, 1b\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32ex   %[result], %[addr]\n"		\
+			"       getex   %[result]\n"			\
+			"       beqz    %[result], 1b\n"		\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
+			: [i] "a" (i), [addr] "a" (v)			\
 			: "memory"					\
 			);						\
 									\
@@ -124,13 +122,14 @@
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32i    %1, %3, 0\n"			\
-			"       wsr     %1, scompare1\n"		\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32c1i  %0, %3, 0\n"			\
-			"       bne     %0, %1, 1b\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32i    %[tmp], %[mem]\n"		\
+			"       wsr     %[tmp], scompare1\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32c1i  %[result], %[mem]\n"		\
+			"       bne     %[result], %[tmp], 1b\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
 			: "memory"					\
 			);						\
 }									\
@@ -142,14 +141,15 @@
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32i    %1, %3, 0\n"			\
-			"       wsr     %1, scompare1\n"		\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32c1i  %0, %3, 0\n"			\
-			"       bne     %0, %1, 1b\n"			\
-			"       " #op " %0, %0, %2\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32i    %[tmp], %[mem]\n"		\
+			"       wsr     %[tmp], scompare1\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32c1i  %[result], %[mem]\n"		\
+			"       bne     %[result], %[tmp], 1b\n"	\
+			"       " #op " %[result], %[result], %[i]\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
 			: "memory"					\
 			);						\
 									\
@@ -163,13 +163,14 @@
 	int result;							\
 									\
 	__asm__ __volatile__(						\
-			"1:     l32i    %1, %3, 0\n"			\
-			"       wsr     %1, scompare1\n"		\
-			"       " #op " %0, %1, %2\n"			\
-			"       s32c1i  %0, %3, 0\n"			\
-			"       bne     %0, %1, 1b\n"			\
-			: "=&a" (result), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			"1:     l32i    %[tmp], %[mem]\n"		\
+			"       wsr     %[tmp], scompare1\n"		\
+			"       " #op " %[result], %[tmp], %[i]\n"	\
+			"       s32c1i  %[result], %[mem]\n"		\
+			"       bne     %[result], %[tmp], 1b\n"	\
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
 			: "memory"					\
 			);						\
 									\
@@ -184,14 +185,14 @@
 	unsigned int vval;						\
 									\
 	__asm__ __volatile__(						\
-			"       rsil    a15, "__stringify(TOPLEVEL)"\n"\
-			"       l32i    %0, %2, 0\n"			\
-			"       " #op " %0, %0, %1\n"			\
-			"       s32i    %0, %2, 0\n"			\
+			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
+			"       l32i    %[result], %[mem]\n"		\
+			"       " #op " %[result], %[result], %[i]\n"	\
+			"       s32i    %[result], %[mem]\n"		\
 			"       wsr     a15, ps\n"			\
 			"       rsync\n"				\
-			: "=&a" (vval)					\
-			: "a" (i), "a" (v)				\
+			: [result] "=&a" (vval), [mem] "+m" (*v)	\
+			: [i] "a" (i)					\
 			: "a15", "memory"				\
 			);						\
 }									\
@@ -203,13 +204,13 @@
 									\
 	__asm__ __volatile__(						\
 			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
-			"       l32i    %0, %2, 0\n"			\
-			"       " #op " %0, %0, %1\n"			\
-			"       s32i    %0, %2, 0\n"			\
+			"       l32i    %[result], %[mem]\n"		\
+			"       " #op " %[result], %[result], %[i]\n"	\
+			"       s32i    %[result], %[mem]\n"		\
 			"       wsr     a15, ps\n"			\
 			"       rsync\n"				\
-			: "=&a" (vval)					\
-			: "a" (i), "a" (v)				\
+			: [result] "=&a" (vval), [mem] "+m" (*v)	\
+			: [i] "a" (i)					\
 			: "a15", "memory"				\
 			);						\
 									\
@@ -223,13 +224,14 @@
 									\
 	__asm__ __volatile__(						\
 			"       rsil    a15,"__stringify(TOPLEVEL)"\n"	\
-			"       l32i    %0, %3, 0\n"			\
-			"       " #op " %1, %0, %2\n"			\
-			"       s32i    %1, %3, 0\n"			\
+			"       l32i    %[result], %[mem]\n"		\
+			"       " #op " %[tmp], %[result], %[i]\n"	\
+			"       s32i    %[tmp], %[mem]\n"		\
 			"       wsr     a15, ps\n"			\
 			"       rsync\n"				\
-			: "=&a" (vval), "=&a" (tmp)			\
-			: "a" (i), "a" (v)				\
+			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
+			  [mem] "+m" (*v)				\
+			: [i] "a" (i)					\
 			: "a15", "memory"				\
 			);						\
 									\
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index be8b2be..3f71d36 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -98,248 +98,113 @@
 
 #if XCHAL_HAVE_EXCLUSIVE
 
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32ex   %0, %2\n"
-			"       or      %0, %0, %1\n"
-			"       s32ex   %0, %2\n"
-			"       getex   %0\n"
-			"       beqz    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (mask), "a" (p)
-			: "memory");
+#define BIT_OP(op, insn, inv)						\
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{									\
+	unsigned long tmp;						\
+	unsigned long mask = 1UL << (bit & 31);				\
+									\
+	p += bit >> 5;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32ex   %[tmp], %[addr]\n"		\
+			"      "insn"   %[tmp], %[tmp], %[mask]\n"	\
+			"       s32ex   %[tmp], %[addr]\n"		\
+			"       getex   %[tmp]\n"			\
+			"       beqz    %[tmp], 1b\n"			\
+			: [tmp] "=&a" (tmp)				\
+			: [mask] "a" (inv mask), [addr] "a" (p)		\
+			: "memory");					\
 }
 
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32ex   %0, %2\n"
-			"       and     %0, %0, %1\n"
-			"       s32ex   %0, %2\n"
-			"       getex   %0\n"
-			"       beqz    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (~mask), "a" (p)
-			: "memory");
-}
-
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32ex   %0, %2\n"
-			"       xor     %0, %0, %1\n"
-			"       s32ex   %0, %2\n"
-			"       getex   %0\n"
-			"       beqz    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (mask), "a" (p)
-			: "memory");
-}
-
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32ex   %1, %3\n"
-			"       or      %0, %1, %2\n"
-			"       s32ex   %0, %3\n"
-			"       getex   %0\n"
-			"       beqz    %0, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (mask), "a" (p)
-			: "memory");
-
-	return value & mask;
-}
-
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32ex   %1, %3\n"
-			"       and     %0, %1, %2\n"
-			"       s32ex   %0, %3\n"
-			"       getex   %0\n"
-			"       beqz    %0, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (~mask), "a" (p)
-			: "memory");
-
-	return value & mask;
-}
-
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32ex   %1, %3\n"
-			"       xor     %0, %1, %2\n"
-			"       s32ex   %0, %3\n"
-			"       getex   %0\n"
-			"       beqz    %0, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (mask), "a" (p)
-			: "memory");
-
-	return value & mask;
+#define TEST_AND_BIT_OP(op, insn, inv)					\
+static inline int							\
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)	\
+{									\
+	unsigned long tmp, value;					\
+	unsigned long mask = 1UL << (bit & 31);				\
+									\
+	p += bit >> 5;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32ex   %[value], %[addr]\n"		\
+			"      "insn"   %[tmp], %[value], %[mask]\n"	\
+			"       s32ex   %[tmp], %[addr]\n"		\
+			"       getex   %[tmp]\n"			\
+			"       beqz    %[tmp], 1b\n"			\
+			: [tmp] "=&a" (tmp), [value] "=&a" (value)	\
+			: [mask] "a" (inv mask), [addr] "a" (p)		\
+			: "memory");					\
+									\
+	return value & mask;						\
 }
 
 #elif XCHAL_HAVE_S32C1I
 
-static inline void set_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (mask), "a" (p)
-			: "memory");
+#define BIT_OP(op, insn, inv)						\
+static inline void op##_bit(unsigned int bit, volatile unsigned long *p)\
+{									\
+	unsigned long tmp, value;					\
+	unsigned long mask = 1UL << (bit & 31);				\
+									\
+	p += bit >> 5;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32i    %[value], %[mem]\n"		\
+			"       wsr     %[value], scompare1\n"		\
+			"      "insn"   %[tmp], %[value], %[mask]\n"	\
+			"       s32c1i  %[tmp], %[mem]\n"		\
+			"       bne     %[tmp], %[value], 1b\n"		\
+			: [tmp] "=&a" (tmp), [value] "=&a" (value),	\
+			  [mem] "+m" (*p)				\
+			: [mask] "a" (inv mask)				\
+			: "memory");					\
 }
 
-static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (~mask), "a" (p)
-			: "memory");
-}
-
-static inline void change_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       xor     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (mask), "a" (p)
-			: "memory");
-}
-
-static inline int
-test_and_set_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       or      %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (mask), "a" (p)
-			: "memory");
-
-	return tmp & mask;
-}
-
-static inline int
-test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       and     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (~mask), "a" (p)
-			: "memory");
-
-	return tmp & mask;
-}
-
-static inline int
-test_and_change_bit(unsigned int bit, volatile unsigned long *p)
-{
-	unsigned long tmp, value;
-	unsigned long mask = 1UL << (bit & 31);
-
-	p += bit >> 5;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %3, 0\n"
-			"       wsr     %1, scompare1\n"
-			"       xor     %0, %1, %2\n"
-			"       s32c1i  %0, %3, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp), "=&a" (value)
-			: "a" (mask), "a" (p)
-			: "memory");
-
-	return tmp & mask;
+#define TEST_AND_BIT_OP(op, insn, inv)					\
+static inline int							\
+test_and_##op##_bit(unsigned int bit, volatile unsigned long *p)	\
+{									\
+	unsigned long tmp, value;					\
+	unsigned long mask = 1UL << (bit & 31);				\
+									\
+	p += bit >> 5;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32i    %[value], %[mem]\n"		\
+			"       wsr     %[value], scompare1\n"		\
+			"      "insn"   %[tmp], %[value], %[mask]\n"	\
+			"       s32c1i  %[tmp], %[mem]\n"		\
+			"       bne     %[tmp], %[value], 1b\n"		\
+			: [tmp] "=&a" (tmp), [value] "=&a" (value),	\
+			  [mem] "+m" (*p)				\
+			: [mask] "a" (inv mask)				\
+			: "memory");					\
+									\
+	return tmp & mask;						\
 }
 
 #else
 
+#define BIT_OP(op, insn, inv)
+#define TEST_AND_BIT_OP(op, insn, inv)
+
 #include <asm-generic/bitops/atomic.h>
 
 #endif /* XCHAL_HAVE_S32C1I */
 
+#define BIT_OPS(op, insn, inv)		\
+	BIT_OP(op, insn, inv)		\
+	TEST_AND_BIT_OP(op, insn, inv)
+
+BIT_OPS(set, "or", )
+BIT_OPS(clear, "and", ~)
+BIT_OPS(change, "xor", )
+
+#undef BIT_OPS
+#undef BIT_OP
+#undef TEST_AND_BIT_OP
+
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/le.h>
 
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index b21fd13..54e147a 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -31,4 +31,10 @@
 
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 
+/*
+ * R/O after init is actually writable, it cannot go to .rodata
+ * according to vmlinux linker script.
+ */
+#define __ro_after_init __read_mostly
+
 #endif	/* _XTENSA_CACHE_H */
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index a0d50be..cf907e5 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -145,6 +145,8 @@
 
 #endif
 
+#define flush_icache_user_range flush_icache_range
+
 /* Ensure consistency between data and instruction cache. */
 #define local_flush_icache_range(start, end)				\
 	do {								\
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index 8b68717..44ec1d0 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -37,29 +37,27 @@
  * better 64-bit) boundary
  */
 
-asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
-					    int len, __wsum sum,
-					    int *src_err_ptr, int *dst_err_ptr);
+asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst, int len);
 
+#define _HAVE_ARCH_CSUM_AND_COPY
 /*
  *	Note: when you get a NULL pointer exception here this means someone
  *	passed in an incorrect kernel address to one of these functions.
- *
- *	If you use these functions directly please don't forget the access_ok().
  */
 static inline
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
-					int len, __wsum sum)
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
-	return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL);
+	return csum_partial_copy_generic(src, dst, len);
 }
 
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
 static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst,
-				   int len, __wsum sum, int *err_ptr)
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+				   int len)
 {
-	return csum_partial_copy_generic((__force const void *)src, dst,
-					len, sum, err_ptr, NULL);
+	if (!access_ok(src, len))
+		return 0;
+	return csum_partial_copy_generic((__force const void *)src, dst, len);
 }
 
 /*
@@ -240,15 +238,10 @@
  */
 #define HAVE_CSUM_COPY_USER
 static __inline__ __wsum csum_and_copy_to_user(const void *src,
-					       void __user *dst, int len,
-					       __wsum sum, int *err_ptr)
+					       void __user *dst, int len)
 {
-	if (access_ok(dst, len))
-		return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
-
-	if (len)
-		*err_ptr = -EFAULT;
-
-	return (__force __wsum)-1; /* invalid checksum */
+	if (!access_ok(dst, len))
+		return 0;
+	return csum_partial_copy_generic(src, (__force void *)dst, len);
 }
 #endif
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 7ccc5cb..a175f8a 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -27,25 +27,25 @@
 	unsigned long tmp, result;
 
 	__asm__ __volatile__(
-			"1:     l32ex   %0, %3\n"
-			"       bne     %0, %4, 2f\n"
-			"       mov     %1, %2\n"
-			"       s32ex   %1, %3\n"
-			"       getex   %1\n"
-			"       beqz    %1, 1b\n"
+			"1:     l32ex   %[result], %[addr]\n"
+			"       bne     %[result], %[cmp], 2f\n"
+			"       mov     %[tmp], %[new]\n"
+			"       s32ex   %[tmp], %[addr]\n"
+			"       getex   %[tmp]\n"
+			"       beqz    %[tmp], 1b\n"
 			"2:\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (new), "a" (p), "a" (old)
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)
+			: [new] "a" (new), [addr] "a" (p), [cmp] "a" (old)
 			: "memory"
 			);
 
 	return result;
 #elif XCHAL_HAVE_S32C1I
 	__asm__ __volatile__(
-			"       wsr     %2, scompare1\n"
-			"       s32c1i  %0, %1, 0\n"
-			: "+a" (new)
-			: "a" (p), "a" (old)
+			"       wsr     %[cmp], scompare1\n"
+			"       s32c1i  %[new], %[mem]\n"
+			: [new] "+a" (new), [mem] "+m" (*p)
+			: [cmp] "a" (old)
 			: "memory"
 			);
 
@@ -53,14 +53,14 @@
 #else
 	__asm__ __volatile__(
 			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %1, 0\n"
-			"       bne     %0, %2, 1f\n"
-			"       s32i    %3, %1, 0\n"
+			"       l32i    %[old], %[mem]\n"
+			"       bne     %[old], %[cmp], 1f\n"
+			"       s32i    %[new], %[mem]\n"
 			"1:\n"
 			"       wsr     a15, ps\n"
 			"       rsync\n"
-			: "=&a" (old)
-			: "a" (p), "a" (old), "r" (new)
+			: [old] "=&a" (old), [mem] "+m" (*p)
+			: [cmp] "a" (old), [new] "r" (new)
 			: "a15", "memory");
 	return old;
 #endif
@@ -129,13 +129,13 @@
 	unsigned long tmp, result;
 
 	__asm__ __volatile__(
-			"1:     l32ex   %0, %3\n"
-			"       mov     %1, %2\n"
-			"       s32ex   %1, %3\n"
-			"       getex   %1\n"
-			"       beqz    %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (val), "a" (m)
+			"1:     l32ex   %[result], %[addr]\n"
+			"       mov     %[tmp], %[val]\n"
+			"       s32ex   %[tmp], %[addr]\n"
+			"       getex   %[tmp]\n"
+			"       beqz    %[tmp], 1b\n"
+			: [result] "=&a" (result), [tmp] "=&a" (tmp)
+			: [val] "a" (val), [addr] "a" (m)
 			: "memory"
 			);
 
@@ -143,13 +143,14 @@
 #elif XCHAL_HAVE_S32C1I
 	unsigned long tmp, result;
 	__asm__ __volatile__(
-			"1:     l32i    %1, %2, 0\n"
-			"       mov     %0, %3\n"
-			"       wsr     %1, scompare1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (m), "a" (val)
+			"1:     l32i    %[tmp], %[mem]\n"
+			"       mov     %[result], %[val]\n"
+			"       wsr     %[tmp], scompare1\n"
+			"       s32c1i  %[result], %[mem]\n"
+			"       bne     %[result], %[tmp], 1b\n"
+			: [result] "=&a" (result), [tmp] "=&a" (tmp),
+			  [mem] "+m" (*m)
+			: [val] "a" (val)
 			: "memory"
 			);
 	return result;
@@ -157,12 +158,12 @@
 	unsigned long tmp;
 	__asm__ __volatile__(
 			"       rsil    a15, "__stringify(TOPLEVEL)"\n"
-			"       l32i    %0, %1, 0\n"
-			"       s32i    %2, %1, 0\n"
+			"       l32i    %[tmp], %[mem]\n"
+			"       s32i    %[val], %[mem]\n"
 			"       wsr     a15, ps\n"
 			"       rsync\n"
-			: "=&a" (tmp)
-			: "a" (m), "a" (val)
+			: [tmp] "=&a" (tmp), [mem] "+m" (*m)
+			: [val] "a" (val)
 			: "a15", "memory");
 	return tmp;
 #endif
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
index 5b4acb7..5590b0f 100644
--- a/arch/xtensa/include/asm/core.h
+++ b/arch/xtensa/include/asm/core.h
@@ -10,10 +10,18 @@
 #define XCHAL_HAVE_EXCLUSIVE 0
 #endif
 
+#ifndef XCHAL_HAVE_EXTERN_REGS
+#define XCHAL_HAVE_EXTERN_REGS 0
+#endif
+
 #ifndef XCHAL_HAVE_MPU
 #define XCHAL_HAVE_MPU 0
 #endif
 
+#ifndef XCHAL_HAVE_VECBASE
+#define XCHAL_HAVE_VECBASE 0
+#endif
+
 #ifndef XCHAL_SPANNING_WAY
 #define XCHAL_SPANNING_WAY 0
 #endif
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
index 7e25c1b..a06ffb0 100644
--- a/arch/xtensa/include/asm/fixmap.h
+++ b/arch/xtensa/include/asm/fixmap.h
@@ -13,9 +13,9 @@
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H
 
-#include <asm/pgtable.h>
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
+#include <linux/pgtable.h>
 #include <asm/kmap_types.h>
 #endif
 
@@ -76,10 +76,4 @@
 
 #endif
 
-#define kmap_get_fixmap_pte(vaddr) \
-	pte_offset_kernel( \
-		pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
-		(vaddr) \
-	)
-
 #endif
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 0c4457c..a1a27b2 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -43,10 +43,10 @@
 #elif XCHAL_HAVE_S32C1I
 #define __futex_atomic_op(insn, ret, old, uaddr, arg)	\
 	__asm__ __volatile(				\
-	"1:	l32i	%[oldval], %[addr], 0\n"	\
+	"1:	l32i	%[oldval], %[mem]\n"		\
 		insn "\n"				\
 	"	wsr	%[oldval], scompare1\n"		\
-	"2:	s32c1i	%[newval], %[addr], 0\n"	\
+	"2:	s32c1i	%[newval], %[mem]\n"		\
 	"	bne	%[newval], %[oldval], 1b\n"	\
 	"	movi	%[newval], 0\n"			\
 	"3:\n"						\
@@ -60,9 +60,9 @@
 	"	.section __ex_table,\"a\"\n"		\
 	"	.long 1b, 5b, 2b, 5b\n"			\
 	"	.previous\n"				\
-	: [oldval] "=&r" (old), [newval] "=&r" (ret)	\
-	: [addr] "r" (uaddr), [oparg] "r" (arg),	\
-	  [fault] "I" (-EFAULT)				\
+	: [oldval] "=&r" (old), [newval] "=&r" (ret),	\
+	  [mem] "+m" (*(uaddr))				\
+	: [oparg] "r" (arg), [fault] "I" (-EFAULT)	\
 	: "memory")
 #endif
 
@@ -72,7 +72,8 @@
 #if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
 	int oldval = 0, ret;
 
-	pagefault_disable();
+	if (!access_ok(uaddr, sizeof(u32)))
+		return -EFAULT;
 
 	switch (op) {
 	case FUTEX_OP_SET:
@@ -99,8 +100,6 @@
 		ret = -ENOSYS;
 	}
 
-	pagefault_enable();
-
 	if (!ret)
 		*oval = oldval;
 
diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h
index 04e9340..eac5032 100644
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
@@ -13,10 +13,10 @@
 #define _XTENSA_HIGHMEM_H
 
 #include <linux/wait.h>
+#include <linux/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
 #include <asm/kmap_types.h>
-#include <asm/pgtable.h>
 
 #define PKMAP_BASE		((FIXADDR_START - \
 				  (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
@@ -63,38 +63,11 @@
 
 extern pte_t *pkmap_page_table;
 
-void *kmap_high(struct page *page);
-void kunmap_high(struct page *page);
-
-static inline void *kmap(struct page *page)
-{
-	/* Check if this memory layout is broken because PKMAP overlaps
-	 * page table.
-	 */
-	BUILD_BUG_ON(PKMAP_BASE <
-		     TLBTEMP_BASE_1 + TLBTEMP_SIZE);
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return page_address(page);
-	return kmap_high(page);
-}
-
-static inline void kunmap(struct page *page)
-{
-	BUG_ON(in_interrupt());
-	if (!PageHighMem(page))
-		return;
-	kunmap_high(page);
-}
-
 static inline void flush_cache_kmaps(void)
 {
 	flush_cache_all();
 }
 
-void *kmap_atomic(struct page *page);
-void __kunmap_atomic(void *kvaddr);
-
 void kmap_init(void);
 
 #endif
diff --git a/arch/xtensa/include/asm/hw_irq.h b/arch/xtensa/include/asm/hw_irq.h
deleted file mode 100644
index 3ddbea7..0000000
--- a/arch/xtensa/include/asm/hw_irq.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * include/asm-xtensa/hw_irq.h
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License.  See the file "COPYING" in the main directory of
- * this archive for more details.
- *
- * Copyright (C) 2002 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_HW_IRQ_H
-#define _XTENSA_HW_IRQ_H
-
-#endif
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 3b054d2..05cb13d 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -6,7 +6,7 @@
  *      For the new V3 MMU we remap the TLB from virtual == physical
  *      to the standard Linux mapping used in earlier MMU's.
  *
- *      The the MMU we also support a new configuration register that
+ *      For the MMU we also support a new configuration register that
  *      specifies how the S32C1I instruction operates with the cache
  *      controller.
  *
@@ -23,7 +23,8 @@
 #ifndef _XTENSA_INITIALIZE_MMU_H
 #define _XTENSA_INITIALIZE_MMU_H
 
-#include <asm/pgtable.h>
+#include <linux/init.h>
+#include <linux/pgtable.h>
 #include <asm/vectors.h>
 
 #if XCHAL_HAVE_PTP_MMU
@@ -183,7 +184,7 @@
 #endif
 
 #if XCHAL_HAVE_MPU
-	.data
+	__REFCONST
 	.align	4
 .Lattribute_table:
 	.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 988e085..54188e6 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -32,8 +32,7 @@
 /*
  * Return the virtual address for the specified bus memory.
  */
-static inline void __iomem *ioremap_nocache(unsigned long offset,
-		unsigned long size)
+static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
 	if (offset >= XCHAL_KIO_PADDR
 	    && offset - XCHAL_KIO_PADDR < XCHAL_KIO_SIZE)
@@ -52,15 +51,6 @@
 		return xtensa_ioremap_cache(offset, size);
 }
 #define ioremap_cache ioremap_cache
-#define ioremap_nocache ioremap_nocache
-
-#define ioremap_wc ioremap_nocache
-#define ioremap_wt ioremap_nocache
-
-static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
-{
-	return ioremap_nocache(offset, size);
-}
 
 static inline void iounmap(volatile void __iomem *addr)
 {
diff --git a/arch/xtensa/include/asm/kmem_layout.h b/arch/xtensa/include/asm/kmem_layout.h
index 9c12bab..6fc05cb 100644
--- a/arch/xtensa/include/asm/kmem_layout.h
+++ b/arch/xtensa/include/asm/kmem_layout.h
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_KMEM_LAYOUT_H
 #define _XTENSA_KMEM_LAYOUT_H
 
+#include <asm/core.h>
 #include <asm/types.h>
 
 #ifdef CONFIG_MMU
@@ -65,6 +66,34 @@
 
 #endif
 
+/* KIO definition */
+
+#if XCHAL_HAVE_PTP_MMU
+#define XCHAL_KIO_CACHED_VADDR		0xe0000000
+#define XCHAL_KIO_BYPASS_VADDR		0xf0000000
+#define XCHAL_KIO_DEFAULT_PADDR		0xf0000000
+#else
+#define XCHAL_KIO_BYPASS_VADDR		XCHAL_KIO_PADDR
+#define XCHAL_KIO_DEFAULT_PADDR		0x90000000
+#endif
+#define XCHAL_KIO_SIZE			0x10000000
+
+#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_USE_OF)
+#define XCHAL_KIO_PADDR			xtensa_get_kio_paddr()
+#ifndef __ASSEMBLY__
+extern unsigned long xtensa_kio_paddr;
+
+static inline unsigned long xtensa_get_kio_paddr(void)
+{
+	return xtensa_kio_paddr;
+}
+#endif
+#else
+#define XCHAL_KIO_PADDR			XCHAL_KIO_DEFAULT_PADDR
+#endif
+
+/* KERNEL_STACK definition */
+
 #ifndef CONFIG_KASAN
 #define KERNEL_STACK_SHIFT	13
 #else
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index de5e6cb..74923ef 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -18,10 +18,10 @@
 #include <linux/stringify.h>
 #include <linux/sched.h>
 #include <linux/mm_types.h>
+#include <linux/pgtable.h>
 
 #include <asm/vectors.h>
 
-#include <asm/pgtable.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
diff --git a/arch/xtensa/include/asm/module.h b/arch/xtensa/include/asm/module.h
deleted file mode 100644
index 488b40c..0000000
--- a/arch/xtensa/include/asm/module.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-xtensa/module.h
- *
- * This file contains the module code specific to the Xtensa architecture.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_MODULE_H
-#define _XTENSA_MODULE_H
-
-#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
-
-#include <asm-generic/module.h>
-
-#endif	/* _XTENSA_MODULE_H */
diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h
index 09c56cb..37ce25e 100644
--- a/arch/xtensa/include/asm/page.h
+++ b/arch/xtensa/include/asm/page.h
@@ -169,7 +169,18 @@
 	if (off >= XCHAL_KSEG_SIZE)
 		off -= XCHAL_KSEG_SIZE;
 
+#ifndef CONFIG_XIP_KERNEL
 	return off + PHYS_OFFSET;
+#else
+	if (off < XCHAL_KSEG_SIZE)
+		return off + PHYS_OFFSET;
+
+	off -= XCHAL_KSEG_SIZE;
+	if (off >= XCHAL_KIO_SIZE)
+		off -= XCHAL_KIO_SIZE;
+
+	return off + XCHAL_KIO_PADDR;
+#endif
 }
 #define __pa(x)	___pa((unsigned long)(x))
 #else
@@ -192,8 +203,5 @@
 
 #endif /* __ASSEMBLY__ */
 
-#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #include <asm-generic/memory_model.h>
 #endif /* _XTENSA_PAGE_H */
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 1d38f0e..d3a22da 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -8,9 +8,14 @@
 #ifndef _XTENSA_PGALLOC_H
 #define _XTENSA_PGALLOC_H
 
+#ifdef CONFIG_MMU
 #include <linux/highmem.h>
 #include <linux/slab.h>
 
+#define __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+#define __HAVE_ARCH_PTE_ALLOC_ONE
+#include <asm-generic/pgalloc.h>
+
 /*
  * Allocating and freeing a pmd is trivial: the 1-entry pmd is
  * inside the pgd, so has no extra memory associated with it.
@@ -28,50 +33,37 @@
 	return (pgd_t*) __get_free_pages(GFP_KERNEL | __GFP_ZERO, PGD_ORDER);
 }
 
-static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
+static inline void ptes_clear(pte_t *ptep)
 {
-	free_page((unsigned long)pgd);
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		pte_clear(NULL, 0, ptep + i);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *ptep;
-	int i;
 
-	ptep = (pte_t *)__get_free_page(GFP_KERNEL);
+	ptep = (pte_t *)__pte_alloc_one_kernel(mm);
 	if (!ptep)
 		return NULL;
-	for (i = 0; i < 1024; i++)
-		pte_clear(NULL, 0, ptep + i);
+	ptes_clear(ptep);
 	return ptep;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	pte_t *pte;
 	struct page *page;
 
-	pte = pte_alloc_one_kernel(mm);
-	if (!pte)
+	page = __pte_alloc_one(mm, GFP_PGTABLE_USER);
+	if (!page)
 		return NULL;
-	page = virt_to_page(pte);
-	if (!pgtable_pte_page_ctor(page)) {
-		__free_page(page);
-		return NULL;
-	}
+	ptes_clear(page_address(page));
 	return page;
 }
 
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
-{
-	pgtable_pte_page_dtor(pte);
-	__free_page(pte);
-}
 #define pmd_pgtable(pmd) pmd_page(pmd)
+#endif /* CONFIG_MMU */
 
 #endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 359c7bd..4dc04e6 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -8,7 +8,6 @@
 #ifndef _XTENSA_PGTABLE_H
 #define _XTENSA_PGTABLE_H
 
-#define __ARCH_USE_5LEVEL_HACK
 #include <asm/page.h>
 #include <asm/kmem_layout.h>
 #include <asm-generic/pgtable-nopmd.h>
@@ -267,9 +266,8 @@
 static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_special(pte_t pte) { return 0; }
 
-static inline pte_t pte_wrprotect(pte_t pte)	
+static inline pte_t pte_wrprotect(pte_t pte)
 	{ pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
 static inline pte_t pte_mkclean(pte_t pte)
 	{ pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_HW_WRITE); return pte; }
@@ -281,8 +279,6 @@
 	{ pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)
 	{ pte_val(pte) |= _PAGE_WRITABLE; return pte; }
-static inline pte_t pte_mkspecial(pte_t pte)
-	{ return pte; }
 
 #define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
 
@@ -363,25 +359,6 @@
 	update_pte(ptep, pte_wrprotect(pte));
 }
 
-/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address)	pgd_offset(&init_mm, address)
-
-/* to find an entry in a page-table-directory */
-#define pgd_offset(mm,address)	((mm)->pgd + pgd_index(address))
-
-#define pgd_index(address)	((address) >> PGDIR_SHIFT)
-
-/* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,address) ((pmd_t*)(dir))
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-#define pte_offset_kernel(dir,addr) 					\
-	((pte_t*) pmd_page_vaddr(*(dir)) + pte_index(addr))
-#define pte_offset_map(dir,addr)	pte_offset_kernel((dir),(addr))
-#define pte_unmap(pte)		do { } while (0)
-
-
 /*
  * Encode and decode a swap and file entry.
  */
@@ -445,6 +422,4 @@
  */
 #define HAVE_ARCH_UNMAPPED_AREA
 
-#include <asm-generic/pgtable.h>
-
 #endif /* _XTENSA_PGTABLE_H */
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 913826d..354ca94 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -12,8 +12,6 @@
 #define _XTENSA_PLATFORM_H
 
 #include <linux/types.h>
-#include <linux/pci.h>
-
 #include <asm/bootparam.h>
 
 /*
@@ -65,31 +63,4 @@
  */
 void cpu_reset(void) __attribute__((noreturn));
 
-/*
- * Memory caching is platform-dependent in noMMU xtensa configurations.
- * The following set of functions should be implemented in platform code
- * in order to enable coherent DMA memory operations when CONFIG_MMU is not
- * enabled. Default implementations do nothing and issue a warning.
- */
-
-/*
- * Check whether p points to a cached memory.
- */
-bool platform_vaddr_cached(const void *p);
-
-/*
- * Check whether p points to an uncached memory.
- */
-bool platform_vaddr_uncached(const void *p);
-
-/*
- * Return pointer to an uncached view of the cached sddress p.
- */
-void *platform_vaddr_to_uncached(void *p);
-
-/*
- * Return pointer to a cached view of the uncached sddress p.
- */
-void *platform_vaddr_to_cached(void *p);
-
 #endif	/* _XTENSA_PLATFORM_H */
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 7495520..7f63aca 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -195,6 +195,7 @@
 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
 	do { \
+		unsigned long syscall = (regs)->syscall; \
 		memset((regs), 0, sizeof(*(regs))); \
 		(regs)->pc = (new_pc); \
 		(regs)->ps = USER_PS_VALUE; \
@@ -204,7 +205,7 @@
 		(regs)->depc = 0; \
 		(regs)->windowbase = 0; \
 		(regs)->windowstart = 1; \
-		(regs)->syscall = NO_SYSCALL; \
+		(regs)->syscall = syscall; \
 	} while (0)
 
 /* Forward declaration */
@@ -236,10 +237,6 @@
 	 v; \
 	 })
 
-#ifndef XCHAL_HAVE_EXTERN_REGS
-#define XCHAL_HAVE_EXTERN_REGS 0
-#endif
-
 #if XCHAL_HAVE_EXTERN_REGS
 
 static inline void set_er(unsigned long value, unsigned long addr)
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index c90fb94..f9a671c 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -79,7 +79,7 @@
 		regs->areg[reg[i]] = args[i];
 }
 
-asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
+asmlinkage long xtensa_rt_sigreturn(void);
 asmlinkage long xtensa_shmat(int, char __user *, int);
 asmlinkage long xtensa_fadvise64_64(int, int,
 				    unsigned long long, unsigned long long);
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 956d4d4..6acbbe0 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -115,15 +115,20 @@
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
 #define TIF_DB_DISABLED		8	/* debug trap disabled for syscall */
+#define TIF_SYSCALL_AUDIT	9	/* syscall auditing active */
+#define TIF_SECCOMP		10	/* secure computing */
 
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT	(1<<TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP		(1<<TIF_SECCOMP)
 
 #define _TIF_WORK_MASK		(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
-				 _TIF_SYSCALL_TRACEPOINT)
+				 _TIF_SYSCALL_TRACEPOINT | \
+				 _TIF_SYSCALL_AUDIT | _TIF_SECCOMP)
 
 #define THREAD_SIZE KERNEL_STACK_SIZE
 #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index 5cb24a7..5c9fb80 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -35,7 +35,7 @@
 #define get_fs()	(current->thread.current_ds)
 #define set_fs(val)	(current->thread.current_ds = (val))
 
-#define segment_eq(a, b)	((a).seg == (b).seg)
+#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
 
 #define __kernel_ok (uaccess_kernel())
 #define __user_ok(addr, size) \
@@ -84,7 +84,7 @@
 #define __put_user_check(x, ptr, size)					\
 ({									\
 	long __pu_err = -EFAULT;					\
-	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
+	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
 	if (access_ok(__pu_addr, size))			\
 		__put_user_size((x), __pu_addr, (size), __pu_err);	\
 	__pu_err;							\
@@ -132,13 +132,13 @@
 #define __check_align_1  ""
 
 #define __check_align_2				\
-	"   _bbci.l %[addr], 0, 1f	\n"	\
+	"   _bbci.l %[mem] * 0, 1f	\n"	\
 	"   movi    %[err], %[efault]	\n"	\
 	"   _j      2f			\n"
 
 #define __check_align_4				\
-	"   _bbsi.l %[addr], 0, 0f	\n"	\
-	"   _bbci.l %[addr], 1, 1f	\n"	\
+	"   _bbsi.l %[mem] * 0, 0f	\n"	\
+	"   _bbci.l %[mem] * 0 + 1, 1f	\n"	\
 	"0: movi    %[err], %[efault]	\n"	\
 	"   _j      2f			\n"
 
@@ -154,7 +154,7 @@
 #define __put_user_asm(x_, addr_, err_, align, insn, cb)\
 __asm__ __volatile__(					\
 	__check_align_##align				\
-	"1: "insn"  %[x], %[addr], 0	\n"		\
+	"1: "insn"  %[x], %[mem]	\n"		\
 	"2:				\n"		\
 	"   .section  .fixup,\"ax\"	\n"		\
 	"   .align 4			\n"		\
@@ -167,8 +167,8 @@
 	"   .section  __ex_table,\"a\"	\n"		\
 	"   .long	1b, 5b		\n"		\
 	"   .previous"					\
-	:[err] "+r"(err_), [tmp] "=r"(cb)		\
-	:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
+	:[err] "+r"(err_), [tmp] "=r"(cb), [mem] "=m"(*(addr_))		\
+	:[x] "r"(x_), [efault] "i"(-EFAULT))
 
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
@@ -180,11 +180,11 @@
 #define __get_user_check(x, ptr, size)					\
 ({									\
 	long __gu_err = -EFAULT;					\
-	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
+	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
 	if (access_ok(__gu_addr, size))					\
 		__get_user_size((x), __gu_addr, (size), __gu_err);	\
 	else								\
-		(x) = 0;						\
+		(x) = (__typeof__(*(ptr)))0;				\
 	__gu_err;							\
 })
 
@@ -202,13 +202,15 @@
 		u64 __x;						\
 		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
 			retval = -EFAULT;				\
-			(x) = 0;					\
+			(x) = (__typeof__(*(ptr)))0;			\
 		} else {						\
-			(x) = *(__force __typeof__((ptr)))&__x;		\
+			(x) = *(__force __typeof__(*(ptr)) *)&__x;	\
 		}							\
 		break;							\
 	}								\
-	default: (x) = 0; __get_user_bad();				\
+	default:							\
+		(x) = (__typeof__(*(ptr)))0;				\
+		__get_user_bad();					\
 	}								\
 } while (0)
 
@@ -222,7 +224,7 @@
 	u32 __x = 0;					\
 	__asm__ __volatile__(				\
 		__check_align_##align			\
-		"1: "insn"  %[x], %[addr], 0	\n"	\
+		"1: "insn"  %[x], %[mem]	\n"	\
 		"2:				\n"	\
 		"   .section  .fixup,\"ax\"	\n"	\
 		"   .align 4			\n"	\
@@ -236,7 +238,7 @@
 		"   .long	1b, 5b		\n"	\
 		"   .previous"				\
 		:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
-		:[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+		:[mem] "m"(*(addr_)), [efault] "i"(-EFAULT)); \
 	(x_) = (__force __typeof__(*(addr_)))__x;	\
 } while (0)
 
@@ -270,15 +272,15 @@
  */
 
 static inline unsigned long
-__xtensa_clear_user(void *addr, unsigned long size)
+__xtensa_clear_user(void __user *addr, unsigned long size)
 {
-	if (!__memset(addr, 0, size))
+	if (!__memset((void __force *)addr, 0, size))
 		return size;
 	return 0;
 }
 
 static inline unsigned long
-clear_user(void *addr, unsigned long size)
+clear_user(void __user *addr, unsigned long size)
 {
 	if (access_ok(addr, size))
 		return __xtensa_clear_user(addr, size);
@@ -290,10 +292,10 @@
 
 #ifndef CONFIG_GENERIC_STRNCPY_FROM_USER
 
-extern long __strncpy_user(char *, const char *, long);
+extern long __strncpy_user(char *dst, const char __user *src, long count);
 
 static inline long
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
 {
 	if (access_ok(src, 1))
 		return __strncpy_user(dst, src, count);
@@ -306,13 +308,11 @@
 /*
  * Return the size of a string (including the ending 0!)
  */
-extern long __strnlen_user(const char *, long);
+extern long __strnlen_user(const char __user *str, long len);
 
-static inline long strnlen_user(const char *str, long len)
+static inline long strnlen_user(const char __user *str, long len)
 {
-	unsigned long top = __kernel_ok ? ~0UL : TASK_SIZE - 1;
-
-	if ((unsigned long)str > top)
+	if (!access_ok(str, 1))
 		return 0;
 	return __strnlen_user(str, len);
 }
diff --git a/arch/xtensa/include/asm/user.h b/arch/xtensa/include/asm/user.h
deleted file mode 100644
index 2c3ed23..0000000
--- a/arch/xtensa/include/asm/user.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * include/asm-xtensa/user.h
- *
- * Xtensa Processor version.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_USER_H
-#define _XTENSA_USER_H
-
-/* This file usually defines a 'struct user' structure. However, it it only
- * used for a.out file, which are not supported on Xtensa.
- */
-
-#endif	/* _XTENSA_USER_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 79fe300..704286c 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -21,58 +21,26 @@
 #include <asm/core.h>
 #include <asm/kmem_layout.h>
 
-#if XCHAL_HAVE_PTP_MMU
-#define XCHAL_KIO_CACHED_VADDR		0xe0000000
-#define XCHAL_KIO_BYPASS_VADDR		0xf0000000
-#define XCHAL_KIO_DEFAULT_PADDR		0xf0000000
+#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
+#ifdef CONFIG_KERNEL_VIRTUAL_ADDRESS
+#define KERNELOFFSET			CONFIG_KERNEL_VIRTUAL_ADDRESS
 #else
-#define XCHAL_KIO_BYPASS_VADDR		XCHAL_KIO_PADDR
-#define XCHAL_KIO_DEFAULT_PADDR		0x90000000
-#endif
-#define XCHAL_KIO_SIZE			0x10000000
-
-#if (!XCHAL_HAVE_PTP_MMU || XCHAL_HAVE_SPANNING_WAY) && defined(CONFIG_OF)
-#define XCHAL_KIO_PADDR			xtensa_get_kio_paddr()
-#ifndef __ASSEMBLY__
-extern unsigned long xtensa_kio_paddr;
-
-static inline unsigned long xtensa_get_kio_paddr(void)
-{
-	return xtensa_kio_paddr;
-}
-#endif
-#else
-#define XCHAL_KIO_PADDR			XCHAL_KIO_DEFAULT_PADDR
-#endif
-
-#if defined(CONFIG_MMU)
-
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
-/* Image Virtual Start Address */
-#define KERNELOFFSET			(XCHAL_KSEG_CACHED_VADDR + \
-					 CONFIG_KERNEL_LOAD_ADDRESS - \
+#define KERNELOFFSET			(CONFIG_KERNEL_LOAD_ADDRESS + \
+					 XCHAL_KSEG_CACHED_VADDR - \
 					 XCHAL_KSEG_PADDR)
+#endif
 #else
 #define KERNELOFFSET			CONFIG_KERNEL_LOAD_ADDRESS
 #endif
 
-#else /* !defined(CONFIG_MMU) */
-  /* MMU Not being used - Virtual == Physical */
-
-/* Location of the start of the kernel text, _start */
-#define KERNELOFFSET			CONFIG_KERNEL_LOAD_ADDRESS
-
-
-#endif /* CONFIG_MMU */
-
 #define RESET_VECTOR1_VADDR		(XCHAL_RESET_VECTOR1_VADDR)
-#ifdef CONFIG_VECTORS_OFFSET
-#define VECBASE_VADDR			(KERNELOFFSET - CONFIG_VECTORS_OFFSET)
+#ifdef CONFIG_VECTORS_ADDR
+#define VECBASE_VADDR			(CONFIG_VECTORS_ADDR)
 #else
 #define VECBASE_VADDR			_vecbase
 #endif
 
-#if defined(XCHAL_HAVE_VECBASE) && XCHAL_HAVE_VECBASE
+#if XCHAL_HAVE_VECBASE
 
 #define VECTOR_VADDR(offset)		(VECBASE_VADDR + offset)
 
diff --git a/arch/xtensa/include/asm/vermagic.h b/arch/xtensa/include/asm/vermagic.h
new file mode 100644
index 0000000..6d9c670
--- /dev/null
+++ b/arch/xtensa/include/asm/vermagic.h
@@ -0,0 +1,17 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2005 Tensilica Inc.
+ */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#include <linux/stringify.h>
+#include <variant/core.h>
+
+#define MODULE_ARCH_VERMAGIC "xtensa-" __stringify(XCHAL_CORE_ID) " "
+
+#endif	/* _ASM_VERMAGIC_H */
diff --git a/arch/xtensa/include/asm/vmalloc.h b/arch/xtensa/include/asm/vmalloc.h
new file mode 100644
index 0000000..0eb94b7
--- /dev/null
+++ b/arch/xtensa/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_XTENSA_VMALLOC_H
+#define _ASM_XTENSA_VMALLOC_H
+
+#endif /* _ASM_XTENSA_VMALLOC_H */