Update Linux to v5.4.2
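
This pulls in the v5.4.2 state of the arch/xtensa headers. Highlights
visible in the diff below:

- New <asm/core.h> wraps <variant/core.h> and supplies fallback
  definitions (XCHAL_HAVE_EXCLUSIVE, XCHAL_HAVE_MPU, XCHAL_SPANNING_WAY);
  most headers now include it instead of <variant/core.h>.
- Exclusive-access (l32ex/s32ex/getex) implementations are added for
  atomics, bitops, cmpxchg and futex, alongside the existing s32c1i
  paths.
- Hand-written spinlocks and rwlocks are replaced by the generic
  qspinlock and qrwlock implementations.
- access_ok() drops its VERIFY_* type argument; get_ds() is removed.
- abi_entry/abi_ret macros in asmmacro.h add call0 ABI support.
- Jump label (static key) support and syscall tracing infrastructure
  (asm/syscall.h, TIF_SYSCALL_TRACEPOINT, NO_SYSCALL) are added.
- Inline asm switches to named operands and .literal_position fixups;
  __KERNEL__ guards and dead platform/coprocessor hooks are removed;
  SPDX license identifiers replace GPL boilerplate.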

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild
index 82c7564..ffa0cf7 100644
--- a/arch/xtensa/include/asm/Kbuild
+++ b/arch/xtensa/include/asm/Kbuild
@@ -1,3 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+generated-y += syscall_table.h
 generic-y += bug.h
 generic-y += compat.h
 generic-y += device.h
@@ -14,17 +16,20 @@
 generic-y += kdebug.h
 generic-y += kmap_types.h
 generic-y += kprobes.h
-generic-y += linkage.h
+generic-y += kvm_para.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
+generic-y += mmiowb.h
 generic-y += param.h
 generic-y += percpu.h
 generic-y += preempt.h
-generic-y += rwsem.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
 generic-y += sections.h
 generic-y += topology.h
 generic-y += trace_clock.h
+generic-y += vga.h
 generic-y += word-at-a-time.h
 generic-y += xor.h
diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h
index dfdf9fa..7f6cf41 100644
--- a/arch/xtensa/include/asm/asm-uaccess.h
+++ b/arch/xtensa/include/asm/asm-uaccess.h
@@ -32,8 +32,6 @@
 #define KERNEL_DS	0
 #define USER_DS		1
 
-#define get_ds		(KERNEL_DS)
-
 /*
  * get_fs reads current->thread.current_ds into a register.
  * On Entry:
diff --git a/arch/xtensa/include/asm/asmmacro.h b/arch/xtensa/include/asm/asmmacro.h
index 7f2ae58..71a7e84 100644
--- a/arch/xtensa/include/asm/asmmacro.h
+++ b/arch/xtensa/include/asm/asmmacro.h
@@ -11,7 +11,7 @@
 #ifndef _XTENSA_ASMMACRO_H
 #define _XTENSA_ASMMACRO_H
 
-#include <variant/core.h>
+#include <asm/core.h>
 
 /*
  * Some little helpers for loops. Use zero-overhead-loops
@@ -191,4 +191,50 @@
 #endif
 	.endm
 
+#define XTENSA_STACK_ALIGNMENT		16
+
+#if defined(__XTENSA_WINDOWED_ABI__)
+#define XTENSA_FRAME_SIZE_RESERVE	16
+#define XTENSA_SPILL_STACK_RESERVE	32
+
+#define abi_entry(frame_size) \
+	entry sp, (XTENSA_FRAME_SIZE_RESERVE + \
+		   (((frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+		    -XTENSA_STACK_ALIGNMENT))
+#define abi_entry_default abi_entry(0)
+
+#define abi_ret(frame_size) retw
+#define abi_ret_default retw
+
+#elif defined(__XTENSA_CALL0_ABI__)
+
+#define XTENSA_SPILL_STACK_RESERVE	0
+
+#define abi_entry(frame_size) __abi_entry (frame_size)
+
+	.macro	__abi_entry frame_size
+	.ifgt \frame_size
+	addi sp, sp, -(((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+		       -XTENSA_STACK_ALIGNMENT)
+	.endif
+	.endm
+
+#define abi_entry_default
+
+#define abi_ret(frame_size) __abi_ret (frame_size)
+
+	.macro	__abi_ret frame_size
+	.ifgt \frame_size
+	addi sp, sp, (((\frame_size) + XTENSA_STACK_ALIGNMENT - 1) & \
+		      -XTENSA_STACK_ALIGNMENT)
+	.endif
+	ret
+	.endm
+
+#define abi_ret_default ret
+
+#else
+#error Unsupported Xtensa ABI
+#endif
+
 #endif /* _XTENSA_ASMMACRO_H */
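
Note: abi_entry()/abi_ret() round the requested frame size up to the
16-byte XTENSA_STACK_ALIGNMENT with the ((size + align - 1) & -align)
idiom. A minimal standalone C sketch of that arithmetic (illustrative
names, not part of the patch):

  #define XTENSA_STACK_ALIGNMENT 16

  /* Round frame_size up to the next multiple of the stack alignment,
   * exactly as abi_entry() does: 0 -> 0, 1 -> 16, 17 -> 32. */
  static unsigned int round_frame(unsigned int frame_size)
  {
          return (frame_size + XTENSA_STACK_ALIGNMENT - 1) &
                 -XTENSA_STACK_ALIGNMENT;
  }
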
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 7de0149..7b00d26 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -15,8 +15,6 @@
 
 #include <linux/stringify.h>
 #include <linux/types.h>
-
-#ifdef __KERNEL__
 #include <asm/processor.h>
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
@@ -58,7 +56,67 @@
  */
 #define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
 
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+#define ATOMIC_OP(op)							\
+static inline void atomic_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32ex   %1, %3\n"			\
+			"       " #op " %0, %1, %2\n"			\
+			"       s32ex   %0, %3\n"			\
+			"       getex   %0\n"				\
+			"       beqz    %0, 1b\n"			\
+			: "=&a" (result), "=&a" (tmp)			\
+			: "a" (i), "a" (v)				\
+			: "memory"					\
+			);						\
+}									\
+
+#define ATOMIC_OP_RETURN(op)						\
+static inline int atomic_##op##_return(int i, atomic_t *v)		\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32ex   %1, %3\n"			\
+			"       " #op " %0, %1, %2\n"			\
+			"       s32ex   %0, %3\n"			\
+			"       getex   %0\n"				\
+			"       beqz    %0, 1b\n"			\
+			"       " #op " %0, %1, %2\n"			\
+			: "=&a" (result), "=&a" (tmp)			\
+			: "a" (i), "a" (v)				\
+			: "memory"					\
+			);						\
+									\
+	return result;							\
+}
+
+#define ATOMIC_FETCH_OP(op)						\
+static inline int atomic_fetch_##op(int i, atomic_t *v)			\
+{									\
+	unsigned long tmp;						\
+	int result;							\
+									\
+	__asm__ __volatile__(						\
+			"1:     l32ex   %1, %3\n"			\
+			"       " #op " %0, %1, %2\n"			\
+			"       s32ex   %0, %3\n"			\
+			"       getex   %0\n"				\
+			"       beqz    %0, 1b\n"			\
+			: "=&a" (result), "=&a" (tmp)			\
+			: "a" (i), "a" (v)				\
+			: "memory"					\
+			);						\
+									\
+	return tmp;							\
+}
+
+#elif XCHAL_HAVE_S32C1I
 #define ATOMIC_OP(op)							\
 static inline void atomic_##op(int i, atomic_t * v)			\
 {									\
@@ -200,6 +258,4 @@
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#endif /* __KERNEL__ */
-
 #endif /* _XTENSA_ATOMIC_H */
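
Note: in the new XCHAL_HAVE_EXCLUSIVE path, l32ex/s32ex form a
load-exclusive/store-exclusive pair and getex fetches the store's
success flag, so every operation is a retry loop. A rough C analogue
of that loop structure using GCC's generic atomic builtins
(illustrative only; the kernel macros emit the real exclusive
instructions):

  static inline void emulated_atomic_add(int i, int *counter)
  {
          int old, new;

          do {
                  old = __atomic_load_n(counter, __ATOMIC_RELAXED); /* l32ex */
                  new = old + i;                                    /* add */
          } while (!__atomic_compare_exchange_n(counter, &old, new, 0,
                          __ATOMIC_RELAXED, __ATOMIC_RELAXED)); /* s32ex/getex/beqz */
  }
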
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index 956596e..d6f8d4d 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -9,12 +9,16 @@
 #ifndef _XTENSA_SYSTEM_H
 #define _XTENSA_SYSTEM_H
 
+#include <asm/core.h>
+
 #define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
 #define rmb() barrier()
 #define wmb() mb()
 
+#if XCHAL_HAVE_S32C1I
 #define __smp_mb__before_atomic()		barrier()
 #define __smp_mb__after_atomic()		barrier()
+#endif
 
 #include <asm-generic/barrier.h>
 
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index d349018..be8b2be 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -13,8 +13,6 @@
 #ifndef _XTENSA_BITOPS_H
 #define _XTENSA_BITOPS_H
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -98,7 +96,126 @@
 
 #include <asm-generic/bitops/fls64.h>
 
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+
+static inline void set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %0, %2\n"
+			"       or      %0, %0, %1\n"
+			"       s32ex   %0, %2\n"
+			"       getex   %0\n"
+			"       beqz    %0, 1b\n"
+			: "=&a" (tmp)
+			: "a" (mask), "a" (p)
+			: "memory");
+}
+
+static inline void clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %0, %2\n"
+			"       and     %0, %0, %1\n"
+			"       s32ex   %0, %2\n"
+			"       getex   %0\n"
+			"       beqz    %0, 1b\n"
+			: "=&a" (tmp)
+			: "a" (~mask), "a" (p)
+			: "memory");
+}
+
+static inline void change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %0, %2\n"
+			"       xor     %0, %0, %1\n"
+			"       s32ex   %0, %2\n"
+			"       getex   %0\n"
+			"       beqz    %0, 1b\n"
+			: "=&a" (tmp)
+			: "a" (mask), "a" (p)
+			: "memory");
+}
+
+static inline int
+test_and_set_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %1, %3\n"
+			"       or      %0, %1, %2\n"
+			"       s32ex   %0, %3\n"
+			"       getex   %0\n"
+			"       beqz    %0, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (mask), "a" (p)
+			: "memory");
+
+	return value & mask;
+}
+
+static inline int
+test_and_clear_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %1, %3\n"
+			"       and     %0, %1, %2\n"
+			"       s32ex   %0, %3\n"
+			"       getex   %0\n"
+			"       beqz    %0, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (~mask), "a" (p)
+			: "memory");
+
+	return value & mask;
+}
+
+static inline int
+test_and_change_bit(unsigned int bit, volatile unsigned long *p)
+{
+	unsigned long tmp, value;
+	unsigned long mask = 1UL << (bit & 31);
+
+	p += bit >> 5;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %1, %3\n"
+			"       xor     %0, %1, %2\n"
+			"       s32ex   %0, %3\n"
+			"       getex   %0\n"
+			"       beqz    %0, 1b\n"
+			: "=&a" (tmp), "=&a" (value)
+			: "a" (mask), "a" (p)
+			: "memory");
+
+	return value & mask;
+}
+
+#elif XCHAL_HAVE_S32C1I
 
 static inline void set_bit(unsigned int bit, volatile unsigned long *p)
 {
@@ -232,6 +349,4 @@
 #include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 
-#endif	/* __KERNEL__ */
-
 #endif	/* _XTENSA_BITOPS_H */
diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h
index d2fd932..b21fd13 100644
--- a/arch/xtensa/include/asm/cache.h
+++ b/arch/xtensa/include/asm/cache.h
@@ -11,7 +11,7 @@
 #ifndef _XTENSA_CACHE_H
 #define _XTENSA_CACHE_H
 
-#include <variant/core.h>
+#include <asm/core.h>
 
 #define L1_CACHE_SHIFT	XCHAL_DCACHE_LINEWIDTH
 #define L1_CACHE_BYTES	XCHAL_DCACHE_LINESIZE
diff --git a/arch/xtensa/include/asm/checksum.h b/arch/xtensa/include/asm/checksum.h
index 3ae74d7..8b68717 100644
--- a/arch/xtensa/include/asm/checksum.h
+++ b/arch/xtensa/include/asm/checksum.h
@@ -13,7 +13,7 @@
 
 #include <linux/in6.h>
 #include <linux/uaccess.h>
-#include <variant/core.h>
+#include <asm/core.h>
 
 /*
  * computes the checksum of a memory block at buff, length len,
@@ -243,7 +243,7 @@
 					       void __user *dst, int len,
 					       __wsum sum, int *err_ptr)
 {
-	if (access_ok(VERIFY_WRITE, dst, len))
+	if (access_ok(dst, len))
 		return csum_partial_copy_generic(src,dst,len,sum,NULL,err_ptr);
 
 	if (len)
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index 201e900..7ccc5cb 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -13,6 +13,7 @@
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bits.h>
 #include <linux/stringify.h>
 
 /*
@@ -22,7 +23,24 @@
 static inline unsigned long
 __cmpxchg_u32(volatile int *p, int old, int new)
 {
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+	unsigned long tmp, result;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %0, %3\n"
+			"       bne     %0, %4, 2f\n"
+			"       mov     %1, %2\n"
+			"       s32ex   %1, %3\n"
+			"       getex   %1\n"
+			"       beqz    %1, 1b\n"
+			"2:\n"
+			: "=&a" (result), "=&a" (tmp)
+			: "a" (new), "a" (p), "a" (old)
+			: "memory"
+			);
+
+	return result;
+#elif XCHAL_HAVE_S32C1I
 	__asm__ __volatile__(
 			"       wsr     %2, scompare1\n"
 			"       s32c1i  %0, %1, 0\n"
@@ -107,7 +125,22 @@
 
 static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 {
-#if XCHAL_HAVE_S32C1I
+#if XCHAL_HAVE_EXCLUSIVE
+	unsigned long tmp, result;
+
+	__asm__ __volatile__(
+			"1:     l32ex   %0, %3\n"
+			"       mov     %1, %2\n"
+			"       s32ex   %1, %3\n"
+			"       getex   %1\n"
+			"       beqz    %1, 1b\n"
+			: "=&a" (result), "=&a" (tmp)
+			: "a" (val), "a" (m)
+			: "memory"
+			);
+
+	return result;
+#elif XCHAL_HAVE_S32C1I
 	unsigned long tmp, result;
 	__asm__ __volatile__(
 			"1:     l32i    %1, %2, 0\n"
@@ -138,6 +171,28 @@
 #define xchg(ptr,x) \
 	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
+static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
+{
+	int off = (unsigned long)ptr % sizeof(u32);
+	volatile u32 *p = ptr - off;
+#ifdef __BIG_ENDIAN
+	int bitoff = (sizeof(u32) - size - off) * BITS_PER_BYTE;
+#else
+	int bitoff = off * BITS_PER_BYTE;
+#endif
+	u32 bitmask = ((0x1 << size * BITS_PER_BYTE) - 1) << bitoff;
+	u32 oldv, newv;
+	u32 ret;
+
+	do {
+		oldv = READ_ONCE(*p);
+		ret = (oldv & bitmask) >> bitoff;
+		newv = (oldv & ~bitmask) | (x << bitoff);
+	} while (__cmpxchg_u32(p, oldv, newv) != oldv);
+
+	return ret;
+}
+
 /*
  * This only works if the compiler isn't horribly bad at optimizing.
  * gcc-2.5.8 reportedly can't handle this, but I define that one to
@@ -150,11 +205,16 @@
 __xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
-		case 4:
-			return xchg_u32(ptr, x);
+	case 1:
+		return xchg_small(ptr, x, 1);
+	case 2:
+		return xchg_small(ptr, x, 2);
+	case 4:
+		return xchg_u32(ptr, x);
+	default:
+		__xchg_called_with_bad_pointer();
+		return x;
 	}
-	__xchg_called_with_bad_pointer();
-	return x;
 }
 
 #endif /* __ASSEMBLY__ */
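
Note: xchg_small() synthesizes 1- and 2-byte xchg from the word-sized
cmpxchg by masking the target bytes inside the containing aligned
32-bit word. A standalone sketch of the offset/mask computation for
the little-endian case (illustrative names, not part of the patch):

  #include <stdint.h>

  #define BITS_PER_BYTE 8

  /* For a `size`-byte access at `addr` (size is 1 or 2), find the bit
   * offset within the containing word and the mask covering it, e.g.
   * addr 0x1003, size 1 -> bitoff 24, mask 0xff000000. */
  static void small_xchg_mask(uintptr_t addr, int size,
                              int *bitoff, uint32_t *bitmask)
  {
          int off = addr % sizeof(uint32_t);

          *bitoff = off * BITS_PER_BYTE;
          *bitmask = ((1u << (size * BITS_PER_BYTE)) - 1) << *bitoff;
  }
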
diff --git a/arch/xtensa/include/asm/coprocessor.h b/arch/xtensa/include/asm/coprocessor.h
index 677501b..0fbe2a7 100644
--- a/arch/xtensa/include/asm/coprocessor.h
+++ b/arch/xtensa/include/asm/coprocessor.h
@@ -12,9 +12,8 @@
 #ifndef _XTENSA_COPROCESSOR_H
 #define _XTENSA_COPROCESSOR_H
 
-#include <linux/stringify.h>
-#include <variant/core.h>
 #include <variant/tie.h>
+#include <asm/core.h>
 #include <asm/types.h>
 
 #ifdef __ASSEMBLY__
@@ -90,19 +89,6 @@
 
 #ifndef __ASSEMBLY__
 
-
-#if XCHAL_HAVE_CP
-
-#define RSR_CPENABLE(x)	do {						  \
-	__asm__ __volatile__("rsr %0, cpenable" : "=a" (x));		  \
-	} while(0);
-#define WSR_CPENABLE(x)	do {						  \
-	__asm__ __volatile__("wsr %0, cpenable; rsync" :: "a" (x));	  \
-	} while(0);
-
-#endif /* XCHAL_HAVE_CP */
-
-
 /*
  * Additional registers.
  * We define three types of additional registers:
@@ -157,20 +143,11 @@
 	__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));
 
 extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
-extern void coprocessor_save(void*, int);
-extern void coprocessor_load(void*, int);
 extern void coprocessor_flush(struct thread_info*, int);
-extern void coprocessor_restore(struct thread_info*, int);
 
 extern void coprocessor_release_all(struct thread_info*);
 extern void coprocessor_flush_all(struct thread_info*);
 
-static inline void coprocessor_clear_cpenable(void)
-{
-	unsigned long i = 0;
-	WSR_CPENABLE(i);
-}
-
 #endif	/* XTENSA_HAVE_COPROCESSORS */
 
 #endif	/* !__ASSEMBLY__ */
diff --git a/arch/xtensa/include/asm/core.h b/arch/xtensa/include/asm/core.h
new file mode 100644
index 0000000..5b4acb7
--- /dev/null
+++ b/arch/xtensa/include/asm/core.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_CORE_H
+#define _ASM_XTENSA_CORE_H
+
+#include <variant/core.h>
+
+#ifndef XCHAL_HAVE_EXCLUSIVE
+#define XCHAL_HAVE_EXCLUSIVE 0
+#endif
+
+#ifndef XCHAL_HAVE_MPU
+#define XCHAL_HAVE_MPU 0
+#endif
+
+#ifndef XCHAL_SPANNING_WAY
+#define XCHAL_SPANNING_WAY 0
+#endif
+
+#endif
diff --git a/arch/xtensa/include/asm/elf.h b/arch/xtensa/include/asm/elf.h
index eacb25a..909a6ab 100644
--- a/arch/xtensa/include/asm/elf.h
+++ b/arch/xtensa/include/asm/elf.h
@@ -15,10 +15,10 @@
 
 #include <asm/ptrace.h>
 #include <asm/coprocessor.h>
+#include <linux/elf-em.h>
 
 /* Xtensa processor ELF architecture-magic number */
 
-#define EM_XTENSA	94
 #define EM_XTENSA_OLD	0xABC7
 
 /* Xtensa relocations defined by the ABIs */
@@ -75,19 +75,7 @@
 
 typedef unsigned long elf_greg_t;
 
-typedef struct {
-	elf_greg_t pc;
-	elf_greg_t ps;
-	elf_greg_t lbeg;
-	elf_greg_t lend;
-	elf_greg_t lcount;
-	elf_greg_t sar;
-	elf_greg_t windowstart;
-	elf_greg_t windowbase;
-	elf_greg_t threadptr;
-	elf_greg_t reserved[7+48];
-	elf_greg_t a[64];
-} xtensa_gregset_t;
+typedef struct user_pt_regs xtensa_gregset_t;
 
 #define ELF_NGREG	(sizeof(xtensa_gregset_t) / sizeof(elf_greg_t))
 
@@ -98,11 +86,6 @@
 typedef unsigned int elf_fpreg_t;
 typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 
-#define ELF_CORE_COPY_REGS(_eregs, _pregs) 				\
-	xtensa_elf_core_copy_regs ((xtensa_gregset_t*)&(_eregs), _pregs);
-
-extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
-
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -126,6 +109,7 @@
 #define ELF_ARCH	EM_XTENSA
 
 #define ELF_EXEC_PAGESIZE	PAGE_SIZE
+#define CORE_DUMP_USE_REGSET
 
 /*
  * This is the location that an ET_DYN program is loaded if exec'ed.  Typical
@@ -193,15 +177,4 @@
 #define SET_PERSONALITY(ex) \
 	set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
 
-struct task_struct;
-
-extern void do_copy_regs (xtensa_gregset_t*, struct pt_regs*,
-			  struct task_struct*);
-extern void do_restore_regs (xtensa_gregset_t*, struct pt_regs*,
-			     struct task_struct*);
-extern void do_save_fpregs (elf_fpregset_t*, struct pt_regs*,
-			    struct task_struct*);
-extern int do_restore_fpregs (elf_fpregset_t*, struct pt_regs*,
-			      struct task_struct*);
-
 #endif	/* _XTENSA_ELF_H */
diff --git a/arch/xtensa/include/asm/flat.h b/arch/xtensa/include/asm/flat.h
index b8532d7..ed5870c 100644
--- a/arch/xtensa/include/asm/flat.h
+++ b/arch/xtensa/include/asm/flat.h
@@ -4,11 +4,8 @@
 
 #include <asm/unaligned.h>
 
-#define flat_argvp_envp_on_stack()			0
-#define flat_old_ram_flag(flags)			(flags)
-#define flat_reloc_valid(reloc, size)			((reloc) <= (size))
 static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
-					u32 *addr, u32 *persistent)
+					u32 *addr)
 {
 	*addr = get_unaligned((__force u32 *)rp);
 	return 0;
@@ -18,7 +15,5 @@
 	put_unaligned(addr, (__force u32 *)rp);
 	return 0;
 }
-#define flat_get_relocate_addr(rel)			(rel)
-#define flat_set_persistent(relval, p)			0
 
 #endif /* __ASM_XTENSA_FLAT_H */
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 5bfbc1c..0c4457c 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Atomic futex routines
  *
  * Based on the PowerPC implementation
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * Copyright (C) 2013 TangoTec Ltd.
  *
  * Baruch Siach <baruch@tkos.co.il>
@@ -15,65 +12,88 @@
 #ifndef _ASM_XTENSA_FUTEX_H
 #define _ASM_XTENSA_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+#if XCHAL_HAVE_EXCLUSIVE
+#define __futex_atomic_op(insn, ret, old, uaddr, arg)	\
 	__asm__ __volatile(				\
-	"1:	l32i	%0, %2, 0\n"			\
+	"1:	l32ex	%[oldval], %[addr]\n"		\
 		insn "\n"				\
-	"	wsr	%0, scompare1\n"		\
-	"2:	s32c1i	%1, %2, 0\n"			\
-	"	bne	%1, %0, 1b\n"			\
-	"	movi	%1, 0\n"			\
+	"2:	s32ex	%[newval], %[addr]\n"		\
+	"	getex	%[newval]\n"			\
+	"	beqz	%[newval], 1b\n"		\
+	"	movi	%[newval], 0\n"			\
 	"3:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align 4\n"				\
-	"4:	.long	3b\n"				\
-	"5:	l32r	%0, 4b\n"			\
-	"	movi	%1, %3\n"			\
-	"	jx	%0\n"				\
+	"	.literal_position\n"			\
+	"5:	movi	%[oldval], 3b\n"		\
+	"	movi	%[newval], %[fault]\n"		\
+	"	jx	%[oldval]\n"			\
 	"	.previous\n"				\
 	"	.section __ex_table,\"a\"\n"		\
-	"	.long 1b,5b,2b,5b\n"			\
+	"	.long 1b, 5b, 2b, 5b\n"			\
 	"	.previous\n"				\
-	: "=&r" (oldval), "=&r" (ret)			\
-	: "r" (uaddr), "I" (-EFAULT), "r" (oparg)	\
+	: [oldval] "=&r" (old), [newval] "=&r" (ret)	\
+	: [addr] "r" (uaddr), [oparg] "r" (arg),	\
+	  [fault] "I" (-EFAULT)				\
 	: "memory")
+#elif XCHAL_HAVE_S32C1I
+#define __futex_atomic_op(insn, ret, old, uaddr, arg)	\
+	__asm__ __volatile(				\
+	"1:	l32i	%[oldval], %[addr], 0\n"	\
+		insn "\n"				\
+	"	wsr	%[oldval], scompare1\n"		\
+	"2:	s32c1i	%[newval], %[addr], 0\n"	\
+	"	bne	%[newval], %[oldval], 1b\n"	\
+	"	movi	%[newval], 0\n"			\
+	"3:\n"						\
+	"	.section .fixup,\"ax\"\n"		\
+	"	.align 4\n"				\
+	"	.literal_position\n"			\
+	"5:	movi	%[oldval], 3b\n"		\
+	"	movi	%[newval], %[fault]\n"		\
+	"	jx	%[oldval]\n"			\
+	"	.previous\n"				\
+	"	.section __ex_table,\"a\"\n"		\
+	"	.long 1b, 5b, 2b, 5b\n"			\
+	"	.previous\n"				\
+	: [oldval] "=&r" (old), [newval] "=&r" (ret)	\
+	: [addr] "r" (uaddr), [oparg] "r" (arg),	\
+	  [fault] "I" (-EFAULT)				\
+	: "memory")
+#endif
 
 static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 		u32 __user *uaddr)
 {
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
 	int oldval = 0, ret;
 
-#if !XCHAL_HAVE_S32C1I
-	return -ENOSYS;
-#endif
-
 	pagefault_disable();
 
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("mov %1, %4", ret, oldval, uaddr, oparg);
+		__futex_atomic_op("mov %[newval], %[oparg]",
+				  ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("add %1, %0, %4", ret, oldval, uaddr,
-				oparg);
+		__futex_atomic_op("add %[newval], %[oldval], %[oparg]",
+				  ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("or %1, %0, %4", ret, oldval, uaddr,
-				oparg);
+		__futex_atomic_op("or %[newval], %[oldval], %[oparg]",
+				  ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and %1, %0, %4", ret, oldval, uaddr,
-				~oparg);
+		__futex_atomic_op("and %[newval], %[oldval], %[oparg]",
+				  ret, oldval, uaddr, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("xor %1, %0, %4", ret, oldval, uaddr,
-				oparg);
+		__futex_atomic_op("xor %[newval], %[oldval], %[oparg]",
+				  ret, oldval, uaddr, oparg);
 		break;
 	default:
 		ret = -ENOSYS;
@@ -85,43 +105,60 @@
 		*oval = oldval;
 
 	return ret;
+#else
+	return -ENOSYS;
+#endif
 }
 
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
+#if XCHAL_HAVE_S32C1I || XCHAL_HAVE_EXCLUSIVE
+	unsigned long tmp;
 	int ret = 0;
 
-	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+	if (!access_ok(uaddr, sizeof(u32)))
 		return -EFAULT;
 
-#if !XCHAL_HAVE_S32C1I
-	return -ENOSYS;
-#endif
-
 	__asm__ __volatile__ (
 	"	# futex_atomic_cmpxchg_inatomic\n"
-	"	wsr	%5, scompare1\n"
-	"1:	s32c1i	%1, %4, 0\n"
-	"	s32i	%1, %6, 0\n"
+#if XCHAL_HAVE_EXCLUSIVE
+	"1:	l32ex	%[tmp], %[addr]\n"
+	"	s32i	%[tmp], %[uval], 0\n"
+	"	bne	%[tmp], %[oldval], 2f\n"
+	"	mov	%[tmp], %[newval]\n"
+	"3:	s32ex	%[tmp], %[addr]\n"
+	"	getex	%[tmp]\n"
+	"	beqz	%[tmp], 1b\n"
+#elif XCHAL_HAVE_S32C1I
+	"	wsr	%[oldval], scompare1\n"
+	"1:	s32c1i	%[newval], %[addr], 0\n"
+	"	s32i	%[newval], %[uval], 0\n"
+#endif
 	"2:\n"
 	"	.section .fixup,\"ax\"\n"
 	"	.align 4\n"
-	"3:	.long	2b\n"
-	"4:	l32r	%1, 3b\n"
-	"	movi	%0, %7\n"
-	"	jx	%1\n"
+	"	.literal_position\n"
+	"4:	movi	%[tmp], 2b\n"
+	"	movi	%[ret], %[fault]\n"
+	"	jx	%[tmp]\n"
 	"	.previous\n"
 	"	.section __ex_table,\"a\"\n"
-	"	.long 1b,4b\n"
+	"	.long 1b, 4b\n"
+#if XCHAL_HAVE_EXCLUSIVE
+	"	.long 3b, 4b\n"
+#endif
 	"	.previous\n"
-	: "+r" (ret), "+r" (newval), "+m" (*uaddr), "+m" (*uval)
-	: "r" (uaddr), "r" (oldval), "r" (uval), "I" (-EFAULT)
+	: [ret] "+r" (ret), [newval] "+r" (newval), [tmp] "=&r" (tmp)
+	: [addr] "r" (uaddr), [oldval] "r" (oldval), [uval] "r" (uval),
+	  [fault] "I" (-EFAULT)
 	: "memory");
 
 	return ret;
+#else
+	return -ENOSYS;
+#endif
 }
 
-#endif /* __KERNEL__ */
 #endif /* _ASM_XTENSA_FUTEX_H */
diff --git a/arch/xtensa/include/asm/initialize_mmu.h b/arch/xtensa/include/asm/initialize_mmu.h
index 10e9852..3b054d2 100644
--- a/arch/xtensa/include/asm/initialize_mmu.h
+++ b/arch/xtensa/include/asm/initialize_mmu.h
@@ -33,10 +33,6 @@
 #define CA_WRITEBACK	(0x4)
 #endif
 
-#ifndef XCHAL_SPANNING_WAY
-#define XCHAL_SPANNING_WAY 0
-#endif
-
 #ifdef __ASSEMBLY__
 
 #define XTENSA_HWVERSION_RC_2009_0 230000
@@ -46,7 +42,7 @@
 #if XCHAL_HAVE_S32C1I && (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0)
 /*
  * We Have Atomic Operation Control (ATOMCTL) Register; Initialize it.
- * For details see Documentation/xtensa/atomctl.txt
+ * For details see Documentation/xtensa/atomctl.rst
  */
 #if XCHAL_DCACHE_IS_COHERENT
 	movi	a3, 0x25	/* For SMP/MX -- internal for writeback,
@@ -181,11 +177,42 @@
 
 	.macro	initialize_cacheattr
 
-#if !defined(CONFIG_MMU) && XCHAL_HAVE_TLBS
+#if !defined(CONFIG_MMU) && (XCHAL_HAVE_TLBS || XCHAL_HAVE_MPU)
 #if CONFIG_MEMMAP_CACHEATTR == 0x22222222 && XCHAL_HAVE_PTP_MMU
 #error Default MEMMAP_CACHEATTR of 0x22222222 does not work with full MMU.
 #endif
 
+#if XCHAL_HAVE_MPU
+	.data
+	.align	4
+.Lattribute_table:
+	.long 0x000000, 0x1fff00, 0x1ddf00, 0x1eef00
+	.long 0x006600, 0x000000, 0x000000, 0x000000
+	.long 0x000000, 0x000000, 0x000000, 0x000000
+	.long 0x000000, 0x000000, 0x000000, 0x000000
+	.previous
+
+	movi	a3, .Lattribute_table
+	movi	a4, CONFIG_MEMMAP_CACHEATTR
+	movi	a5, 1
+	movi	a6, XCHAL_MPU_ENTRIES
+	movi	a10, 0x20000000
+	movi	a11, -1
+1:
+	sub	a5, a5, a10
+	extui	a8, a4, 28, 4
+	beq	a8, a11, 2f
+	addi	a6, a6, -1
+	mov	a11, a8
+2:
+	addx4	a9, a8, a3
+	l32i	a9, a9, 0
+	or	a9, a9, a6
+	wptlb	a9, a5
+	slli	a4, a4, 4
+	bgeu	a5, a10, 1b
+
+#else
 	movi	a5, XCHAL_SPANNING_WAY
 	movi	a6, ~_PAGE_ATTRIB_MASK
 	movi	a4, CONFIG_MEMMAP_CACHEATTR
@@ -208,6 +235,7 @@
 
 	isync
 #endif
+#endif
 
 	.endm
 
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index acc5bb2..988e085 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -11,7 +11,6 @@
 #ifndef _XTENSA_IO_H
 #define _XTENSA_IO_H
 
-#ifdef __KERNEL__
 #include <asm/byteorder.h>
 #include <asm/page.h>
 #include <asm/vectors.h>
@@ -22,6 +21,7 @@
 
 #define IOADDR(x)		(XCHAL_KIO_BYPASS_VADDR + (x))
 #define IO_SPACE_LIMIT ~0
+#define PCI_IOBASE		((void __iomem *)XCHAL_KIO_BYPASS_VADDR)
 
 #ifdef CONFIG_MMU
 
@@ -78,8 +78,6 @@
 
 #endif /* CONFIG_MMU */
 
-#endif	/* __KERNEL__ */
-
 #include <asm-generic/io.h>
 
 #endif	/* _XTENSA_IO_H */
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index 6c6ed23..0f71a51 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -12,7 +12,7 @@
 #define _XTENSA_IRQ_H
 
 #include <linux/init.h>
-#include <variant/core.h>
+#include <asm/core.h>
 
 #ifdef CONFIG_PLATFORM_NR_IRQS
 # define PLATFORM_NR_IRQS CONFIG_PLATFORM_NR_IRQS
diff --git a/arch/xtensa/include/asm/irqflags.h b/arch/xtensa/include/asm/irqflags.h
index 407606e..1289068 100644
--- a/arch/xtensa/include/asm/irqflags.h
+++ b/arch/xtensa/include/asm/irqflags.h
@@ -12,6 +12,7 @@
 #ifndef _XTENSA_IRQFLAGS_H
 #define _XTENSA_IRQFLAGS_H
 
+#include <linux/stringify.h>
 #include <linux/types.h>
 #include <asm/processor.h>
 
@@ -26,7 +27,7 @@
 {
 	unsigned long flags;
 #if XTENSA_FAKE_NMI
-#if defined(CONFIG_DEBUG_KERNEL) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
+#if defined(CONFIG_DEBUG_MISC) && (LOCKLEVEL | TOPLEVEL) >= XCHAL_DEBUGLEVEL
 	unsigned long tmp;
 
 	asm volatile("rsr	%0, ps\t\n"
diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h
new file mode 100644
index 0000000..c812bf8
--- /dev/null
+++ b/arch/xtensa/include/asm/jump_label.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2018 Cadence Design Systems Inc. */
+
+#ifndef _ASM_XTENSA_JUMP_LABEL_H
+#define _ASM_XTENSA_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+#define JUMP_LABEL_NOP_SIZE 3
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+					       bool branch)
+{
+	asm_volatile_goto("1:\n\t"
+			  "_nop\n\t"
+			  ".pushsection __jump_table,  \"aw\"\n\t"
+			  ".word 1b, %l[l_yes], %c0\n\t"
+			  ".popsection\n\t"
+			  : :  "i" (&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+						    bool branch)
+{
+	/*
+	 * Xtensa assembler will mark certain points in the code
+	 * as unreachable, so that later assembler or linker relaxation
+	 * passes could use them. A spot right after the J instruction
+	 * is one such point. Assembler and/or linker may insert padding
+	 * or literals here, breaking code flow in case the J instruction
+	 * is later replaced with NOP. Put a label right after the J to
+	 * make it reachable and wrap both into a no-transform block
+	 * to avoid any assembler interference with this.
+	 */
+	asm_volatile_goto("1:\n\t"
+			  ".begin no-transform\n\t"
+			  "_j %l[l_yes]\n\t"
+			  "2:\n\t"
+			  ".end no-transform\n\t"
+			  ".pushsection __jump_table,  \"aw\"\n\t"
+			  ".word 1b, %l[l_yes], %c0\n\t"
+			  ".popsection\n\t"
+			  : :  "i" (&((char *)key)[branch]) :  : l_yes);
+
+	return false;
+l_yes:
+	return true;
+}
+
+typedef u32 jump_label_t;
+
+struct jump_entry {
+	jump_label_t code;
+	jump_label_t target;
+	jump_label_t key;
+};
+
+#endif  /* __ASSEMBLY__ */
+#endif
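
Note: these arch hooks back the generic static-key API; a typical
consumer looks like the following sketch (generic kernel API; the key
and helper names here are hypothetical):

  #include <linux/jump_label.h>

  extern void do_slow_feature(void);      /* hypothetical helper */

  static DEFINE_STATIC_KEY_FALSE(my_feature_key);

  void my_hot_path(void)
  {
          /* Compiles to the 3-byte NOP from arch_static_branch() while
           * the key is disabled; patched to a jump when enabled. */
          if (static_branch_unlikely(&my_feature_key))
                  do_slow_feature();
  }
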
diff --git a/arch/xtensa/include/asm/pci-bridge.h b/arch/xtensa/include/asm/pci-bridge.h
index 0b68c76..4055269 100644
--- a/arch/xtensa/include/asm/pci-bridge.h
+++ b/arch/xtensa/include/asm/pci-bridge.h
@@ -11,8 +11,6 @@
 #ifndef _XTENSA_PCI_BRIDGE_H
 #define _XTENSA_PCI_BRIDGE_H
 
-#ifdef __KERNEL__
-
 struct device_node;
 struct pci_controller;
 
@@ -84,5 +82,4 @@
 int early_write_config_word(struct pci_controller*, int, int, int, u16);
 int early_write_config_dword(struct pci_controller*, int, int, int, u32);
 
-#endif	/* __KERNEL__ */
 #endif	/* _XTENSA_PCI_BRIDGE_H */
diff --git a/arch/xtensa/include/asm/pci.h b/arch/xtensa/include/asm/pci.h
index 8830240..8e2b48a 100644
--- a/arch/xtensa/include/asm/pci.h
+++ b/arch/xtensa/include/asm/pci.h
@@ -11,8 +11,6 @@
 #ifndef _XTENSA_PCI_H
 #define _XTENSA_PCI_H
 
-#ifdef __KERNEL__
-
 /* Can be used to override the logic in pci_scan_bus for skipping
  * already-configured bus numbers - to be used for buggy BIOSes
  * or architectures with incomplete PCI setup by the loader
@@ -45,8 +43,6 @@
 #define ARCH_GENERIC_PCI_MMAP_RESOURCE	1
 #define arch_can_pci_mmap_io()		1
 
-#endif /* __KERNEL__ */
-
 /* Generic PCI */
 #include <asm-generic/pci.h>
 
diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h
index 1065bc8..1d38f0e 100644
--- a/arch/xtensa/include/asm/pgalloc.h
+++ b/arch/xtensa/include/asm/pgalloc.h
@@ -1,18 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * include/asm-xtensa/pgalloc.h
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * Copyright (C) 2001-2007 Tensilica Inc.
  */
 
 #ifndef _XTENSA_PGALLOC_H
 #define _XTENSA_PGALLOC_H
 
-#ifdef __KERNEL__
-
 #include <linux/highmem.h>
 #include <linux/slab.h>
 
@@ -38,8 +33,7 @@
 	free_page((unsigned long)pgd);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					 unsigned long address)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *ptep;
 	int i;
@@ -52,17 +46,16 @@
 	return ptep;
 }
 
-static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
-					unsigned long addr)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
 	pte_t *pte;
 	struct page *page;
 
-	pte = pte_alloc_one_kernel(mm, addr);
+	pte = pte_alloc_one_kernel(mm);
 	if (!pte)
 		return NULL;
 	page = virt_to_page(pte);
-	if (!pgtable_page_ctor(page)) {
+	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
 		return NULL;
 	}
@@ -76,10 +69,9 @@
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 {
-	pgtable_page_dtor(pte);
+	pgtable_pte_page_dtor(pte);
 	__free_page(pte);
 }
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
-#endif /* __KERNEL__ */
 #endif /* _XTENSA_PGALLOC_H */
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index 29cfe42..3f7fe5a 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * include/asm-xtensa/pgtable.h
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
  * Copyright (C) 2001 - 2013 Tensilica Inc.
  */
 
@@ -241,7 +238,6 @@
 # define swapper_pg_dir NULL
 static inline void paging_init(void) { }
 #endif
-static inline void pgtable_cache_init(void) { }
 
 /*
  * The pmd contains the kernel virtual address of the pte page.
diff --git a/arch/xtensa/include/asm/platform.h b/arch/xtensa/include/asm/platform.h
index 5604833..913826d 100644
--- a/arch/xtensa/include/asm/platform.h
+++ b/arch/xtensa/include/asm/platform.h
@@ -55,16 +55,6 @@
 extern void platform_heartbeat (void);
 
 /*
- * platform_pcibios_init is called to allow the platform to setup the pci bus.
- */
-extern void platform_pcibios_init (void);
-
-/*
- * platform_pcibios_fixup allows to modify the PCI configuration.
- */
-extern int platform_pcibios_fixup (void);
-
-/*
  * platform_calibrate_ccount calibrates cpu clock freq (CONFIG_XTENSA_CALIBRATE)
  */
 extern void platform_calibrate_ccount (void);
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h
index 677bc76..7495520 100644
--- a/arch/xtensa/include/asm/processor.h
+++ b/arch/xtensa/include/asm/processor.h
@@ -10,9 +10,10 @@
 #ifndef _XTENSA_PROCESSOR_H
 #define _XTENSA_PROCESSOR_H
 
-#include <variant/core.h>
+#include <asm/core.h>
 
 #include <linux/compiler.h>
+#include <linux/stringify.h>
 #include <asm/ptrace.h>
 #include <asm/types.h>
 #include <asm/regs.h>
@@ -156,14 +157,6 @@
 	int align[0] __attribute__ ((aligned(16)));
 };
 
-
-/*
- * Default implementation of macro that returns current
- * instruction pointer ("program counter").
- */
-#define current_text_addr()  ({ __label__ _l; _l: &&_l;})
-
-
 /* This decides where the kernel will search for a free chunk of vm
  * space during mmap's.
  */
@@ -183,26 +176,36 @@
 
 /*
  * Do necessary setup to start up a newly executed thread.
- * Note: We set-up ps as if we did a call4 to the new pc.
+ * Note: When windowed ABI is used for userspace we set-up ps
+ *       as if we did a call4 to the new pc.
  *       set_thread_state in signal.c depends on it.
  */
-#define USER_PS_VALUE ((1 << PS_WOE_BIT) |				\
+#if IS_ENABLED(CONFIG_USER_ABI_CALL0)
+#define USER_PS_VALUE ((USER_RING << PS_RING_SHIFT) |			\
+		       (1 << PS_UM_BIT) |				\
+		       (1 << PS_EXCM_BIT))
+#else
+#define USER_PS_VALUE (PS_WOE_MASK |					\
 		       (1 << PS_CALLINC_SHIFT) |			\
 		       (USER_RING << PS_RING_SHIFT) |			\
 		       (1 << PS_UM_BIT) |				\
 		       (1 << PS_EXCM_BIT))
+#endif
 
 /* Clearing a0 terminates the backtrace. */
 #define start_thread(regs, new_pc, new_sp) \
-	memset(regs, 0, sizeof(*regs)); \
-	regs->pc = new_pc; \
-	regs->ps = USER_PS_VALUE; \
-	regs->areg[1] = new_sp; \
-	regs->areg[0] = 0; \
-	regs->wmask = 1; \
-	regs->depc = 0; \
-	regs->windowbase = 0; \
-	regs->windowstart = 1;
+	do { \
+		memset((regs), 0, sizeof(*(regs))); \
+		(regs)->pc = (new_pc); \
+		(regs)->ps = USER_PS_VALUE; \
+		(regs)->areg[1] = (new_sp); \
+		(regs)->areg[0] = 0; \
+		(regs)->wmask = 1; \
+		(regs)->depc = 0; \
+		(regs)->windowbase = 0; \
+		(regs)->windowstart = 1; \
+		(regs)->syscall = NO_SYSCALL; \
+	} while (0)
 
 /* Forward declaration */
 struct task_struct;
@@ -220,11 +223,18 @@
 
 /* Special register access. */
 
-#define WSR(v,sr) __asm__ __volatile__ ("wsr %0,"__stringify(sr) :: "a"(v));
-#define RSR(v,sr) __asm__ __volatile__ ("rsr %0,"__stringify(sr) : "=a"(v));
+#define xtensa_set_sr(x, sr) \
+	({ \
+	 unsigned int v = (unsigned int)(x); \
+	 __asm__ __volatile__ ("wsr %0, "__stringify(sr) :: "a"(v)); \
+	 })
 
-#define set_sr(x,sr) ({unsigned int v=(unsigned int)x; WSR(v,sr);})
-#define get_sr(sr) ({unsigned int v; RSR(v,sr); v; })
+#define xtensa_get_sr(sr) \
+	({ \
+	 unsigned int v; \
+	 __asm__ __volatile__ ("rsr %0, "__stringify(sr) : "=a"(v)); \
+	 v; \
+	 })
 
 #ifndef XCHAL_HAVE_EXTERN_REGS
 #define XCHAL_HAVE_EXTERN_REGS 0
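
Note: xtensa_get_sr()/xtensa_set_sr() replace the statement-style
RSR/WSR macros with expressions; usage fragment (see the timex.h
conversion below for in-tree users):

  unsigned long ccount = xtensa_get_sr(ccount);  /* was RSR_CCOUNT() */
  xtensa_set_sr(0, ccount);                      /* was WSR_CCOUNT() */
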
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 3a5c591..b109416 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -39,6 +39,8 @@
  *		+-----------------------+ --------
  */
 
+#define NO_SYSCALL (-1)
+
 #ifndef __ASSEMBLY__
 
 #include <asm/coprocessor.h>
@@ -78,7 +80,7 @@
 	unsigned long areg[16];
 };
 
-#include <variant/core.h>
+#include <asm/core.h>
 
 # define arch_has_single_step()	(1)
 # define task_pt_regs(tsk) ((struct pt_regs*) \
@@ -100,6 +102,11 @@
 
 #define user_stack_pointer(regs) ((regs)->areg[1])
 
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+	return regs->areg[2];
+}
+
 #else	/* __ASSEMBLY__ */
 
 # include <asm/asm-offsets.h>
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index 477594e..ce184e7 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -81,6 +81,7 @@
 /*  PS register fields.  */
 
 #define PS_WOE_BIT		18
+#define PS_WOE_MASK		0x00040000
 #define PS_CALLINC_SHIFT	16
 #define PS_CALLINC_MASK		0x00030000
 #define PS_OWB_SHIFT		8
diff --git a/arch/xtensa/include/asm/segment.h b/arch/xtensa/include/asm/segment.h
deleted file mode 100644
index 98964ad..0000000
--- a/arch/xtensa/include/asm/segment.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * include/asm-xtensa/segment.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_SEGMENT_H
-#define _XTENSA_SEGMENT_H
-
-#include <linux/uaccess.h>
-
-#endif	/* _XTENSA_SEGEMENT_H */
diff --git a/arch/xtensa/include/asm/spinlock.h b/arch/xtensa/include/asm/spinlock.h
index c6e1290..584b0de 100644
--- a/arch/xtensa/include/asm/spinlock.h
+++ b/arch/xtensa/include/asm/spinlock.h
@@ -12,188 +12,9 @@
 #define _XTENSA_SPINLOCK_H
 
 #include <asm/barrier.h>
-#include <asm/processor.h>
+#include <asm/qrwlock.h>
+#include <asm/qspinlock.h>
 
-/*
- * spinlock
- *
- * There is at most one owner of a spinlock.  There are not different
- * types of spinlock owners like there are for rwlocks (see below).
- *
- * When trying to obtain a spinlock, the function "spins" forever, or busy-
- * waits, until the lock is obtained.  When spinning, presumably some other
- * owner will soon give up the spinlock making it available to others.  Use
- * the trylock functions to avoid spinning forever.
- *
- * possible values:
- *
- *    0         nobody owns the spinlock
- *    1         somebody owns the spinlock
- */
-
-#define arch_spin_is_locked(x) ((x)->slock != 0)
-
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"1:     movi    %0, 1\n"
-			"       s32c1i  %0, %1, 0\n"
-			"       bnez    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (&lock->slock)
-			: "memory");
-}
-
-/* Returns 1 if the lock is obtained, 0 otherwise. */
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"       movi    %0, 1\n"
-			"       s32c1i  %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&lock->slock)
-			: "memory");
-
-	return tmp == 0 ? 1 : 0;
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       s32ri   %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&lock->slock)
-			: "memory");
-}
-
-/*
- * rwlock
- *
- * Read-write locks are really a more flexible spinlock.  They allow
- * multiple readers but only one writer.  Write ownership is exclusive
- * (i.e., all other readers and writers are blocked from ownership while
- * there is a write owner).  These rwlocks are unfair to writers.  Writers
- * can be starved for an indefinite time by readers.
- *
- * possible values:
- *
- *   0          nobody owns the rwlock
- *  >0          one or more readers own the rwlock
- *                (the positive value is the actual number of readers)
- *  0x80000000  one writer owns the rwlock, no other writers, no readers
- */
-
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"1:     movi    %0, 1\n"
-			"       slli    %0, %0, 31\n"
-			"       s32c1i  %0, %1, 0\n"
-			"       bnez    %0, 1b\n"
-			: "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-}
-
-/* Returns 1 if the lock is obtained, 0 otherwise. */
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       wsr     %0, scompare1\n"
-			"       movi    %0, 1\n"
-			"       slli    %0, %0, 31\n"
-			"       s32c1i  %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-
-	return tmp == 0 ? 1 : 0;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       movi    %0, 0\n"
-			"       s32ri   %0, %1, 0\n"
-			: "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-}
-
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned long tmp;
-	unsigned long result;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %2, 0\n"
-			"       bltz    %1, 1b\n"
-			"       wsr     %1, scompare1\n"
-			"       addi    %0, %1, 1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-}
-
-/* Returns 1 if the lock is obtained, 0 otherwise. */
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long result;
-	unsigned long tmp;
-
-	__asm__ __volatile__(
-			"       l32i    %1, %2, 0\n"
-			"       addi    %0, %1, 1\n"
-			"       bltz    %0, 1f\n"
-			"       wsr     %1, scompare1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       sub     %0, %0, %1\n"
-			"1:\n"
-			: "=&a" (result), "=&a" (tmp)
-			: "a" (&rw->lock)
-			: "memory");
-
-	return result == 0;
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long tmp1, tmp2;
-
-	__asm__ __volatile__(
-			"1:     l32i    %1, %2, 0\n"
-			"       addi    %0, %1, -1\n"
-			"       wsr     %1, scompare1\n"
-			"       s32c1i  %0, %2, 0\n"
-			"       bne     %0, %1, 1b\n"
-			: "=&a" (tmp1), "=&a" (tmp2)
-			: "a" (&rw->lock)
-			: "memory");
-}
+#define smp_mb__after_spinlock()	smp_mb()
 
 #endif	/* _XTENSA_SPINLOCK_H */
diff --git a/arch/xtensa/include/asm/spinlock_types.h b/arch/xtensa/include/asm/spinlock_types.h
index bb1fe6c..64c9389 100644
--- a/arch/xtensa/include/asm/spinlock_types.h
+++ b/arch/xtensa/include/asm/spinlock_types.h
@@ -2,20 +2,11 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
 # error "please don't include this file directly"
 #endif
 
-typedef struct {
-	volatile unsigned int slock;
-} arch_spinlock_t;
-
-#define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 }
-
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qspinlock_types.h>
+#include <asm-generic/qrwlock_types.h>
 
 #endif
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 3673ff1..359ab40 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -1,27 +1,87 @@
 /*
- * include/asm-xtensa/syscall.h
- *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
  * Copyright (C) 2001 - 2007 Tensilica Inc.
+ * Copyright (C) 2018 Cadence Design Systems Inc.
  */
 
-struct pt_regs;
-asmlinkage long xtensa_ptrace(long, long, long, long);
-asmlinkage long xtensa_sigreturn(struct pt_regs*);
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H
+
+#include <linux/err.h>
+#include <asm/ptrace.h>
+#include <uapi/linux/audit.h>
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+	return AUDIT_ARCH_XTENSA;
+}
+
+typedef void (*syscall_t)(void);
+extern syscall_t sys_call_table[];
+
+static inline long syscall_get_nr(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	return regs->syscall;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+				    struct pt_regs *regs)
+{
+	/* Do nothing. */
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+				     struct pt_regs *regs)
+{
+	/* 0 if syscall succeeded, otherwise -Errorcode */
+	return IS_ERR_VALUE(regs->areg[2]) ? regs->areg[2] : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+					    struct pt_regs *regs)
+{
+	return regs->areg[2];
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+					    struct pt_regs *regs,
+					    int error, long val)
+{
+	regs->areg[0] = (long) error ? error : val;
+}
+
+#define SYSCALL_MAX_ARGS 6
+#define XTENSA_SYSCALL_ARGUMENT_REGS {6, 3, 4, 5, 8, 9}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 unsigned long *args)
+{
+	static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
+	unsigned int i;
+
+	for (i = 0; i < 6; ++i)
+		args[i] = regs->areg[reg[i]];
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+					 struct pt_regs *regs,
+					 const unsigned long *args)
+{
+	static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
+	unsigned int i;
+
+	for (i = 0; i < 6; ++i)
+		regs->areg[reg[i]] = args[i];
+}
+
 asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
 asmlinkage long xtensa_shmat(int, char __user *, int);
 asmlinkage long xtensa_fadvise64_64(int, int,
 				    unsigned long long, unsigned long long);
 
-/* Should probably move to linux/syscalls.h */
-struct pollfd;
-asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
-			     fd_set __user *exp, struct timespec __user *tsp,
-			     void __user *sig);
-asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
-			  struct timespec __user *tsp,
-			  const sigset_t __user *sigmask,
-			  size_t sigsetsize);
+#endif
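
Note: the XTENSA_SYSCALL_ARGUMENT_REGS mapping {6, 3, 4, 5, 8, 9} is
what lets generic code pull syscall arguments out of pt_regs. Usage
sketch from a hypothetical tracer:

  unsigned long args[SYSCALL_MAX_ARGS];

  syscall_get_arguments(current, task_pt_regs(current), args);
  /* args[0] == regs->areg[6], args[1] == regs->areg[3], ... */
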
diff --git a/arch/xtensa/include/asm/thread_info.h b/arch/xtensa/include/asm/thread_info.h
index 2bd19ae..f092cc3 100644
--- a/arch/xtensa/include/asm/thread_info.h
+++ b/arch/xtensa/include/asm/thread_info.h
@@ -11,6 +11,7 @@
 #ifndef _XTENSA_THREAD_INFO_H
 #define _XTENSA_THREAD_INFO_H
 
+#include <linux/stringify.h>
 #include <asm/kmem_layout.h>
 
 #define CURRENT_SHIFT KERNEL_STACK_SHIFT
@@ -100,13 +101,12 @@
 /*
  * thread information flags
  * - these are process state flags that various assembly files may need to access
- * - pending work-to-be-done flags are in LSW
- * - other flags in MSW
  */
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
 #define TIF_SINGLESTEP		3	/* restore singlestep on return to user mode */
+#define TIF_SYSCALL_TRACEPOINT	4	/* syscall tracepoint instrumentation */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	6	/* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME	7	/* callback before returning to user */
@@ -116,18 +116,10 @@
 #define _TIF_SIGPENDING		(1<<TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1<<TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP		(1<<TIF_SINGLESTEP)
+#define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 
-#define _TIF_WORK_MASK		0x0000FFFE	/* work to do on interrupt/exception return */
-#define _TIF_ALLWORK_MASK	0x0000FFFF	/* work to do on any return to u-space */
-
-/*
- * Thread-synchronous status.
- *
- * This is different from the flags in that nobody else
- * ever touches our thread-synchronous status, so we don't
- * have to worry about atomic accesses.
- */
-#define TS_USEDFPU		0x0001	/* FPU was used by this task this quantum (SMP) */
+#define _TIF_WORK_MASK		(_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \
+				 _TIF_SYSCALL_TRACEPOINT)
 
 #define THREAD_SIZE KERNEL_STACK_SIZE
 #define THREAD_SIZE_ORDER (KERNEL_STACK_SHIFT - PAGE_SHIFT)
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index f9b389d..233ec75 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -10,7 +10,6 @@
 #define _XTENSA_TIMEX_H
 
 #include <asm/processor.h>
-#include <linux/stringify.h>
 
 #if XCHAL_NUM_TIMERS > 0 && \
 	XTENSA_INT_LEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL
@@ -40,33 +39,24 @@
  * Register access.
  */
 
-#define WSR_CCOUNT(r)	  asm volatile ("wsr %0, ccount" :: "a" (r))
-#define RSR_CCOUNT(r)	  asm volatile ("rsr %0, ccount" : "=a" (r))
-#define WSR_CCOMPARE(x,r) asm volatile ("wsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) :: "a"(r))
-#define RSR_CCOMPARE(x,r) asm volatile ("rsr %0,"__stringify(SREG_CCOMPARE)"+"__stringify(x) : "=a"(r))
-
 static inline unsigned long get_ccount (void)
 {
-	unsigned long ccount;
-	RSR_CCOUNT(ccount);
-	return ccount;
+	return xtensa_get_sr(ccount);
 }
 
 static inline void set_ccount (unsigned long ccount)
 {
-	WSR_CCOUNT(ccount);
+	xtensa_set_sr(ccount, ccount);
 }
 
 static inline unsigned long get_linux_timer (void)
 {
-	unsigned ccompare;
-	RSR_CCOMPARE(LINUX_TIMER, ccompare);
-	return ccompare;
+	return xtensa_get_sr(SREG_CCOMPARE + LINUX_TIMER);
 }
 
 static inline void set_linux_timer (unsigned long ccompare)
 {
-	WSR_CCOMPARE(LINUX_TIMER, ccompare);
+	xtensa_set_sr(ccompare, SREG_CCOMPARE + LINUX_TIMER);
 }
 
 #endif	/* _XTENSA_TIMEX_H */
diff --git a/arch/xtensa/include/asm/tlb.h b/arch/xtensa/include/asm/tlb.h
index 0d766f9..5088993 100644
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -14,32 +14,6 @@
 #include <asm/cache.h>
 #include <asm/page.h>
 
-#if (DCACHE_WAY_SIZE <= PAGE_SIZE)
-
-/* Note, read http://lkml.org/lkml/2004/1/15/6 */
-
-# define tlb_start_vma(tlb,vma)			do { } while (0)
-# define tlb_end_vma(tlb,vma)			do { } while (0)
-
-#else
-
-# define tlb_start_vma(tlb, vma)					      \
-	do {								      \
-		if (!tlb->fullmm)					      \
-			flush_cache_range(vma, vma->vm_start, vma->vm_end);   \
-	} while(0)
-
-# define tlb_end_vma(tlb, vma)						      \
-	do {								      \
-		if (!tlb->fullmm)					      \
-			flush_tlb_range(vma, vma->vm_start, vma->vm_end);     \
-	} while(0)
-
-#endif
-
-#define __tlb_remove_tlb_entry(tlb,pte,addr)	do { } while (0)
-#define tlb_flush(tlb)				flush_tlb_mm((tlb)->mm)
-
 #include <asm-generic/tlb.h>
 
 #define __pte_free_tlb(tlb, pte, address)	pte_free((tlb)->mm, pte)
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 06875fe..856e2da 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -160,9 +160,6 @@
 		invalidate_dtlb_entry(tlb_entry);
 }
 
-#define check_pgt_cache()	do { } while (0)
-
-
 /*
  * DO NOT USE THESE FUNCTIONS.  These instructions aren't part of the Xtensa
  * ISA and exist only for test purposes..
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index f5cd7a7..f720a57 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -25,8 +25,6 @@
 	void *fixup;
 	/* For passing a parameter to fixup */
 	void *fixup_param;
-	/* For fast syscall handler */
-	unsigned long syscall_save;
 	/* Fast user exception handlers */
 	void *fast_user_handler[EXCCAUSE_N];
 	/* Fast kernel exception handlers */
diff --git a/arch/xtensa/include/asm/types.h b/arch/xtensa/include/asm/types.h
deleted file mode 100644
index 2b410b8..0000000
--- a/arch/xtensa/include/asm/types.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
- * include/asm-xtensa/types.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-#ifndef _XTENSA_TYPES_H
-#define _XTENSA_TYPES_H
-
-#include <uapi/asm/types.h>
-
-#ifndef __ASSEMBLY__
-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-
-#define BITS_PER_LONG 32
-
-#endif
-#endif	/* _XTENSA_TYPES_H */
diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h
index f1158b4..3f80386 100644
--- a/arch/xtensa/include/asm/uaccess.h
+++ b/arch/xtensa/include/asm/uaccess.h
@@ -32,7 +32,6 @@
 #define KERNEL_DS	((mm_segment_t) { 0 })
 #define USER_DS		((mm_segment_t) { 1 })
 
-#define get_ds()	(KERNEL_DS)
 #define get_fs()	(current->thread.current_ds)
 #define set_fs(val)	(current->thread.current_ds = (val))
 
@@ -42,7 +41,7 @@
 #define __user_ok(addr, size) \
 		(((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size)))
 #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size)))
-#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size))
+#define access_ok(addr, size) __access_ok((unsigned long)(addr), (size))
 
 #define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE)
 
@@ -86,7 +85,7 @@
 ({									\
 	long __pu_err = -EFAULT;					\
 	__typeof__(*(ptr)) *__pu_addr = (ptr);				\
-	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
+	if (access_ok(__pu_addr, size))			\
 		__put_user_size((x), __pu_addr, (size), __pu_err);	\
 	__pu_err;							\
 })
@@ -101,7 +100,7 @@
 	case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break;	\
 	case 8: {							\
 		     __typeof__(*ptr) __v64 = x;			\
-		     retval = __copy_to_user(ptr, &__v64, 8);		\
+		     retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;	\
 		     break;						\
 	        }							\
 	default: __put_user_bad();					\
@@ -133,14 +132,14 @@
 #define __check_align_1  ""
 
 #define __check_align_2				\
-	"   _bbci.l %3,  0, 1f		\n"	\
-	"   movi    %0, %4		\n"	\
+	"   _bbci.l %[addr], 0, 1f	\n"	\
+	"   movi    %[err], %[efault]	\n"	\
 	"   _j      2f			\n"
 
 #define __check_align_4				\
-	"   _bbsi.l %3,  0, 0f		\n"	\
-	"   _bbci.l %3,  1, 1f		\n"	\
-	"0: movi    %0, %4		\n"	\
+	"   _bbsi.l %[addr], 0, 0f	\n"	\
+	"   _bbci.l %[addr], 1, 1f	\n"	\
+	"0: movi    %[err], %[efault]	\n"	\
 	"   _j      2f			\n"
 
 
@@ -152,41 +151,40 @@
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __put_user_asm(x, addr, err, align, insn, cb)	\
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
 __asm__ __volatile__(					\
 	__check_align_##align				\
-	"1: "insn"  %2, %3, 0		\n"		\
+	"1: "insn"  %[x], %[addr], 0	\n"		\
 	"2:				\n"		\
 	"   .section  .fixup,\"ax\"	\n"		\
 	"   .align 4			\n"		\
-	"4:				\n"		\
-	"   .long  2b			\n"		\
+	"   .literal_position		\n"		\
 	"5:				\n"		\
-	"   l32r   %1, 4b		\n"		\
-	"   movi   %0, %4		\n"		\
-	"   jx     %1			\n"		\
+	"   movi   %[tmp], 2b		\n"		\
+	"   movi   %[err], %[efault]	\n"		\
+	"   jx     %[tmp]		\n"		\
 	"   .previous			\n"		\
 	"   .section  __ex_table,\"a\"	\n"		\
 	"   .long	1b, 5b		\n"		\
 	"   .previous"					\
-	:"=r" (err), "=r" (cb)				\
-	:"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+	:[err] "+r"(err_), [tmp] "=r"(cb)		\
+	:[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
 
 #define __get_user_nocheck(x, ptr, size)			\
 ({								\
-	long __gu_err, __gu_val;				\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
-	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
+	long __gu_err;						\
+	__get_user_size((x), (ptr), (size), __gu_err);		\
 	__gu_err;						\
 })
 
 #define __get_user_check(x, ptr, size)					\
 ({									\
-	long __gu_err = -EFAULT, __gu_val = 0;				\
+	long __gu_err = -EFAULT;					\
 	const __typeof__(*(ptr)) *__gu_addr = (ptr);			\
-	if (access_ok(VERIFY_READ, __gu_addr, size))			\
-		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
-	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	if (access_ok(__gu_addr, size))					\
+		__get_user_size((x), __gu_addr, (size), __gu_err);	\
+	else								\
+		(x) = 0;						\
 	__gu_err;							\
 })
 
@@ -200,8 +198,17 @@
 	case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
 	case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
 	case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
-	case 8: retval = __copy_from_user(&x, ptr, 8);    break;	\
-	default: (x) = __get_user_bad();				\
+	case 8: {							\
+		u64 __x;						\
+		if (unlikely(__copy_from_user(&__x, ptr, 8))) {		\
+			retval = -EFAULT;				\
+			(x) = 0;					\
+		} else {						\
+			(x) = *(__force __typeof__((ptr)))&__x;		\
+		}							\
+		break;							\
+	}								\
+	default: (x) = 0; __get_user_bad();				\
 	}								\
 } while (0)
 
@@ -210,26 +217,28 @@
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__(			\
-	__check_align_##align			\
-	"1: "insn"  %2, %3, 0		\n"	\
-	"2:				\n"	\
-	"   .section  .fixup,\"ax\"	\n"	\
-	"   .align 4			\n"	\
-	"4:				\n"	\
-	"   .long  2b			\n"	\
-	"5:				\n"	\
-	"   l32r   %1, 4b		\n"	\
-	"   movi   %2, 0		\n"	\
-	"   movi   %0, %4		\n"	\
-	"   jx     %1			\n"	\
-	"   .previous			\n"	\
-	"   .section  __ex_table,\"a\"	\n"	\
-	"   .long	1b, 5b		\n"	\
-	"   .previous"				\
-	:"=r" (err), "=r" (cb), "=r" (x)	\
-	:"r" (addr), "i" (-EFAULT), "0" (err))
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do {							\
+	u32 __x = 0;					\
+	__asm__ __volatile__(				\
+		__check_align_##align			\
+		"1: "insn"  %[x], %[addr], 0	\n"	\
+		"2:				\n"	\
+		"   .section  .fixup,\"ax\"	\n"	\
+		"   .align 4			\n"	\
+		"   .literal_position		\n"	\
+		"5:				\n"	\
+		"   movi   %[tmp], 2b		\n"	\
+		"   movi   %[err], %[efault]	\n"	\
+		"   jx     %[tmp]		\n"	\
+		"   .previous			\n"	\
+		"   .section  __ex_table,\"a\"	\n"	\
+		"   .long	1b, 5b		\n"	\
+		"   .previous"				\
+		:[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+		:[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+	(x_) = (__force __typeof__(*(addr_)))__x;	\
+} while (0)
 
 
 /*
@@ -271,7 +280,7 @@
 static inline unsigned long
 clear_user(void *addr, unsigned long size)
 {
-	if (access_ok(VERIFY_WRITE, addr, size))
+	if (access_ok(addr, size))
 		return __xtensa_clear_user(addr, size);
 	return size ? -EFAULT : 0;
 }
@@ -286,7 +295,7 @@
 static inline long
 strncpy_from_user(char *dst, const char *src, long count)
 {
-	if (access_ok(VERIFY_READ, src, 1))
+	if (access_ok(src, 1))
 		return __strncpy_user(dst, src, count);
 	return -EFAULT;
 }
diff --git a/arch/xtensa/include/asm/unistd.h b/arch/xtensa/include/asm/unistd.h
index ed66db3..b522362 100644
--- a/arch/xtensa/include/asm/unistd.h
+++ b/arch/xtensa/include/asm/unistd.h
@@ -3,23 +3,14 @@
 #define _XTENSA_UNISTD_H
 
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
 #include <uapi/asm/unistd.h>
 
+#define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_LLSEEK
+#define __ARCH_WANT_SYS_UTIME32
 #define __ARCH_WANT_SYS_GETPGRP
 
-/* 
- * Ignore legacy system calls in the checksyscalls.sh script
- */
-
-#define __IGNORE_fork				/* use clone */
-#define __IGNORE_time
-#define __IGNORE_alarm				/* use setitimer */
-#define __IGNORE_pause
-#define __IGNORE_mmap				/* use mmap2 */
-#define __IGNORE_vfork				/* use clone */
-#define __IGNORE_fadvise64			/* use fadvise64_64 */
+#define NR_syscalls				__NR_syscalls
 
 #endif /* _XTENSA_UNISTD_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 7111280..79fe300 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -18,7 +18,7 @@
 #ifndef _XTENSA_VECTORS_H
 #define _XTENSA_VECTORS_H
 
-#include <variant/core.h>
+#include <asm/core.h>
 #include <asm/kmem_layout.h>
 
 #if XCHAL_HAVE_PTP_MMU
diff --git a/arch/xtensa/include/asm/vga.h b/arch/xtensa/include/asm/vga.h
deleted file mode 100644
index 1fd8cab..0000000
--- a/arch/xtensa/include/asm/vga.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * include/asm-xtensa/vga.h
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2001 - 2005 Tensilica Inc.
- */
-
-#ifndef _XTENSA_VGA_H
-#define _XTENSA_VGA_H
-
-#define VGA_MAP_MEM(x,s) (unsigned long)phys_to_virt(x)
-
-#define vga_readb(x)	(*(x))
-#define vga_writeb(x,y)	(*(y) = (x))
-
-#endif