Update Linux to v5.4.2

Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index 73474bb..f6947da 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -19,13 +19,29 @@
 
 #define p4d_alloc(mm, pgd, address)	(pgd)
 #define p4d_offset(pgd, start)		(pgd)
-#define p4d_none(p4d)			0
-#define p4d_bad(p4d)			0
-#define p4d_present(p4d)		1
+
+#ifndef __ASSEMBLY__
+static inline int p4d_none(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+	return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+	return 1;
+}
+#endif
+
 #define p4d_ERROR(p4d)			do { } while (0)
 #define p4d_clear(p4d)			pgd_clear(p4d)
 #define p4d_val(p4d)			pgd_val(p4d)
 #define p4d_populate(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
+#define p4d_populate_safe(mm, p4d, pud)	pgd_populate(mm, p4d, pud)
 #define p4d_page(p4d)			pgd_page(p4d)
 #define p4d_page_vaddr(p4d)		pgd_page_vaddr(p4d)
 
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
new file mode 100644
index 0000000..adff14f
--- /dev/null
+++ b/include/asm-generic/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# asm headers that all architectures except um should have
+# (This file is not included when SRCARCH=um since UML borrows several
+# asm headers from the host architecture.)
+
+mandatory-y += simd.h
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index 0d4b1d3..e8730c6 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -1,3 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
 /*
  * This file provides wrappers with KASAN instrumentation for atomic operations.
  * To use this functionality an arch's atomic.h file needs to define all
@@ -9,459 +14,1775 @@
  * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
  * double instrumentation.
  */
-
-#ifndef _LINUX_ATOMIC_INSTRUMENTED_H
-#define _LINUX_ATOMIC_INSTRUMENTED_H
+#ifndef _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
+#define _ASM_GENERIC_ATOMIC_INSTRUMENTED_H
 
 #include <linux/build_bug.h>
 #include <linux/kasan-checks.h>
 
-static __always_inline int atomic_read(const atomic_t *v)
+static inline int
+atomic_read(const atomic_t *v)
 {
 	kasan_check_read(v, sizeof(*v));
 	return arch_atomic_read(v);
 }
+#define atomic_read atomic_read
 
-static __always_inline s64 atomic64_read(const atomic64_t *v)
+#if defined(arch_atomic_read_acquire)
+static inline int
+atomic_read_acquire(const atomic_t *v)
 {
 	kasan_check_read(v, sizeof(*v));
-	return arch_atomic64_read(v);
+	return arch_atomic_read_acquire(v);
 }
+#define atomic_read_acquire atomic_read_acquire
+#endif
 
-static __always_inline void atomic_set(atomic_t *v, int i)
+static inline void
+atomic_set(atomic_t *v, int i)
 {
 	kasan_check_write(v, sizeof(*v));
 	arch_atomic_set(v, i);
 }
+#define atomic_set atomic_set
 
-static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+#if defined(arch_atomic_set_release)
+static inline void
+atomic_set_release(atomic_t *v, int i)
 {
 	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_set(v, i);
+	arch_atomic_set_release(v, i);
 }
-
-static __always_inline int atomic_xchg(atomic_t *v, int i)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_xchg(v, i);
-}
-
-static __always_inline s64 atomic64_xchg(atomic64_t *v, s64 i)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_xchg(v, i);
-}
-
-static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_cmpxchg(v, old, new);
-}
-
-static __always_inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_cmpxchg(v, old, new);
-}
-
-#ifdef arch_atomic_try_cmpxchg
-#define atomic_try_cmpxchg atomic_try_cmpxchg
-static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
-{
-	kasan_check_write(v, sizeof(*v));
-	kasan_check_read(old, sizeof(*old));
-	return arch_atomic_try_cmpxchg(v, old, new);
-}
+#define atomic_set_release atomic_set_release
 #endif
 
-#ifdef arch_atomic64_try_cmpxchg
-#define atomic64_try_cmpxchg atomic64_try_cmpxchg
-static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
-{
-	kasan_check_write(v, sizeof(*v));
-	kasan_check_read(old, sizeof(*old));
-	return arch_atomic64_try_cmpxchg(v, old, new);
-}
-#endif
-
-#ifdef arch_atomic_fetch_add_unless
-#define atomic_fetch_add_unless atomic_fetch_add_unless
-static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_fetch_add_unless(v, a, u);
-}
-#endif
-
-#ifdef arch_atomic64_fetch_add_unless
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
-static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_fetch_add_unless(v, a, u);
-}
-#endif
-
-#ifdef arch_atomic_inc
-#define atomic_inc atomic_inc
-static __always_inline void atomic_inc(atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic_inc(v);
-}
-#endif
-
-#ifdef arch_atomic64_inc
-#define atomic64_inc atomic64_inc
-static __always_inline void atomic64_inc(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_inc(v);
-}
-#endif
-
-#ifdef arch_atomic_dec
-#define atomic_dec atomic_dec
-static __always_inline void atomic_dec(atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic_dec(v);
-}
-#endif
-
-#ifdef atch_atomic64_dec
-#define atomic64_dec
-static __always_inline void atomic64_dec(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_dec(v);
-}
-#endif
-
-static __always_inline void atomic_add(int i, atomic_t *v)
+static inline void
+atomic_add(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	arch_atomic_add(i, v);
 }
+#define atomic_add atomic_add
 
-static __always_inline void atomic64_add(s64 i, atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_add(i, v);
-}
-
-static __always_inline void atomic_sub(int i, atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic_sub(i, v);
-}
-
-static __always_inline void atomic64_sub(s64 i, atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_sub(i, v);
-}
-
-static __always_inline void atomic_and(int i, atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic_and(i, v);
-}
-
-static __always_inline void atomic64_and(s64 i, atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_and(i, v);
-}
-
-static __always_inline void atomic_or(int i, atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic_or(i, v);
-}
-
-static __always_inline void atomic64_or(s64 i, atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_or(i, v);
-}
-
-static __always_inline void atomic_xor(int i, atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic_xor(i, v);
-}
-
-static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	arch_atomic64_xor(i, v);
-}
-
-#ifdef arch_atomic_inc_return
-#define atomic_inc_return atomic_inc_return
-static __always_inline int atomic_inc_return(atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_inc_return(v);
-}
-#endif
-
-#ifdef arch_atomic64_in_return
-#define atomic64_inc_return atomic64_inc_return
-static __always_inline s64 atomic64_inc_return(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_inc_return(v);
-}
-#endif
-
-#ifdef arch_atomic_dec_return
-#define atomic_dec_return atomic_dec_return
-static __always_inline int atomic_dec_return(atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_dec_return(v);
-}
-#endif
-
-#ifdef arch_atomic64_dec_return
-#define atomic64_dec_return atomic64_dec_return
-static __always_inline s64 atomic64_dec_return(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_dec_return(v);
-}
-#endif
-
-#ifdef arch_atomic64_inc_not_zero
-#define atomic64_inc_not_zero atomic64_inc_not_zero
-static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_inc_not_zero(v);
-}
-#endif
-
-#ifdef arch_atomic64_dec_if_positive
-#define atomic64_dec_if_positive atomic64_dec_if_positive
-static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_dec_if_positive(v);
-}
-#endif
-
-#ifdef arch_atomic_dec_and_test
-#define atomic_dec_and_test atomic_dec_and_test
-static __always_inline bool atomic_dec_and_test(atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_dec_and_test(v);
-}
-#endif
-
-#ifdef arch_atomic64_dec_and_test
-#define atomic64_dec_and_test atomic64_dec_and_test
-static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_dec_and_test(v);
-}
-#endif
-
-#ifdef arch_atomic_inc_and_test
-#define atomic_inc_and_test atomic_inc_and_test
-static __always_inline bool atomic_inc_and_test(atomic_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_inc_and_test(v);
-}
-#endif
-
-#ifdef arch_atomic64_inc_and_test
-#define atomic64_inc_and_test atomic64_inc_and_test
-static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
-{
-	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_inc_and_test(v);
-}
-#endif
-
-static __always_inline int atomic_add_return(int i, atomic_t *v)
+#if !defined(arch_atomic_add_return_relaxed) || defined(arch_atomic_add_return)
+static inline int
+atomic_add_return(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_add_return(i, v);
 }
+#define atomic_add_return atomic_add_return
+#endif
 
-static __always_inline s64 atomic64_add_return(s64 i, atomic64_t *v)
+#if defined(arch_atomic_add_return_acquire)
+static inline int
+atomic_add_return_acquire(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_add_return(i, v);
+	return arch_atomic_add_return_acquire(i, v);
 }
+#define atomic_add_return_acquire atomic_add_return_acquire
+#endif
 
-static __always_inline int atomic_sub_return(int i, atomic_t *v)
+#if defined(arch_atomic_add_return_release)
+static inline int
+atomic_add_return_release(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic_sub_return(i, v);
+	return arch_atomic_add_return_release(i, v);
 }
+#define atomic_add_return_release atomic_add_return_release
+#endif
 
-static __always_inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
+#if defined(arch_atomic_add_return_relaxed)
+static inline int
+atomic_add_return_relaxed(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_sub_return(i, v);
+	return arch_atomic_add_return_relaxed(i, v);
 }
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#endif
 
-static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+#if !defined(arch_atomic_fetch_add_relaxed) || defined(arch_atomic_fetch_add)
+static inline int
+atomic_fetch_add(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_fetch_add(i, v);
 }
+#define atomic_fetch_add atomic_fetch_add
+#endif
 
-static __always_inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_add_acquire)
+static inline int
+atomic_fetch_add_acquire(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_fetch_add(i, v);
+	return arch_atomic_fetch_add_acquire(i, v);
 }
+#define atomic_fetch_add_acquire atomic_fetch_add_acquire
+#endif
 
-static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_add_release)
+static inline int
+atomic_fetch_add_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_add_release(i, v);
+}
+#define atomic_fetch_add_release atomic_fetch_add_release
+#endif
+
+#if defined(arch_atomic_fetch_add_relaxed)
+static inline int
+atomic_fetch_add_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_add_relaxed(i, v);
+}
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#endif
+
+static inline void
+atomic_sub(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_sub(i, v);
+}
+#define atomic_sub atomic_sub
+
+#if !defined(arch_atomic_sub_return_relaxed) || defined(arch_atomic_sub_return)
+static inline int
+atomic_sub_return(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_sub_return(i, v);
+}
+#define atomic_sub_return atomic_sub_return
+#endif
+
+#if defined(arch_atomic_sub_return_acquire)
+static inline int
+atomic_sub_return_acquire(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_sub_return_acquire(i, v);
+}
+#define atomic_sub_return_acquire atomic_sub_return_acquire
+#endif
+
+#if defined(arch_atomic_sub_return_release)
+static inline int
+atomic_sub_return_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_sub_return_release(i, v);
+}
+#define atomic_sub_return_release atomic_sub_return_release
+#endif
+
+#if defined(arch_atomic_sub_return_relaxed)
+static inline int
+atomic_sub_return_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_sub_return_relaxed(i, v);
+}
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#endif
+
+#if !defined(arch_atomic_fetch_sub_relaxed) || defined(arch_atomic_fetch_sub)
+static inline int
+atomic_fetch_sub(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_fetch_sub(i, v);
 }
+#define atomic_fetch_sub atomic_fetch_sub
+#endif
 
-static __always_inline s64 atomic64_fetch_sub(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_sub_acquire)
+static inline int
+atomic_fetch_sub_acquire(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_fetch_sub(i, v);
+	return arch_atomic_fetch_sub_acquire(i, v);
 }
+#define atomic_fetch_sub_acquire atomic_fetch_sub_acquire
+#endif
 
-static __always_inline int atomic_fetch_and(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_sub_release)
+static inline int
+atomic_fetch_sub_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_sub_release(i, v);
+}
+#define atomic_fetch_sub_release atomic_fetch_sub_release
+#endif
+
+#if defined(arch_atomic_fetch_sub_relaxed)
+static inline int
+atomic_fetch_sub_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_sub_relaxed(i, v);
+}
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#endif
+
+#if defined(arch_atomic_inc)
+static inline void
+atomic_inc(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_inc(v);
+}
+#define atomic_inc atomic_inc
+#endif
+
+#if defined(arch_atomic_inc_return)
+static inline int
+atomic_inc_return(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_return(v);
+}
+#define atomic_inc_return atomic_inc_return
+#endif
+
+#if defined(arch_atomic_inc_return_acquire)
+static inline int
+atomic_inc_return_acquire(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_return_acquire(v);
+}
+#define atomic_inc_return_acquire atomic_inc_return_acquire
+#endif
+
+#if defined(arch_atomic_inc_return_release)
+static inline int
+atomic_inc_return_release(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_return_release(v);
+}
+#define atomic_inc_return_release atomic_inc_return_release
+#endif
+
+#if defined(arch_atomic_inc_return_relaxed)
+static inline int
+atomic_inc_return_relaxed(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_return_relaxed(v);
+}
+#define atomic_inc_return_relaxed atomic_inc_return_relaxed
+#endif
+
+#if defined(arch_atomic_fetch_inc)
+static inline int
+atomic_fetch_inc(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_inc(v);
+}
+#define atomic_fetch_inc atomic_fetch_inc
+#endif
+
+#if defined(arch_atomic_fetch_inc_acquire)
+static inline int
+atomic_fetch_inc_acquire(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_inc_acquire(v);
+}
+#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
+#endif
+
+#if defined(arch_atomic_fetch_inc_release)
+static inline int
+atomic_fetch_inc_release(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_inc_release(v);
+}
+#define atomic_fetch_inc_release atomic_fetch_inc_release
+#endif
+
+#if defined(arch_atomic_fetch_inc_relaxed)
+static inline int
+atomic_fetch_inc_relaxed(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_inc_relaxed(v);
+}
+#define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed
+#endif
+
+#if defined(arch_atomic_dec)
+static inline void
+atomic_dec(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_dec(v);
+}
+#define atomic_dec atomic_dec
+#endif
+
+#if defined(arch_atomic_dec_return)
+static inline int
+atomic_dec_return(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_dec_return(v);
+}
+#define atomic_dec_return atomic_dec_return
+#endif
+
+#if defined(arch_atomic_dec_return_acquire)
+static inline int
+atomic_dec_return_acquire(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_dec_return_acquire(v);
+}
+#define atomic_dec_return_acquire atomic_dec_return_acquire
+#endif
+
+#if defined(arch_atomic_dec_return_release)
+static inline int
+atomic_dec_return_release(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_dec_return_release(v);
+}
+#define atomic_dec_return_release atomic_dec_return_release
+#endif
+
+#if defined(arch_atomic_dec_return_relaxed)
+static inline int
+atomic_dec_return_relaxed(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_dec_return_relaxed(v);
+}
+#define atomic_dec_return_relaxed atomic_dec_return_relaxed
+#endif
+
+#if defined(arch_atomic_fetch_dec)
+static inline int
+atomic_fetch_dec(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_dec(v);
+}
+#define atomic_fetch_dec atomic_fetch_dec
+#endif
+
+#if defined(arch_atomic_fetch_dec_acquire)
+static inline int
+atomic_fetch_dec_acquire(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_dec_acquire(v);
+}
+#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
+#endif
+
+#if defined(arch_atomic_fetch_dec_release)
+static inline int
+atomic_fetch_dec_release(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_dec_release(v);
+}
+#define atomic_fetch_dec_release atomic_fetch_dec_release
+#endif
+
+#if defined(arch_atomic_fetch_dec_relaxed)
+static inline int
+atomic_fetch_dec_relaxed(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_dec_relaxed(v);
+}
+#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
+#endif
+
+static inline void
+atomic_and(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_and(i, v);
+}
+#define atomic_and atomic_and
+
+#if !defined(arch_atomic_fetch_and_relaxed) || defined(arch_atomic_fetch_and)
+static inline int
+atomic_fetch_and(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_fetch_and(i, v);
 }
+#define atomic_fetch_and atomic_fetch_and
+#endif
 
-static __always_inline s64 atomic64_fetch_and(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_and_acquire)
+static inline int
+atomic_fetch_and_acquire(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_fetch_and(i, v);
+	return arch_atomic_fetch_and_acquire(i, v);
 }
+#define atomic_fetch_and_acquire atomic_fetch_and_acquire
+#endif
 
-static __always_inline int atomic_fetch_or(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_and_release)
+static inline int
+atomic_fetch_and_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_and_release(i, v);
+}
+#define atomic_fetch_and_release atomic_fetch_and_release
+#endif
+
+#if defined(arch_atomic_fetch_and_relaxed)
+static inline int
+atomic_fetch_and_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_and_relaxed(i, v);
+}
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#endif
+
+#if defined(arch_atomic_andnot)
+static inline void
+atomic_andnot(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_andnot(i, v);
+}
+#define atomic_andnot atomic_andnot
+#endif
+
+#if defined(arch_atomic_fetch_andnot)
+static inline int
+atomic_fetch_andnot(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_andnot(i, v);
+}
+#define atomic_fetch_andnot atomic_fetch_andnot
+#endif
+
+#if defined(arch_atomic_fetch_andnot_acquire)
+static inline int
+atomic_fetch_andnot_acquire(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_andnot_acquire(i, v);
+}
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
+#endif
+
+#if defined(arch_atomic_fetch_andnot_release)
+static inline int
+atomic_fetch_andnot_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_andnot_release(i, v);
+}
+#define atomic_fetch_andnot_release atomic_fetch_andnot_release
+#endif
+
+#if defined(arch_atomic_fetch_andnot_relaxed)
+static inline int
+atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_andnot_relaxed(i, v);
+}
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
+#endif
+
+static inline void
+atomic_or(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_or(i, v);
+}
+#define atomic_or atomic_or
+
+#if !defined(arch_atomic_fetch_or_relaxed) || defined(arch_atomic_fetch_or)
+static inline int
+atomic_fetch_or(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_fetch_or(i, v);
 }
+#define atomic_fetch_or atomic_fetch_or
+#endif
 
-static __always_inline s64 atomic64_fetch_or(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_or_acquire)
+static inline int
+atomic_fetch_or_acquire(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_fetch_or(i, v);
+	return arch_atomic_fetch_or_acquire(i, v);
 }
+#define atomic_fetch_or_acquire atomic_fetch_or_acquire
+#endif
 
-static __always_inline int atomic_fetch_xor(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_or_release)
+static inline int
+atomic_fetch_or_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_or_release(i, v);
+}
+#define atomic_fetch_or_release atomic_fetch_or_release
+#endif
+
+#if defined(arch_atomic_fetch_or_relaxed)
+static inline int
+atomic_fetch_or_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_or_relaxed(i, v);
+}
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#endif
+
+static inline void
+atomic_xor(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic_xor(i, v);
+}
+#define atomic_xor atomic_xor
+
+#if !defined(arch_atomic_fetch_xor_relaxed) || defined(arch_atomic_fetch_xor)
+static inline int
+atomic_fetch_xor(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_fetch_xor(i, v);
 }
+#define atomic_fetch_xor atomic_fetch_xor
+#endif
 
-static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_xor_acquire)
+static inline int
+atomic_fetch_xor_acquire(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_fetch_xor(i, v);
+	return arch_atomic_fetch_xor_acquire(i, v);
 }
+#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
+#endif
 
-#ifdef arch_atomic_sub_and_test
-#define atomic_sub_and_test atomic_sub_and_test
-static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
+#if defined(arch_atomic_fetch_xor_release)
+static inline int
+atomic_fetch_xor_release(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_xor_release(i, v);
+}
+#define atomic_fetch_xor_release atomic_fetch_xor_release
+#endif
+
+#if defined(arch_atomic_fetch_xor_relaxed)
+static inline int
+atomic_fetch_xor_relaxed(int i, atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_xor_relaxed(i, v);
+}
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#endif
+
+#if !defined(arch_atomic_xchg_relaxed) || defined(arch_atomic_xchg)
+static inline int
+atomic_xchg(atomic_t *v, int i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_xchg(v, i);
+}
+#define atomic_xchg atomic_xchg
+#endif
+
+#if defined(arch_atomic_xchg_acquire)
+static inline int
+atomic_xchg_acquire(atomic_t *v, int i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_xchg_acquire(v, i);
+}
+#define atomic_xchg_acquire atomic_xchg_acquire
+#endif
+
+#if defined(arch_atomic_xchg_release)
+static inline int
+atomic_xchg_release(atomic_t *v, int i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_xchg_release(v, i);
+}
+#define atomic_xchg_release atomic_xchg_release
+#endif
+
+#if defined(arch_atomic_xchg_relaxed)
+static inline int
+atomic_xchg_relaxed(atomic_t *v, int i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_xchg_relaxed(v, i);
+}
+#define atomic_xchg_relaxed atomic_xchg_relaxed
+#endif
+
+#if !defined(arch_atomic_cmpxchg_relaxed) || defined(arch_atomic_cmpxchg)
+static inline int
+atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_cmpxchg(v, old, new);
+}
+#define atomic_cmpxchg atomic_cmpxchg
+#endif
+
+#if defined(arch_atomic_cmpxchg_acquire)
+static inline int
+atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_cmpxchg_acquire(v, old, new);
+}
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic_cmpxchg_release)
+static inline int
+atomic_cmpxchg_release(atomic_t *v, int old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_cmpxchg_release(v, old, new);
+}
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#endif
+
+#if defined(arch_atomic_cmpxchg_relaxed)
+static inline int
+atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic_try_cmpxchg)
+static inline bool
+atomic_try_cmpxchg(atomic_t *v, int *old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic_try_cmpxchg(v, old, new);
+}
+#define atomic_try_cmpxchg atomic_try_cmpxchg
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_acquire)
+static inline bool
+atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic_try_cmpxchg_acquire(v, old, new);
+}
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_release)
+static inline bool
+atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic_try_cmpxchg_release(v, old, new);
+}
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
+#endif
+
+#if defined(arch_atomic_try_cmpxchg_relaxed)
+static inline bool
+atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic_sub_and_test)
+static inline bool
+atomic_sub_and_test(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_sub_and_test(i, v);
 }
+#define atomic_sub_and_test atomic_sub_and_test
 #endif
 
-#ifdef arch_atomic64_sub_and_test
-#define atomic64_sub_and_test atomic64_sub_and_test
-static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
+#if defined(arch_atomic_dec_and_test)
+static inline bool
+atomic_dec_and_test(atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
-	return arch_atomic64_sub_and_test(i, v);
+	return arch_atomic_dec_and_test(v);
 }
+#define atomic_dec_and_test atomic_dec_and_test
 #endif
 
-#ifdef arch_atomic_add_negative
-#define atomic_add_negative atomic_add_negative
-static __always_inline bool atomic_add_negative(int i, atomic_t *v)
+#if defined(arch_atomic_inc_and_test)
+static inline bool
+atomic_inc_and_test(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_and_test(v);
+}
+#define atomic_inc_and_test atomic_inc_and_test
+#endif
+
+#if defined(arch_atomic_add_negative)
+static inline bool
+atomic_add_negative(int i, atomic_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic_add_negative(i, v);
 }
+#define atomic_add_negative atomic_add_negative
 #endif
 
-#ifdef arch_atomic64_add_negative
-#define atomic64_add_negative atomic64_add_negative
-static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
+#if defined(arch_atomic_fetch_add_unless)
+static inline int
+atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_fetch_add_unless(v, a, u);
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+#endif
+
+#if defined(arch_atomic_add_unless)
+static inline bool
+atomic_add_unless(atomic_t *v, int a, int u)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_add_unless(v, a, u);
+}
+#define atomic_add_unless atomic_add_unless
+#endif
+
+#if defined(arch_atomic_inc_not_zero)
+static inline bool
+atomic_inc_not_zero(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_not_zero(v);
+}
+#define atomic_inc_not_zero atomic_inc_not_zero
+#endif
+
+#if defined(arch_atomic_inc_unless_negative)
+static inline bool
+atomic_inc_unless_negative(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_inc_unless_negative(v);
+}
+#define atomic_inc_unless_negative atomic_inc_unless_negative
+#endif
+
+#if defined(arch_atomic_dec_unless_positive)
+static inline bool
+atomic_dec_unless_positive(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_dec_unless_positive(v);
+}
+#define atomic_dec_unless_positive atomic_dec_unless_positive
+#endif
+
+#if defined(arch_atomic_dec_if_positive)
+static inline int
+atomic_dec_if_positive(atomic_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic_dec_if_positive(v);
+}
+#define atomic_dec_if_positive atomic_dec_if_positive
+#endif
+
+static inline s64
+atomic64_read(const atomic64_t *v)
+{
+	kasan_check_read(v, sizeof(*v));
+	return arch_atomic64_read(v);
+}
+#define atomic64_read atomic64_read
+
+#if defined(arch_atomic64_read_acquire)
+static inline s64
+atomic64_read_acquire(const atomic64_t *v)
+{
+	kasan_check_read(v, sizeof(*v));
+	return arch_atomic64_read_acquire(v);
+}
+#define atomic64_read_acquire atomic64_read_acquire
+#endif
+
+static inline void
+atomic64_set(atomic64_t *v, s64 i)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_set(v, i);
+}
+#define atomic64_set atomic64_set
+
+#if defined(arch_atomic64_set_release)
+static inline void
+atomic64_set_release(atomic64_t *v, s64 i)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_set_release(v, i);
+}
+#define atomic64_set_release atomic64_set_release
+#endif
+
+static inline void
+atomic64_add(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_add(i, v);
+}
+#define atomic64_add atomic64_add
+
+#if !defined(arch_atomic64_add_return_relaxed) || defined(arch_atomic64_add_return)
+static inline s64
+atomic64_add_return(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_add_return(i, v);
+}
+#define atomic64_add_return atomic64_add_return
+#endif
+
+#if defined(arch_atomic64_add_return_acquire)
+static inline s64
+atomic64_add_return_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_add_return_acquire(i, v);
+}
+#define atomic64_add_return_acquire atomic64_add_return_acquire
+#endif
+
+#if defined(arch_atomic64_add_return_release)
+static inline s64
+atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_add_return_release(i, v);
+}
+#define atomic64_add_return_release atomic64_add_return_release
+#endif
+
+#if defined(arch_atomic64_add_return_relaxed)
+static inline s64
+atomic64_add_return_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_add_return_relaxed(i, v);
+}
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#endif
+
+#if !defined(arch_atomic64_fetch_add_relaxed) || defined(arch_atomic64_fetch_add)
+static inline s64
+atomic64_fetch_add(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_add(i, v);
+}
+#define atomic64_fetch_add atomic64_fetch_add
+#endif
+
+#if defined(arch_atomic64_fetch_add_acquire)
+static inline s64
+atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_add_acquire(i, v);
+}
+#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_add_release)
+static inline s64
+atomic64_fetch_add_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_add_release(i, v);
+}
+#define atomic64_fetch_add_release atomic64_fetch_add_release
+#endif
+
+#if defined(arch_atomic64_fetch_add_relaxed)
+static inline s64
+atomic64_fetch_add_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_add_relaxed(i, v);
+}
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#endif
+
+static inline void
+atomic64_sub(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_sub(i, v);
+}
+#define atomic64_sub atomic64_sub
+
+#if !defined(arch_atomic64_sub_return_relaxed) || defined(arch_atomic64_sub_return)
+static inline s64
+atomic64_sub_return(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_sub_return(i, v);
+}
+#define atomic64_sub_return atomic64_sub_return
+#endif
+
+#if defined(arch_atomic64_sub_return_acquire)
+static inline s64
+atomic64_sub_return_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_sub_return_acquire(i, v);
+}
+#define atomic64_sub_return_acquire atomic64_sub_return_acquire
+#endif
+
+#if defined(arch_atomic64_sub_return_release)
+static inline s64
+atomic64_sub_return_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_sub_return_release(i, v);
+}
+#define atomic64_sub_return_release atomic64_sub_return_release
+#endif
+
+#if defined(arch_atomic64_sub_return_relaxed)
+static inline s64
+atomic64_sub_return_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_sub_return_relaxed(i, v);
+}
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#endif
+
+#if !defined(arch_atomic64_fetch_sub_relaxed) || defined(arch_atomic64_fetch_sub)
+static inline s64
+atomic64_fetch_sub(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_sub(i, v);
+}
+#define atomic64_fetch_sub atomic64_fetch_sub
+#endif
+
+#if defined(arch_atomic64_fetch_sub_acquire)
+static inline s64
+atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_sub_acquire(i, v);
+}
+#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_sub_release)
+static inline s64
+atomic64_fetch_sub_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_sub_release(i, v);
+}
+#define atomic64_fetch_sub_release atomic64_fetch_sub_release
+#endif
+
+#if defined(arch_atomic64_fetch_sub_relaxed)
+static inline s64
+atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_sub_relaxed(i, v);
+}
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#endif
+
+#if defined(arch_atomic64_inc)
+static inline void
+atomic64_inc(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_inc(v);
+}
+#define atomic64_inc atomic64_inc
+#endif
+
+#if defined(arch_atomic64_inc_return)
+static inline s64
+atomic64_inc_return(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_return(v);
+}
+#define atomic64_inc_return atomic64_inc_return
+#endif
+
+#if defined(arch_atomic64_inc_return_acquire)
+static inline s64
+atomic64_inc_return_acquire(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_return_acquire(v);
+}
+#define atomic64_inc_return_acquire atomic64_inc_return_acquire
+#endif
+
+#if defined(arch_atomic64_inc_return_release)
+static inline s64
+atomic64_inc_return_release(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_return_release(v);
+}
+#define atomic64_inc_return_release atomic64_inc_return_release
+#endif
+
+#if defined(arch_atomic64_inc_return_relaxed)
+static inline s64
+atomic64_inc_return_relaxed(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_return_relaxed(v);
+}
+#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
+#endif
+
+#if defined(arch_atomic64_fetch_inc)
+static inline s64
+atomic64_fetch_inc(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_inc(v);
+}
+#define atomic64_fetch_inc atomic64_fetch_inc
+#endif
+
+#if defined(arch_atomic64_fetch_inc_acquire)
+static inline s64
+atomic64_fetch_inc_acquire(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_inc_acquire(v);
+}
+#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_inc_release)
+static inline s64
+atomic64_fetch_inc_release(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_inc_release(v);
+}
+#define atomic64_fetch_inc_release atomic64_fetch_inc_release
+#endif
+
+#if defined(arch_atomic64_fetch_inc_relaxed)
+static inline s64
+atomic64_fetch_inc_relaxed(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_inc_relaxed(v);
+}
+#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
+#endif
+
+#if defined(arch_atomic64_dec)
+static inline void
+atomic64_dec(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_dec(v);
+}
+#define atomic64_dec atomic64_dec
+#endif
+
+#if defined(arch_atomic64_dec_return)
+static inline s64
+atomic64_dec_return(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_return(v);
+}
+#define atomic64_dec_return atomic64_dec_return
+#endif
+
+#if defined(arch_atomic64_dec_return_acquire)
+static inline s64
+atomic64_dec_return_acquire(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_return_acquire(v);
+}
+#define atomic64_dec_return_acquire atomic64_dec_return_acquire
+#endif
+
+#if defined(arch_atomic64_dec_return_release)
+static inline s64
+atomic64_dec_return_release(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_return_release(v);
+}
+#define atomic64_dec_return_release atomic64_dec_return_release
+#endif
+
+#if defined(arch_atomic64_dec_return_relaxed)
+static inline s64
+atomic64_dec_return_relaxed(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_return_relaxed(v);
+}
+#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
+#endif
+
+#if defined(arch_atomic64_fetch_dec)
+static inline s64
+atomic64_fetch_dec(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_dec(v);
+}
+#define atomic64_fetch_dec atomic64_fetch_dec
+#endif
+
+#if defined(arch_atomic64_fetch_dec_acquire)
+static inline s64
+atomic64_fetch_dec_acquire(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_dec_acquire(v);
+}
+#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_dec_release)
+static inline s64
+atomic64_fetch_dec_release(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_dec_release(v);
+}
+#define atomic64_fetch_dec_release atomic64_fetch_dec_release
+#endif
+
+#if defined(arch_atomic64_fetch_dec_relaxed)
+static inline s64
+atomic64_fetch_dec_relaxed(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_dec_relaxed(v);
+}
+#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
+#endif
+
+static inline void
+atomic64_and(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_and(i, v);
+}
+#define atomic64_and atomic64_and
+
+#if !defined(arch_atomic64_fetch_and_relaxed) || defined(arch_atomic64_fetch_and)
+static inline s64
+atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_and(i, v);
+}
+#define atomic64_fetch_and atomic64_fetch_and
+#endif
+
+#if defined(arch_atomic64_fetch_and_acquire)
+static inline s64
+atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_and_acquire(i, v);
+}
+#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_and_release)
+static inline s64
+atomic64_fetch_and_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_and_release(i, v);
+}
+#define atomic64_fetch_and_release atomic64_fetch_and_release
+#endif
+
+#if defined(arch_atomic64_fetch_and_relaxed)
+static inline s64
+atomic64_fetch_and_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_and_relaxed(i, v);
+}
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#endif
+
+#if defined(arch_atomic64_andnot)
+static inline void
+atomic64_andnot(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_andnot(i, v);
+}
+#define atomic64_andnot atomic64_andnot
+#endif
+
+#if defined(arch_atomic64_fetch_andnot)
+static inline s64
+atomic64_fetch_andnot(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_andnot(i, v);
+}
+#define atomic64_fetch_andnot atomic64_fetch_andnot
+#endif
+
+#if defined(arch_atomic64_fetch_andnot_acquire)
+static inline s64
+atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_andnot_acquire(i, v);
+}
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_andnot_release)
+static inline s64
+atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_andnot_release(i, v);
+}
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
+#endif
+
+#if defined(arch_atomic64_fetch_andnot_relaxed)
+static inline s64
+atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_andnot_relaxed(i, v);
+}
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
+#endif
+
+static inline void
+atomic64_or(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_or(i, v);
+}
+#define atomic64_or atomic64_or
+
+#if !defined(arch_atomic64_fetch_or_relaxed) || defined(arch_atomic64_fetch_or)
+static inline s64
+atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_or(i, v);
+}
+#define atomic64_fetch_or atomic64_fetch_or
+#endif
+
+#if defined(arch_atomic64_fetch_or_acquire)
+static inline s64
+atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_or_acquire(i, v);
+}
+#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_or_release)
+static inline s64
+atomic64_fetch_or_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_or_release(i, v);
+}
+#define atomic64_fetch_or_release atomic64_fetch_or_release
+#endif
+
+#if defined(arch_atomic64_fetch_or_relaxed)
+static inline s64
+atomic64_fetch_or_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_or_relaxed(i, v);
+}
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#endif
+
+static inline void
+atomic64_xor(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	arch_atomic64_xor(i, v);
+}
+#define atomic64_xor atomic64_xor
+
+#if !defined(arch_atomic64_fetch_xor_relaxed) || defined(arch_atomic64_fetch_xor)
+static inline s64
+atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_xor(i, v);
+}
+#define atomic64_fetch_xor atomic64_fetch_xor
+#endif
+
+#if defined(arch_atomic64_fetch_xor_acquire)
+static inline s64
+atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_xor_acquire(i, v);
+}
+#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
+#endif
+
+#if defined(arch_atomic64_fetch_xor_release)
+static inline s64
+atomic64_fetch_xor_release(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_xor_release(i, v);
+}
+#define atomic64_fetch_xor_release atomic64_fetch_xor_release
+#endif
+
+#if defined(arch_atomic64_fetch_xor_relaxed)
+static inline s64
+atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_xor_relaxed(i, v);
+}
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#endif
+
+#if !defined(arch_atomic64_xchg_relaxed) || defined(arch_atomic64_xchg)
+static inline s64
+atomic64_xchg(atomic64_t *v, s64 i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_xchg(v, i);
+}
+#define atomic64_xchg atomic64_xchg
+#endif
+
+#if defined(arch_atomic64_xchg_acquire)
+static inline s64
+atomic64_xchg_acquire(atomic64_t *v, s64 i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_xchg_acquire(v, i);
+}
+#define atomic64_xchg_acquire atomic64_xchg_acquire
+#endif
+
+#if defined(arch_atomic64_xchg_release)
+static inline s64
+atomic64_xchg_release(atomic64_t *v, s64 i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_xchg_release(v, i);
+}
+#define atomic64_xchg_release atomic64_xchg_release
+#endif
+
+#if defined(arch_atomic64_xchg_relaxed)
+static inline s64
+atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_xchg_relaxed(v, i);
+}
+#define atomic64_xchg_relaxed atomic64_xchg_relaxed
+#endif
+
+#if !defined(arch_atomic64_cmpxchg_relaxed) || defined(arch_atomic64_cmpxchg)
+static inline s64
+atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_cmpxchg(v, old, new);
+}
+#define atomic64_cmpxchg atomic64_cmpxchg
+#endif
+
+#if defined(arch_atomic64_cmpxchg_acquire)
+static inline s64
+atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_cmpxchg_acquire(v, old, new);
+}
+#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic64_cmpxchg_release)
+static inline s64
+atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_cmpxchg_release(v, old, new);
+}
+#define atomic64_cmpxchg_release atomic64_cmpxchg_release
+#endif
+
+#if defined(arch_atomic64_cmpxchg_relaxed)
+static inline s64
+atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg)
+static inline bool
+atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic64_try_cmpxchg(v, old, new);
+}
+#define atomic64_try_cmpxchg atomic64_try_cmpxchg
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg_acquire)
+static inline bool
+atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic64_try_cmpxchg_acquire(v, old, new);
+}
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg_release)
+static inline bool
+atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic64_try_cmpxchg_release(v, old, new);
+}
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
+#endif
+
+#if defined(arch_atomic64_try_cmpxchg_relaxed)
+static inline bool
+atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
+{
+	kasan_check_write(v, sizeof(*v));
+	kasan_check_write(old, sizeof(*old));
+	return arch_atomic64_try_cmpxchg_relaxed(v, old, new);
+}
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
+#endif
+
+#if defined(arch_atomic64_sub_and_test)
+static inline bool
+atomic64_sub_and_test(s64 i, atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_sub_and_test(i, v);
+}
+#define atomic64_sub_and_test atomic64_sub_and_test
+#endif
+
+#if defined(arch_atomic64_dec_and_test)
+static inline bool
+atomic64_dec_and_test(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_and_test(v);
+}
+#define atomic64_dec_and_test atomic64_dec_and_test
+#endif
+
+#if defined(arch_atomic64_inc_and_test)
+static inline bool
+atomic64_inc_and_test(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_and_test(v);
+}
+#define atomic64_inc_and_test atomic64_inc_and_test
+#endif
+
+#if defined(arch_atomic64_add_negative)
+static inline bool
+atomic64_add_negative(s64 i, atomic64_t *v)
 {
 	kasan_check_write(v, sizeof(*v));
 	return arch_atomic64_add_negative(i, v);
 }
+#define atomic64_add_negative atomic64_add_negative
 #endif
 
-#define xchg(ptr, new)							\
+#if defined(arch_atomic64_fetch_add_unless)
+static inline s64
+atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_fetch_add_unless(v, a, u);
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#endif
+
+#if defined(arch_atomic64_add_unless)
+static inline bool
+atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_add_unless(v, a, u);
+}
+#define atomic64_add_unless atomic64_add_unless
+#endif
+
+#if defined(arch_atomic64_inc_not_zero)
+static inline bool
+atomic64_inc_not_zero(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_not_zero(v);
+}
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+#endif
+
+#if defined(arch_atomic64_inc_unless_negative)
+static inline bool
+atomic64_inc_unless_negative(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_inc_unless_negative(v);
+}
+#define atomic64_inc_unless_negative atomic64_inc_unless_negative
+#endif
+
+#if defined(arch_atomic64_dec_unless_positive)
+static inline bool
+atomic64_dec_unless_positive(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_unless_positive(v);
+}
+#define atomic64_dec_unless_positive atomic64_dec_unless_positive
+#endif
+
+#if defined(arch_atomic64_dec_if_positive)
+static inline s64
+atomic64_dec_if_positive(atomic64_t *v)
+{
+	kasan_check_write(v, sizeof(*v));
+	return arch_atomic64_dec_if_positive(v);
+}
+#define atomic64_dec_if_positive atomic64_dec_if_positive
+#endif
+
+#if !defined(arch_xchg_relaxed) || defined(arch_xchg)
+#define xchg(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
-	arch_xchg(__ai_ptr, (new));					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_xchg(__ai_ptr, __VA_ARGS__);				\
 })
+#endif
 
-#define cmpxchg(ptr, old, new)						\
+#if defined(arch_xchg_acquire)
+#define xchg_acquire(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
-	arch_cmpxchg(__ai_ptr, (old), (new));				\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_xchg_acquire(__ai_ptr, __VA_ARGS__);				\
 })
+#endif
 
-#define sync_cmpxchg(ptr, old, new)					\
+#if defined(arch_xchg_release)
+#define xchg_release(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
-	arch_sync_cmpxchg(__ai_ptr, (old), (new));			\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_xchg_release(__ai_ptr, __VA_ARGS__);				\
 })
+#endif
 
-#define cmpxchg_local(ptr, old, new)					\
+#if defined(arch_xchg_relaxed)
+#define xchg_relaxed(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
-	arch_cmpxchg_local(__ai_ptr, (old), (new));			\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_xchg_relaxed(__ai_ptr, __VA_ARGS__);				\
 })
+#endif
 
-#define cmpxchg64(ptr, old, new)					\
+#if !defined(arch_cmpxchg_relaxed) || defined(arch_cmpxchg)
+#define cmpxchg(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
-	arch_cmpxchg64(__ai_ptr, (old), (new));				\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
 })
+#endif
 
-#define cmpxchg64_local(ptr, old, new)					\
+#if defined(arch_cmpxchg_acquire)
+#define cmpxchg_acquire(ptr, ...)						\
 ({									\
 	typeof(ptr) __ai_ptr = (ptr);					\
-	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));			\
-	arch_cmpxchg64_local(__ai_ptr, (old), (new));			\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__);				\
 })
+#endif
 
-#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
+#if defined(arch_cmpxchg_release)
+#define cmpxchg_release(ptr, ...)						\
 ({									\
-	typeof(p1) __ai_p1 = (p1);					\
-	kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1));		\
-	arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2));	\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg_release(__ai_ptr, __VA_ARGS__);				\
+})
+#endif
+
+#if defined(arch_cmpxchg_relaxed)
+#define cmpxchg_relaxed(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__);				\
+})
+#endif
+
+#if !defined(arch_cmpxchg64_relaxed) || defined(arch_cmpxchg64)
+#define cmpxchg64(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg64(__ai_ptr, __VA_ARGS__);				\
+})
+#endif
+
+#if defined(arch_cmpxchg64_acquire)
+#define cmpxchg64_acquire(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__);				\
+})
+#endif
+
+#if defined(arch_cmpxchg64_release)
+#define cmpxchg64_release(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__);				\
+})
+#endif
+
+#if defined(arch_cmpxchg64_relaxed)
+#define cmpxchg64_relaxed(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__);				\
+})
+#endif
+
+#define cmpxchg_local(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg_local(__ai_ptr, __VA_ARGS__);				\
 })
 
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)				\
-({										\
-	typeof(p1) __ai_p1 = (p1);						\
-	kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1));			\
-	arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2));	\
+#define cmpxchg64_local(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__);				\
 })
 
-#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
+#define sync_cmpxchg(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, sizeof(*__ai_ptr));		\
+	arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__);				\
+})
+
+#define cmpxchg_double(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));		\
+	arch_cmpxchg_double(__ai_ptr, __VA_ARGS__);				\
+})
+
+
+#define cmpxchg_double_local(ptr, ...)						\
+({									\
+	typeof(ptr) __ai_ptr = (ptr);					\
+	kasan_check_write(__ai_ptr, 2 * sizeof(*__ai_ptr));		\
+	arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__);				\
+})
+
+#endif /* _ASM_GENERIC_ATOMIC_INSTRUMENTED_H */
+// b29b625d5de9280f680e42c7be859b55b15e5f6a
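
The instrumented xchg/cmpxchg wrappers above all follow one pattern: capture the pointer once, let the sanitizer check that the target is writable, then forward to the arch_ implementation. A minimal standalone C sketch of that pattern, with a no-op check and a GCC __atomic builtin standing in for kasan_check_write() and arch_cmpxchg() (all names below are invented for illustration, not kernel APIs):

/* Illustrative userspace analogue of the instrumented cmpxchg pattern:
 * validate the target, then delegate to the "arch" operation.
 * check_writable() and arch_cmpxchg_int() are stand-ins for this sketch. */
#include <stdio.h>
#include <stddef.h>

static void check_writable(void *p, size_t sz)
{
	/* In the kernel this is kasan_check_write(); here it is a no-op. */
	(void)p;
	(void)sz;
}

static int arch_cmpxchg_int(int *ptr, int old, int new)
{
	__atomic_compare_exchange_n(ptr, &old, new, 0,
				    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
	return old;	/* cmpxchg returns the previous value */
}

#define cmpxchg_int(ptr, ...)					\
({								\
	int *__ai_ptr = (ptr);					\
	check_writable(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg_int(__ai_ptr, __VA_ARGS__);		\
})

int main(void)
{
	int v = 1;
	int prev = cmpxchg_int(&v, 1, 2);	/* succeeds: v becomes 2 */

	printf("prev=%d new=%d\n", prev, v);	/* prints prev=1 new=2 */
	return 0;
}
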
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index 87d1447..881c7e2 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -1,269 +1,1013 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-long.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
 #ifndef _ASM_GENERIC_ATOMIC_LONG_H
 #define _ASM_GENERIC_ATOMIC_LONG_H
-/*
- * Copyright (C) 2005 Silicon Graphics, Inc.
- *	Christoph Lameter
- *
- * Allows to provide arch independent atomic definitions without the need to
- * edit all arch specific atomic.h files.
- */
 
 #include <asm/types.h>
 
-/*
- * Suppport for atomic_long_t
- *
- * Casts for parameters are avoided for existing atomic functions in order to
- * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
- * macros of a platform may have.
- */
-
-#if BITS_PER_LONG == 64
-
+#ifdef CONFIG_64BIT
 typedef atomic64_t atomic_long_t;
-
-#define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
-#define ATOMIC_LONG_PFX(x)	atomic64 ## x
-#define ATOMIC_LONG_TYPE	s64
-
+#define ATOMIC_LONG_INIT(i)		ATOMIC64_INIT(i)
+#define atomic_long_cond_read_acquire	atomic64_cond_read_acquire
+#define atomic_long_cond_read_relaxed	atomic64_cond_read_relaxed
 #else
-
 typedef atomic_t atomic_long_t;
-
-#define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
-#define ATOMIC_LONG_PFX(x)	atomic ## x
-#define ATOMIC_LONG_TYPE	int
-
+#define ATOMIC_LONG_INIT(i)		ATOMIC_INIT(i)
+#define atomic_long_cond_read_acquire	atomic_cond_read_acquire
+#define atomic_long_cond_read_relaxed	atomic_cond_read_relaxed
 #endif
 
-#define ATOMIC_LONG_READ_OP(mo)						\
-static inline long atomic_long_read##mo(const atomic_long_t *l)		\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	return (long)ATOMIC_LONG_PFX(_read##mo)(v);			\
-}
-ATOMIC_LONG_READ_OP()
-ATOMIC_LONG_READ_OP(_acquire)
+#ifdef CONFIG_64BIT
 
-#undef ATOMIC_LONG_READ_OP
-
-#define ATOMIC_LONG_SET_OP(mo)						\
-static inline void atomic_long_set##mo(atomic_long_t *l, long i)	\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	ATOMIC_LONG_PFX(_set##mo)(v, i);				\
-}
-ATOMIC_LONG_SET_OP()
-ATOMIC_LONG_SET_OP(_release)
-
-#undef ATOMIC_LONG_SET_OP
-
-#define ATOMIC_LONG_ADD_SUB_OP(op, mo)					\
-static inline long							\
-atomic_long_##op##_return##mo(long i, atomic_long_t *l)			\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(i, v);		\
-}
-ATOMIC_LONG_ADD_SUB_OP(add,)
-ATOMIC_LONG_ADD_SUB_OP(add, _relaxed)
-ATOMIC_LONG_ADD_SUB_OP(add, _acquire)
-ATOMIC_LONG_ADD_SUB_OP(add, _release)
-ATOMIC_LONG_ADD_SUB_OP(sub,)
-ATOMIC_LONG_ADD_SUB_OP(sub, _relaxed)
-ATOMIC_LONG_ADD_SUB_OP(sub, _acquire)
-ATOMIC_LONG_ADD_SUB_OP(sub, _release)
-
-#undef ATOMIC_LONG_ADD_SUB_OP
-
-#define atomic_long_cmpxchg_relaxed(l, old, new) \
-	(ATOMIC_LONG_PFX(_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
-					   (old), (new)))
-#define atomic_long_cmpxchg_acquire(l, old, new) \
-	(ATOMIC_LONG_PFX(_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
-					   (old), (new)))
-#define atomic_long_cmpxchg_release(l, old, new) \
-	(ATOMIC_LONG_PFX(_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
-					   (old), (new)))
-#define atomic_long_cmpxchg(l, old, new) \
-	(ATOMIC_LONG_PFX(_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), (old), (new)))
-
-
-#define atomic_long_try_cmpxchg_relaxed(l, old, new) \
-	(ATOMIC_LONG_PFX(_try_cmpxchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(l), \
-					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg_acquire(l, old, new) \
-	(ATOMIC_LONG_PFX(_try_cmpxchg_acquire)((ATOMIC_LONG_PFX(_t) *)(l), \
-					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg_release(l, old, new) \
-	(ATOMIC_LONG_PFX(_try_cmpxchg_release)((ATOMIC_LONG_PFX(_t) *)(l), \
-					   (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-#define atomic_long_try_cmpxchg(l, old, new) \
-	(ATOMIC_LONG_PFX(_try_cmpxchg)((ATOMIC_LONG_PFX(_t) *)(l), \
-				       (ATOMIC_LONG_TYPE *)(old), (ATOMIC_LONG_TYPE)(new)))
-
-
-#define atomic_long_xchg_relaxed(v, new) \
-	(ATOMIC_LONG_PFX(_xchg_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg_acquire(v, new) \
-	(ATOMIC_LONG_PFX(_xchg_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg_release(v, new) \
-	(ATOMIC_LONG_PFX(_xchg_release)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-#define atomic_long_xchg(v, new) \
-	(ATOMIC_LONG_PFX(_xchg)((ATOMIC_LONG_PFX(_t) *)(v), (new)))
-
-static __always_inline void atomic_long_inc(atomic_long_t *l)
+static inline long
+atomic_long_read(const atomic_long_t *v)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	ATOMIC_LONG_PFX(_inc)(v);
+	return atomic64_read(v);
 }
 
-static __always_inline void atomic_long_dec(atomic_long_t *l)
+static inline long
+atomic_long_read_acquire(const atomic_long_t *v)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	ATOMIC_LONG_PFX(_dec)(v);
+	return atomic64_read_acquire(v);
 }
 
-#define ATOMIC_LONG_FETCH_OP(op, mo)					\
-static inline long							\
-atomic_long_fetch_##op##mo(long i, atomic_long_t *l)			\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(i, v);		\
-}
-
-ATOMIC_LONG_FETCH_OP(add, )
-ATOMIC_LONG_FETCH_OP(add, _relaxed)
-ATOMIC_LONG_FETCH_OP(add, _acquire)
-ATOMIC_LONG_FETCH_OP(add, _release)
-ATOMIC_LONG_FETCH_OP(sub, )
-ATOMIC_LONG_FETCH_OP(sub, _relaxed)
-ATOMIC_LONG_FETCH_OP(sub, _acquire)
-ATOMIC_LONG_FETCH_OP(sub, _release)
-ATOMIC_LONG_FETCH_OP(and, )
-ATOMIC_LONG_FETCH_OP(and, _relaxed)
-ATOMIC_LONG_FETCH_OP(and, _acquire)
-ATOMIC_LONG_FETCH_OP(and, _release)
-ATOMIC_LONG_FETCH_OP(andnot, )
-ATOMIC_LONG_FETCH_OP(andnot, _relaxed)
-ATOMIC_LONG_FETCH_OP(andnot, _acquire)
-ATOMIC_LONG_FETCH_OP(andnot, _release)
-ATOMIC_LONG_FETCH_OP(or, )
-ATOMIC_LONG_FETCH_OP(or, _relaxed)
-ATOMIC_LONG_FETCH_OP(or, _acquire)
-ATOMIC_LONG_FETCH_OP(or, _release)
-ATOMIC_LONG_FETCH_OP(xor, )
-ATOMIC_LONG_FETCH_OP(xor, _relaxed)
-ATOMIC_LONG_FETCH_OP(xor, _acquire)
-ATOMIC_LONG_FETCH_OP(xor, _release)
-
-#undef ATOMIC_LONG_FETCH_OP
-
-#define ATOMIC_LONG_FETCH_INC_DEC_OP(op, mo)					\
-static inline long							\
-atomic_long_fetch_##op##mo(atomic_long_t *l)				\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	return (long)ATOMIC_LONG_PFX(_fetch_##op##mo)(v);		\
-}
-
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc,)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _relaxed)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _acquire)
-ATOMIC_LONG_FETCH_INC_DEC_OP(inc, _release)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec,)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _relaxed)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _acquire)
-ATOMIC_LONG_FETCH_INC_DEC_OP(dec, _release)
-
-#undef ATOMIC_LONG_FETCH_INC_DEC_OP
-
-#define ATOMIC_LONG_OP(op)						\
-static __always_inline void						\
-atomic_long_##op(long i, atomic_long_t *l)				\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	ATOMIC_LONG_PFX(_##op)(i, v);					\
-}
-
-ATOMIC_LONG_OP(add)
-ATOMIC_LONG_OP(sub)
-ATOMIC_LONG_OP(and)
-ATOMIC_LONG_OP(andnot)
-ATOMIC_LONG_OP(or)
-ATOMIC_LONG_OP(xor)
-
-#undef ATOMIC_LONG_OP
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	return ATOMIC_LONG_PFX(_sub_and_test)(i, v);
+	atomic64_set(v, i);
 }
 
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
+static inline void
+atomic_long_set_release(atomic_long_t *v, long i)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	return ATOMIC_LONG_PFX(_dec_and_test)(v);
+	atomic64_set_release(v, i);
 }
 
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
+static inline void
+atomic_long_add(long i, atomic_long_t *v)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	return ATOMIC_LONG_PFX(_inc_and_test)(v);
+	atomic64_add(i, v);
 }
 
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	return ATOMIC_LONG_PFX(_add_negative)(i, v);
+	return atomic64_add_return(i, v);
 }
 
-#define ATOMIC_LONG_INC_DEC_OP(op, mo)					\
-static inline long							\
-atomic_long_##op##_return##mo(atomic_long_t *l)				\
-{									\
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;		\
-									\
-	return (long)ATOMIC_LONG_PFX(_##op##_return##mo)(v);		\
-}
-ATOMIC_LONG_INC_DEC_OP(inc,)
-ATOMIC_LONG_INC_DEC_OP(inc, _relaxed)
-ATOMIC_LONG_INC_DEC_OP(inc, _acquire)
-ATOMIC_LONG_INC_DEC_OP(inc, _release)
-ATOMIC_LONG_INC_DEC_OP(dec,)
-ATOMIC_LONG_INC_DEC_OP(dec, _relaxed)
-ATOMIC_LONG_INC_DEC_OP(dec, _acquire)
-ATOMIC_LONG_INC_DEC_OP(dec, _release)
-
-#undef ATOMIC_LONG_INC_DEC_OP
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+static inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
 {
-	ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
-	return (long)ATOMIC_LONG_PFX(_add_unless)(v, a, u);
+	return atomic64_add_return_acquire(i, v);
 }
 
-#define atomic_long_inc_not_zero(l) \
-	ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
+static inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+	return atomic64_add_return_release(i, v);
+}
 
-#define atomic_long_cond_read_relaxed(v, c) \
-	ATOMIC_LONG_PFX(_cond_read_relaxed)((ATOMIC_LONG_PFX(_t) *)(v), (c))
-#define atomic_long_cond_read_acquire(v, c) \
-	ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
+static inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_add_return_relaxed(i, v);
+}
 
-#endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
+static inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_add_relaxed(i, v);
+}
+
+static inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+	atomic64_sub(i, v);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+	return atomic64_sub_return(i, v);
+}
+
+static inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_sub_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+	return atomic64_sub_return_release(i, v);
+}
+
+static inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_sub_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_sub(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_sub_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_sub_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_sub_relaxed(i, v);
+}
+
+static inline void
+atomic_long_inc(atomic_long_t *v)
+{
+	atomic64_inc(v);
+}
+
+static inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+	return atomic64_inc_return(v);
+}
+
+static inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+	return atomic64_inc_return_acquire(v);
+}
+
+static inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+	return atomic64_inc_return_release(v);
+}
+
+static inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+	return atomic64_inc_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+	return atomic64_fetch_inc(v);
+}
+
+static inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+	return atomic64_fetch_inc_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+	return atomic64_fetch_inc_release(v);
+}
+
+static inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+	return atomic64_fetch_inc_relaxed(v);
+}
+
+static inline void
+atomic_long_dec(atomic_long_t *v)
+{
+	atomic64_dec(v);
+}
+
+static inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+	return atomic64_dec_return(v);
+}
+
+static inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+	return atomic64_dec_return_acquire(v);
+}
+
+static inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+	return atomic64_dec_return_release(v);
+}
+
+static inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+	return atomic64_dec_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+	return atomic64_fetch_dec(v);
+}
+
+static inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+	return atomic64_fetch_dec_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+	return atomic64_fetch_dec_release(v);
+}
+
+static inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+	return atomic64_fetch_dec_relaxed(v);
+}
+
+static inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+	atomic64_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_and_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_and_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_and_relaxed(i, v);
+}
+
+static inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+	atomic64_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_andnot_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_andnot_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_andnot_relaxed(i, v);
+}
+
+static inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+	atomic64_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_or_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_or_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_or_relaxed(i, v);
+}
+
+static inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+	atomic64_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_xor_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_xor_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+	return atomic64_fetch_xor_relaxed(i, v);
+}
+
+static inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+	return atomic64_xchg(v, i);
+}
+
+static inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+	return atomic64_xchg_acquire(v, i);
+}
+
+static inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+	return atomic64_xchg_release(v, i);
+}
+
+static inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+	return atomic64_xchg_relaxed(v, i);
+}
+
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+	return atomic64_cmpxchg(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+	return atomic64_cmpxchg_acquire(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+	return atomic64_cmpxchg_release(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+	return atomic64_cmpxchg_relaxed(v, old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+	return atomic64_try_cmpxchg(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+	return atomic64_try_cmpxchg_acquire(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+	return atomic64_try_cmpxchg_release(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+	return atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new);
+}
+
+static inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+	return atomic64_sub_and_test(i, v);
+}
+
+static inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+	return atomic64_dec_and_test(v);
+}
+
+static inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+	return atomic64_inc_and_test(v);
+}
+
+static inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+	return atomic64_add_negative(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+	return atomic64_fetch_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+	return atomic64_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+	return atomic64_inc_not_zero(v);
+}
+
+static inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+	return atomic64_inc_unless_negative(v);
+}
+
+static inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+	return atomic64_dec_unless_positive(v);
+}
+
+static inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+	return atomic64_dec_if_positive(v);
+}
+
+#else /* CONFIG_64BIT */
+
+static inline long
+atomic_long_read(const atomic_long_t *v)
+{
+	return atomic_read(v);
+}
+
+static inline long
+atomic_long_read_acquire(const atomic_long_t *v)
+{
+	return atomic_read_acquire(v);
+}
+
+static inline void
+atomic_long_set(atomic_long_t *v, long i)
+{
+	atomic_set(v, i);
+}
+
+static inline void
+atomic_long_set_release(atomic_long_t *v, long i)
+{
+	atomic_set_release(v, i);
+}
+
+static inline void
+atomic_long_add(long i, atomic_long_t *v)
+{
+	atomic_add(i, v);
+}
+
+static inline long
+atomic_long_add_return(long i, atomic_long_t *v)
+{
+	return atomic_add_return(i, v);
+}
+
+static inline long
+atomic_long_add_return_acquire(long i, atomic_long_t *v)
+{
+	return atomic_add_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_add_return_release(long i, atomic_long_t *v)
+{
+	return atomic_add_return_release(i, v);
+}
+
+static inline long
+atomic_long_add_return_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_add_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_add(long i, atomic_long_t *v)
+{
+	return atomic_fetch_add(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_acquire(long i, atomic_long_t *v)
+{
+	return atomic_fetch_add_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_release(long i, atomic_long_t *v)
+{
+	return atomic_fetch_add_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_fetch_add_relaxed(i, v);
+}
+
+static inline void
+atomic_long_sub(long i, atomic_long_t *v)
+{
+	atomic_sub(i, v);
+}
+
+static inline long
+atomic_long_sub_return(long i, atomic_long_t *v)
+{
+	return atomic_sub_return(i, v);
+}
+
+static inline long
+atomic_long_sub_return_acquire(long i, atomic_long_t *v)
+{
+	return atomic_sub_return_acquire(i, v);
+}
+
+static inline long
+atomic_long_sub_return_release(long i, atomic_long_t *v)
+{
+	return atomic_sub_return_release(i, v);
+}
+
+static inline long
+atomic_long_sub_return_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_sub_return_relaxed(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub(long i, atomic_long_t *v)
+{
+	return atomic_fetch_sub(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_acquire(long i, atomic_long_t *v)
+{
+	return atomic_fetch_sub_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_release(long i, atomic_long_t *v)
+{
+	return atomic_fetch_sub_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_fetch_sub_relaxed(i, v);
+}
+
+static inline void
+atomic_long_inc(atomic_long_t *v)
+{
+	atomic_inc(v);
+}
+
+static inline long
+atomic_long_inc_return(atomic_long_t *v)
+{
+	return atomic_inc_return(v);
+}
+
+static inline long
+atomic_long_inc_return_acquire(atomic_long_t *v)
+{
+	return atomic_inc_return_acquire(v);
+}
+
+static inline long
+atomic_long_inc_return_release(atomic_long_t *v)
+{
+	return atomic_inc_return_release(v);
+}
+
+static inline long
+atomic_long_inc_return_relaxed(atomic_long_t *v)
+{
+	return atomic_inc_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_inc(atomic_long_t *v)
+{
+	return atomic_fetch_inc(v);
+}
+
+static inline long
+atomic_long_fetch_inc_acquire(atomic_long_t *v)
+{
+	return atomic_fetch_inc_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_inc_release(atomic_long_t *v)
+{
+	return atomic_fetch_inc_release(v);
+}
+
+static inline long
+atomic_long_fetch_inc_relaxed(atomic_long_t *v)
+{
+	return atomic_fetch_inc_relaxed(v);
+}
+
+static inline void
+atomic_long_dec(atomic_long_t *v)
+{
+	atomic_dec(v);
+}
+
+static inline long
+atomic_long_dec_return(atomic_long_t *v)
+{
+	return atomic_dec_return(v);
+}
+
+static inline long
+atomic_long_dec_return_acquire(atomic_long_t *v)
+{
+	return atomic_dec_return_acquire(v);
+}
+
+static inline long
+atomic_long_dec_return_release(atomic_long_t *v)
+{
+	return atomic_dec_return_release(v);
+}
+
+static inline long
+atomic_long_dec_return_relaxed(atomic_long_t *v)
+{
+	return atomic_dec_return_relaxed(v);
+}
+
+static inline long
+atomic_long_fetch_dec(atomic_long_t *v)
+{
+	return atomic_fetch_dec(v);
+}
+
+static inline long
+atomic_long_fetch_dec_acquire(atomic_long_t *v)
+{
+	return atomic_fetch_dec_acquire(v);
+}
+
+static inline long
+atomic_long_fetch_dec_release(atomic_long_t *v)
+{
+	return atomic_fetch_dec_release(v);
+}
+
+static inline long
+atomic_long_fetch_dec_relaxed(atomic_long_t *v)
+{
+	return atomic_fetch_dec_relaxed(v);
+}
+
+static inline void
+atomic_long_and(long i, atomic_long_t *v)
+{
+	atomic_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and(long i, atomic_long_t *v)
+{
+	return atomic_fetch_and(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_acquire(long i, atomic_long_t *v)
+{
+	return atomic_fetch_and_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_release(long i, atomic_long_t *v)
+{
+	return atomic_fetch_and_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_and_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_fetch_and_relaxed(i, v);
+}
+
+static inline void
+atomic_long_andnot(long i, atomic_long_t *v)
+{
+	atomic_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot(long i, atomic_long_t *v)
+{
+	return atomic_fetch_andnot(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v)
+{
+	return atomic_fetch_andnot_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_release(long i, atomic_long_t *v)
+{
+	return atomic_fetch_andnot_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_fetch_andnot_relaxed(i, v);
+}
+
+static inline void
+atomic_long_or(long i, atomic_long_t *v)
+{
+	atomic_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or(long i, atomic_long_t *v)
+{
+	return atomic_fetch_or(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_acquire(long i, atomic_long_t *v)
+{
+	return atomic_fetch_or_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_release(long i, atomic_long_t *v)
+{
+	return atomic_fetch_or_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_or_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_fetch_or_relaxed(i, v);
+}
+
+static inline void
+atomic_long_xor(long i, atomic_long_t *v)
+{
+	atomic_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor(long i, atomic_long_t *v)
+{
+	return atomic_fetch_xor(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_acquire(long i, atomic_long_t *v)
+{
+	return atomic_fetch_xor_acquire(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_release(long i, atomic_long_t *v)
+{
+	return atomic_fetch_xor_release(i, v);
+}
+
+static inline long
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
+{
+	return atomic_fetch_xor_relaxed(i, v);
+}
+
+static inline long
+atomic_long_xchg(atomic_long_t *v, long i)
+{
+	return atomic_xchg(v, i);
+}
+
+static inline long
+atomic_long_xchg_acquire(atomic_long_t *v, long i)
+{
+	return atomic_xchg_acquire(v, i);
+}
+
+static inline long
+atomic_long_xchg_release(atomic_long_t *v, long i)
+{
+	return atomic_xchg_release(v, i);
+}
+
+static inline long
+atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+{
+	return atomic_xchg_relaxed(v, i);
+}
+
+static inline long
+atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
+{
+	return atomic_cmpxchg(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
+{
+	return atomic_cmpxchg_acquire(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
+{
+	return atomic_cmpxchg_release(v, old, new);
+}
+
+static inline long
+atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
+{
+	return atomic_cmpxchg_relaxed(v, old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
+{
+	return atomic_try_cmpxchg(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
+{
+	return atomic_try_cmpxchg_acquire(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
+{
+	return atomic_try_cmpxchg_release(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new)
+{
+	return atomic_try_cmpxchg_relaxed(v, (int *)old, new);
+}
+
+static inline bool
+atomic_long_sub_and_test(long i, atomic_long_t *v)
+{
+	return atomic_sub_and_test(i, v);
+}
+
+static inline bool
+atomic_long_dec_and_test(atomic_long_t *v)
+{
+	return atomic_dec_and_test(v);
+}
+
+static inline bool
+atomic_long_inc_and_test(atomic_long_t *v)
+{
+	return atomic_inc_and_test(v);
+}
+
+static inline bool
+atomic_long_add_negative(long i, atomic_long_t *v)
+{
+	return atomic_add_negative(i, v);
+}
+
+static inline long
+atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
+{
+	return atomic_fetch_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_add_unless(atomic_long_t *v, long a, long u)
+{
+	return atomic_add_unless(v, a, u);
+}
+
+static inline bool
+atomic_long_inc_not_zero(atomic_long_t *v)
+{
+	return atomic_inc_not_zero(v);
+}
+
+static inline bool
+atomic_long_inc_unless_negative(atomic_long_t *v)
+{
+	return atomic_inc_unless_negative(v);
+}
+
+static inline bool
+atomic_long_dec_unless_positive(atomic_long_t *v)
+{
+	return atomic_dec_unless_positive(v);
+}
+
+static inline long
+atomic_long_dec_if_positive(atomic_long_t *v)
+{
+	return atomic_dec_if_positive(v);
+}
+
+#endif /* CONFIG_64BIT */
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
+// 77558968132ce4f911ad53f6f52ce423006f6268
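
The generated header above simply routes every atomic_long_* operation to atomic64_* on 64-bit kernels and to atomic_* on 32-bit kernels, so atomic_long_t always matches the native word size. A small userspace analogue of that compile-time dispatch, with invented my_* names and C11 atomics standing in for the kernel primitives:

/* Minimal sketch of the CONFIG_64BIT dispatch: pick a 64-bit or 32-bit
 * backing type for "atomic long" at compile time. Not kernel identifiers. */
#include <stdio.h>
#include <stdatomic.h>
#include <stdint.h>

#if UINTPTR_MAX == UINT64_MAX		/* stand-in for CONFIG_64BIT */
typedef _Atomic int64_t my_atomic_long_t;
#else
typedef _Atomic int32_t my_atomic_long_t;
#endif

static long my_atomic_long_fetch_add(long i, my_atomic_long_t *v)
{
	return (long)atomic_fetch_add(v, i);
}

int main(void)
{
	my_atomic_long_t v = 40;
	long old = my_atomic_long_fetch_add(2, &v);

	printf("old=%ld new=%ld size=%zu\n", old, (long)v, sizeof(v));
	return 0;
}
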
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 13324aa..286867f 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,14 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Generic C implementation of atomic counter operations. Usable on
  * UP systems only. Do not include in machine independent code.
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 #ifndef __ASM_GENERIC_ATOMIC_H
 #define __ASM_GENERIC_ATOMIC_H
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 97b28b7..370f01d 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -1,37 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Generic implementation of 64-bit atomics using spinlocks,
  * useful on processors that don't have 64-bit atomic instructions.
  *
  * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 #ifndef _ASM_GENERIC_ATOMIC64_H
 #define _ASM_GENERIC_ATOMIC64_H
 #include <linux/types.h>
 
 typedef struct {
-	long long counter;
+	s64 counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i)	{ (i) }
 
-extern long long atomic64_read(const atomic64_t *v);
-extern void	 atomic64_set(atomic64_t *v, long long i);
+extern s64 atomic64_read(const atomic64_t *v);
+extern void atomic64_set(atomic64_t *v, s64 i);
 
 #define atomic64_set_release(v, i)	atomic64_set((v), (i))
 
 #define ATOMIC64_OP(op)							\
-extern void	 atomic64_##op(long long a, atomic64_t *v);
+extern void	 atomic64_##op(s64 a, atomic64_t *v);
 
 #define ATOMIC64_OP_RETURN(op)						\
-extern long long atomic64_##op##_return(long long a, atomic64_t *v);
+extern s64 atomic64_##op##_return(s64 a, atomic64_t *v);
 
 #define ATOMIC64_FETCH_OP(op)						\
-extern long long atomic64_fetch_##op(long long a, atomic64_t *v);
+extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v);
 
 #define ATOMIC64_OPS(op)	ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
 
@@ -50,11 +46,11 @@
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-extern long long atomic64_dec_if_positive(atomic64_t *v);
+extern s64 atomic64_dec_if_positive(atomic64_t *v);
 #define atomic64_dec_if_positive atomic64_dec_if_positive
-extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
-extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
+extern s64 atomic64_xchg(atomic64_t *v, s64 new);
+extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
 #define atomic64_fetch_add_unless atomic64_fetch_add_unless
 
 #endif  /*  _ASM_GENERIC_ATOMIC64_H  */
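
The generic atomic64_t above is meant for CPUs without native 64-bit atomic instructions; the out-of-line implementations serialize each operation with a lock around a plain s64. A rough userspace illustration of that fallback idea, using a pthread mutex where the kernel uses a small hash of spinlocks (names invented for the sketch):

/* Lock-based fallback sketch: every 64-bit operation runs under a lock. */
#include <stdio.h>
#include <stdint.h>
#include <pthread.h>

typedef struct {
	int64_t counter;
	pthread_mutex_t lock;
} my_atomic64_t;

static int64_t my_atomic64_add_return(int64_t a, my_atomic64_t *v)
{
	int64_t ret;

	pthread_mutex_lock(&v->lock);
	v->counter += a;
	ret = v->counter;
	pthread_mutex_unlock(&v->lock);
	return ret;
}

int main(void)
{
	my_atomic64_t v = { 0, PTHREAD_MUTEX_INITIALIZER };

	printf("%lld\n", (long long)my_atomic64_add_return(5, &v));	/* 5 */
	return 0;
}
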
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 2cafdbb..85b28eb 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Generic barrier definitions.
  *
@@ -6,11 +7,6 @@
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 #ifndef __ASM_GENERIC_BARRIER_H
 #define __ASM_GENERIC_BARRIER_H
diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
new file mode 100644
index 0000000..ddd1c6d
--- /dev/null
+++ b/include/asm-generic/bitops-instrumented.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for bit
+ * operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
+
+#include <linux/kasan-checks.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_set_bit(nr, addr);
+}
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___set_bit(nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ */
+static inline void clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_clear_bit(nr, addr);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___clear_bit(nr, addr);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_clear_bit_unlock(nr, addr);
+}
+
+/**
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a non-atomic operation but implies a release barrier before the
+ * memory operation. It can be used for an unlock if no other CPUs can
+ * concurrently modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___clear_bit_unlock(nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch_change_bit(nr, addr);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	arch___change_bit(nr, addr);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_set_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch___test_and_set_bit(nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_set_bit_lock(nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_clear_bit(nr, addr);
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch___test_and_clear_bit(nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_and_change_bit(nr, addr);
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch___test_and_change_bit(nr, addr);
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline bool test_bit(long nr, const volatile unsigned long *addr)
+{
+	kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+	return arch_test_bit(nr, addr);
+}
+
+#if defined(arch_clear_bit_unlock_is_negative_byte)
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ *                                     byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+static inline bool
+clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+	kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+	return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+}
+/* Let everybody know we have it. */
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */
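
Every wrapper above checks the word that actually contains bit @nr, i.e. addr + BIT_WORD(nr), before delegating to the arch_ routine. The following standalone sketch shows the underlying word/mask arithmetic with invented MY_* helpers; it is illustrative only, not the kernel's implementation:

/* Bit nr lives in word nr / BITS_PER_LONG at mask 1UL << (nr % BITS_PER_LONG). */
#include <stdio.h>
#include <limits.h>

#define MY_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)
#define MY_BIT_WORD(nr)		((nr) / MY_BITS_PER_LONG)
#define MY_BIT_MASK(nr)		(1UL << ((nr) % MY_BITS_PER_LONG))

static void my_set_bit(long nr, unsigned long *addr)
{
	addr[MY_BIT_WORD(nr)] |= MY_BIT_MASK(nr);
}

static int my_test_bit(long nr, const unsigned long *addr)
{
	return (addr[MY_BIT_WORD(nr)] >> (nr % MY_BITS_PER_LONG)) & 1;
}

int main(void)
{
	unsigned long map[4] = { 0 };

	my_set_bit(70, map);		/* lands in map[1] on a 64-bit build */
	printf("%d %d\n", my_test_bit(70, map), my_test_bit(71, map));	/* 1 0 */
	return 0;
}
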
diff --git a/include/asm-generic/bitops/builtin-fls.h b/include/asm-generic/bitops/builtin-fls.h
index 62daf94..c8455cc 100644
--- a/include/asm-generic/bitops/builtin-fls.h
+++ b/include/asm-generic/bitops/builtin-fls.h
@@ -9,7 +9,7 @@
  * This is defined the same way as ffs.
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
 {
 	return x ? sizeof(x) * 8 - __builtin_clz(x) : 0;
 }
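
A quick standalone check of the fls() contract documented above (fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32), using the same __builtin_clz fallback; my_fls() is an illustrative copy, not the kernel symbol:

#include <stdio.h>

static int my_fls(unsigned int x)
{
	/* Position of the most significant set bit, counted from 1. */
	return x ? (int)sizeof(x) * 8 - __builtin_clz(x) : 0;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       my_fls(0), my_fls(1), my_fls(8), my_fls(0x80000000u));
	/* prints: 0 1 4 32 */
	return 0;
}
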
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
index 753aeca..b168bb1 100644
--- a/include/asm-generic/bitops/fls.h
+++ b/include/asm-generic/bitops/fls.h
@@ -10,7 +10,7 @@
  * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
  */
 
-static __always_inline int fls(int x)
+static __always_inline int fls(unsigned int x)
 {
 	int r = 32;
 
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 20561a6..384b5c8 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -10,6 +10,7 @@
 #define BUGFLAG_WARNING		(1 << 0)
 #define BUGFLAG_ONCE		(1 << 1)
 #define BUGFLAG_DONE		(1 << 2)
+#define BUGFLAG_NO_CUT_HERE	(1 << 3)	/* CUT_HERE already sent */
 #define BUGFLAG_TAINT(taint)	((taint) << 8)
 #define BUG_GET_TAINT(bug)	((bug)->flags >> 8)
 #endif
@@ -61,18 +62,6 @@
 #define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
-#ifdef __WARN_FLAGS
-#define __WARN_TAINT(taint)		__WARN_FLAGS(BUGFLAG_TAINT(taint))
-#define __WARN_ONCE_TAINT(taint)	__WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
-
-#define WARN_ON_ONCE(condition) ({				\
-	int __ret_warn_on = !!(condition);			\
-	if (unlikely(__ret_warn_on))				\
-		__WARN_ONCE_TAINT(TAINT_WARN);			\
-	unlikely(__ret_warn_on);				\
-})
-#endif
-
 /*
  * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
  * significant kernel issues that need prompt attention if they should ever
@@ -89,25 +78,27 @@
  *
  * Use the versions with printk format strings to provide better diagnostics.
  */
-#ifndef __WARN_TAINT
-extern __printf(3, 4)
-void warn_slowpath_fmt(const char *file, const int line,
-		       const char *fmt, ...);
+#ifndef __WARN_FLAGS
 extern __printf(4, 5)
-void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
-			     const char *fmt, ...);
-extern void warn_slowpath_null(const char *file, const int line);
-#define WANT_WARN_ON_SLOWPATH
-#define __WARN()		warn_slowpath_null(__FILE__, __LINE__)
-#define __WARN_printf(arg...)	warn_slowpath_fmt(__FILE__, __LINE__, arg)
-#define __WARN_printf_taint(taint, arg...)				\
-	warn_slowpath_fmt_taint(__FILE__, __LINE__, taint, arg)
+void warn_slowpath_fmt(const char *file, const int line, unsigned taint,
+		       const char *fmt, ...);
+#define __WARN()		__WARN_printf(TAINT_WARN, NULL)
+#define __WARN_printf(taint, arg...)					\
+	warn_slowpath_fmt(__FILE__, __LINE__, taint, arg)
 #else
 extern __printf(1, 2) void __warn_printk(const char *fmt, ...);
-#define __WARN()		__WARN_TAINT(TAINT_WARN)
-#define __WARN_printf(arg...)	do { __warn_printk(arg); __WARN(); } while (0)
-#define __WARN_printf_taint(taint, arg...)				\
-	do { __warn_printk(arg); __WARN_TAINT(taint); } while (0)
+#define __WARN()		__WARN_FLAGS(BUGFLAG_TAINT(TAINT_WARN))
+#define __WARN_printf(taint, arg...) do {				\
+		__warn_printk(arg);					\
+		__WARN_FLAGS(BUGFLAG_NO_CUT_HERE | BUGFLAG_TAINT(taint));\
+	} while (0)
+#define WARN_ON_ONCE(condition) ({				\
+	int __ret_warn_on = !!(condition);			\
+	if (unlikely(__ret_warn_on))				\
+		__WARN_FLAGS(BUGFLAG_ONCE |			\
+			     BUGFLAG_TAINT(TAINT_WARN));	\
+	unlikely(__ret_warn_on);				\
+})
 #endif
 
 /* used internally by panic.c */
@@ -130,7 +121,7 @@
 #define WARN(condition, format...) ({					\
 	int __ret_warn_on = !!(condition);				\
 	if (unlikely(__ret_warn_on))					\
-		__WARN_printf(format);					\
+		__WARN_printf(TAINT_WARN, format);			\
 	unlikely(__ret_warn_on);					\
 })
 #endif
@@ -138,7 +129,7 @@
 #define WARN_TAINT(condition, taint, format...) ({			\
 	int __ret_warn_on = !!(condition);				\
 	if (unlikely(__ret_warn_on))					\
-		__WARN_printf_taint(taint, format);			\
+		__WARN_printf(taint, format);				\
 	unlikely(__ret_warn_on);					\
 })
 
@@ -183,7 +174,7 @@
 #endif
 
 #ifndef HAVE_ARCH_BUG_ON
-#define BUG_ON(condition) do { if (condition) BUG(); } while (0)
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
 #endif
 
 #ifndef HAVE_ARCH_WARN_ON
@@ -211,9 +202,6 @@
 /*
  * WARN_ON_SMP() is for cases that the warning is either
  * meaningless for !SMP or may even cause failures.
- * This is usually used for cases that we have
- * WARN_ON(!spin_is_locked(&lock)) checks, as spin_is_locked()
- * returns 0 for uniprocessor settings.
  * It can also be used with values that are only defined
  * on SMP:
  *
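
The WARN_ON()/WARN_ON_ONCE() macros reworked above share one shape: evaluate the condition exactly once, emit the warning when it is true, and hand the truth value back so the macro can sit directly inside an if (). A userspace sketch of that shape, with an fprintf() in place of the real warn slowpath (my_warn_on() is invented for illustration):

#include <stdio.h>

#define my_warn_on(condition) ({					\
	int __ret_warn_on = !!(condition);				\
	if (__ret_warn_on)						\
		fprintf(stderr, "warning at %s:%d\n",			\
			__FILE__, __LINE__);				\
	__ret_warn_on;							\
})

int main(void)
{
	int len = -1;

	if (my_warn_on(len < 0))	/* warns and then takes the branch */
		len = 0;
	printf("len=%d\n", len);
	return 0;
}
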
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 0dd47a6..a950a22 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -5,24 +5,70 @@
 /* Keep includes the same across arches.  */
 #include <linux/mm.h>
 
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+
 /*
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
-#define flush_cache_all()			do { } while (0)
-#define flush_cache_mm(mm)			do { } while (0)
-#define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end)	do { } while (0)
-#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-#define flush_icache_range(start, end)		do { } while (0)
-#define flush_icache_page(vma,pg)		do { } while (0)
-#define flush_icache_user_range(vma,pg,adr,len)	do { } while (0)
-#define flush_cache_vmap(start, end)		do { } while (0)
-#define flush_cache_vunmap(start, end)		do { } while (0)
+static inline void flush_cache_all(void)
+{
+}
+
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+
+static inline void flush_dcache_page(struct page *page)
+{
+}
+
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+				     struct page *page)
+{
+}
+
+static inline void flush_icache_user_range(struct vm_area_struct *vma,
+					   struct page *page,
+					   unsigned long addr, int len)
+{
+}
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 	do { \
diff --git a/include/asm-generic/compat.h b/include/asm-generic/compat.h
index 2881945..a86f65b 100644
--- a/include/asm-generic/compat.h
+++ b/include/asm-generic/compat.h
@@ -1,3 +1,25 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_COMPAT_H
+#define __ASM_GENERIC_COMPAT_H
 
-/* This is an empty stub for 32-bit-only architectures */
+/* These types are common across all compat ABIs */
+typedef u32 compat_size_t;
+typedef s32 compat_ssize_t;
+typedef s32 compat_clock_t;
+typedef s32 compat_pid_t;
+typedef u32 compat_ino_t;
+typedef s32 compat_off_t;
+typedef s64 compat_loff_t;
+typedef s32 compat_daddr_t;
+typedef s32 compat_timer_t;
+typedef s32 compat_key_t;
+typedef s16 compat_short_t;
+typedef s32 compat_int_t;
+typedef s32 compat_long_t;
+typedef u16 compat_ushort_t;
+typedef u32 compat_uint_t;
+typedef u32 compat_ulong_t;
+typedef u32 compat_uptr_t;
+typedef u32 compat_aio_context_t;
+
+#endif
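
The point of the compat_* typedefs above is that each one keeps the size the 32-bit ABI expects, regardless of the kernel's native word size. A standalone illustration of that intent using invented my_compat_* stand-ins and C11 static_assert:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t my_compat_size_t;
typedef int32_t  my_compat_off_t;
typedef int64_t  my_compat_loff_t;

int main(void)
{
	static_assert(sizeof(my_compat_size_t) == 4, "size_t is 32-bit in the compat ABI");
	static_assert(sizeof(my_compat_off_t)  == 4, "off_t is 32-bit in the compat ABI");
	static_assert(sizeof(my_compat_loff_t) == 8, "loff_t stays 64-bit");
	printf("compat type sizes OK\n");
	return 0;
}
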
diff --git a/include/asm-generic/device.h b/include/asm-generic/device.h
index d7c76bb..974517c 100644
--- a/include/asm-generic/device.h
+++ b/include/asm-generic/device.h
@@ -1,7 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Arch specific extensions to struct device
- *
- * This file is released under the GPLv2
  */
 #ifndef _ASM_GENERIC_DEVICE_H
 #define _ASM_GENERIC_DEVICE_H
diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h
index dc9726f..a3b98c8 100644
--- a/include/asm-generic/div64.h
+++ b/include/asm-generic/div64.h
@@ -28,12 +28,12 @@
 
 /**
  * do_div - returns 2 values: calculate remainder and update new dividend
- * @n: pointer to uint64_t dividend (will be updated)
+ * @n: uint64_t dividend (will be updated)
  * @base: uint32_t divisor
  *
  * Summary:
- * ``uint32_t remainder = *n % base;``
- * ``*n = *n / base;``
+ * ``uint32_t remainder = n % base;``
+ * ``n = n / base;``
  *
  * Return: (uint32_t)remainder
  *
@@ -178,7 +178,8 @@
 	uint32_t m_hi = m >> 32;
 	uint32_t n_lo = n;
 	uint32_t n_hi = n >> 32;
-	uint64_t res, tmp;
+	uint64_t res;
+	uint32_t res_lo, res_hi, tmp;
 
 	if (!bias) {
 		res = ((uint64_t)m_lo * n_lo) >> 32;
@@ -187,8 +188,9 @@
 		res = (m + (uint64_t)m_lo * n_lo) >> 32;
 	} else {
 		res = m + (uint64_t)m_lo * n_lo;
-		tmp = (res < m) ? (1ULL << 32) : 0;
-		res = (res >> 32) + tmp;
+		res_lo = res >> 32;
+		res_hi = (res_lo < m_hi);
+		res = res_lo | ((uint64_t)res_hi << 32);
 	}
 
 	if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
@@ -197,10 +199,12 @@
 		res += (uint64_t)m_hi * n_lo;
 		res >>= 32;
 	} else {
-		tmp = res += (uint64_t)m_lo * n_hi;
+		res += (uint64_t)m_lo * n_hi;
+		tmp = res >> 32;
 		res += (uint64_t)m_hi * n_lo;
-		tmp = (res < tmp) ? (1ULL << 32) : 0;
-		res = (res >> 32) + tmp;
+		res_lo = res >> 32;
+		res_hi = (res_lo < tmp);
+		res = res_lo | ((uint64_t)res_hi << 32);
 	}
 
 	res += (uint64_t)m_hi * n_hi;
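
The do_div() kernel-doc fixed above describes an in-place interface: the 64-bit dividend is overwritten with the quotient and the 32-bit remainder is returned. A standalone sketch of that contract (my_do_div() is an illustrative userspace stand-in that relies on native 64-bit division, not the kernel macro):

#include <stdio.h>
#include <stdint.h>

#define my_do_div(n, base) ({				\
	uint32_t __rem = (uint32_t)((n) % (base));	\
	(n) /= (base);					\
	__rem;						\
})

int main(void)
{
	uint64_t ns = 1000000123ULL;		/* 1.000000123 s in nanoseconds */
	uint32_t rem = my_do_div(ns, 1000000000u);

	printf("sec=%llu rem=%u\n", (unsigned long long)ns, rem);
	/* prints: sec=1 rem=123 */
	return 0;
}
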
diff --git a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
index ad28682..c13f461 100644
--- a/include/asm-generic/dma-mapping.h
+++ b/include/asm-generic/dma-mapping.h
@@ -4,16 +4,7 @@
 
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	/*
-	 * Use the non-coherent ops if available.  If an architecture wants a
-	 * more fine-grained selection of operations it will have to implement
-	 * get_arch_dma_ops itself or use the per-device dma_ops.
-	 */
-#ifdef CONFIG_DMA_NONCOHERENT_OPS
-	return &dma_noncoherent_ops;
-#else
-	return &dma_direct_ops;
-#endif
+	return NULL;
 }
 
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h
index 296c654..80ca610 100644
--- a/include/asm-generic/error-injection.h
+++ b/include/asm-generic/error-injection.h
@@ -8,6 +8,7 @@
 	EI_ETYPE_NULL,		/* Return NULL if failure */
 	EI_ETYPE_ERRNO,		/* Return -ERRNO if failure */
 	EI_ETYPE_ERRNO_NULL,	/* Return -ERRNO or NULL if failure */
+	EI_ETYPE_TRUE,		/* Return true if failure */
 };
 
 struct error_injection_entry {
@@ -15,6 +16,8 @@
 	int		etype;
 };
 
+struct pt_regs;
+
 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
 /*
  * Whitelist generating macro. Specify functions which can be
@@ -27,8 +30,12 @@
 		.addr = (unsigned long)fname,				\
 		.etype = EI_ETYPE_##_etype,				\
 	};
+
+void override_function_with_return(struct pt_regs *regs);
 #else
 #define ALLOW_ERROR_INJECTION(fname, _etype)
+
+static inline void override_function_with_return(struct pt_regs *regs) { }
 #endif
 #endif
 
diff --git a/include/asm-generic/exec.h b/include/asm-generic/exec.h
index 32c0a21..f66dc71 100644
--- a/include/asm-generic/exec.h
+++ b/include/asm-generic/exec.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Generic process execution definitions.
  *
  * It should be possible to use these on really simple architectures,
@@ -5,11 +6,6 @@
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 #ifndef __ASM_GENERIC_EXEC_H
 #define __ASM_GENERIC_EXEC_H
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 4d73e6e..fa57797 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -4,14 +4,12 @@
 #ifndef KSYM_FUNC
 #define KSYM_FUNC(x) x
 #endif
-#ifdef CONFIG_64BIT
-#ifndef KSYM_ALIGN
-#define KSYM_ALIGN 8
-#endif
-#else
-#ifndef KSYM_ALIGN
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 #define KSYM_ALIGN 4
-#endif
+#elif defined(CONFIG_64BIT)
+#define KSYM_ALIGN 8
+#else
+#define KSYM_ALIGN 4
 #endif
 #ifndef KCRC_ALIGN
 #define KCRC_ALIGN 4
@@ -19,11 +17,11 @@
 
 .macro __put, val, name
 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
-	.long	\val - ., \name - .
+	.long	\val - ., \name - ., 0
 #elif defined(CONFIG_64BIT)
-	.quad	\val, \name
+	.quad	\val, \name, 0
 #else
-	.long	\val, \name
+	.long	\val, \name, 0
 #endif
 .endm
 
@@ -57,18 +55,20 @@
 #endif
 #endif
 .endm
-#undef __put
 
-#if defined(__KSYM_DEPS__)
-
-#define __EXPORT_SYMBOL(sym, val, sec)	=== __KSYM_##sym ===
-
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#if defined(CONFIG_TRIM_UNUSED_KSYMS)
 
 #include <linux/kconfig.h>
 #include <generated/autoksyms.h>
 
+.macro __ksym_marker sym
+	.section ".discard.ksym","a"
+__ksym_marker_\sym:
+	 .previous
+.endm
+
 #define __EXPORT_SYMBOL(sym, val, sec)				\
+	__ksym_marker sym;					\
 	__cond_export_sym(sym, val, sec, __is_defined(__KSYM_##sym))
 #define __cond_export_sym(sym, val, sec, conf)			\
 	___cond_export_sym(sym, val, sec, conf)
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
index 827e4d3..8cc7b09 100644
--- a/include/asm-generic/fixmap.h
+++ b/include/asm-generic/fixmap.h
@@ -16,6 +16,7 @@
 #define __ASM_GENERIC_FIXMAP_H
 
 #include <linux/bug.h>
+#include <linux/mm_types.h>
 
 #define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)	((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
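
The two macros above map a fixmap slot index to a virtual address and back: slot x sits x pages below FIXADDR_TOP. A small numeric illustration with made-up EX_* constants (not the kernel's values):

#include <stdio.h>

#define EX_PAGE_SHIFT	12UL
#define EX_PAGE_MASK	(~((1UL << EX_PAGE_SHIFT) - 1))
#define EX_FIXADDR_TOP	0xfffff000UL

#define ex_fix_to_virt(x)	(EX_FIXADDR_TOP - ((x) << EX_PAGE_SHIFT))
#define ex_virt_to_fix(x)	((EX_FIXADDR_TOP - ((x) & EX_PAGE_MASK)) >> EX_PAGE_SHIFT)

int main(void)
{
	unsigned long va = ex_fix_to_virt(3);

	printf("slot 3 -> %#lx -> slot %lu\n", va, ex_virt_to_fix(va));
	/* slot 3 -> 0xffffc000 -> slot 3 */
	return 0;
}
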
diff --git a/include/asm-generic/flat.h b/include/asm-generic/flat.h
new file mode 100644
index 0000000..1928a35
--- /dev/null
+++ b/include/asm-generic/flat.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_FLAT_H
+#define _ASM_GENERIC_FLAT_H
+
+#include <linux/uaccess.h>
+
+static inline int flat_get_addr_from_rp(u32 __user *rp, u32 relval, u32 flags,
+		u32 *addr)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	return copy_from_user(addr, rp, 4) ? -EFAULT : 0;
+#else
+	return get_user(*addr, rp);
+#endif
+}
+
+static inline int flat_put_addr_at_rp(u32 __user *rp, u32 addr, u32 rel)
+{
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+	return copy_to_user(rp, &addr, 4) ? -EFAULT : 0;
+#else
+	return put_user(addr, rp);
+#endif
+}
+
+#endif /* _ASM_GENERIC_FLAT_H */
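
A hedged illustration of how a bFLT loader might use the two new helpers; only flat_get_addr_from_rp() and flat_put_addr_at_rp() are real, the wrapper function and its "delta" parameter are made up.

```c
/*
 * Hypothetical loader fragment: read a relocation target through the
 * user pointer, relocate it, and write it back.
 */
static int example_fixup_reloc(u32 __user *rp, u32 relval, u32 delta)
{
	u32 addr;
	int ret;

	ret = flat_get_addr_from_rp(rp, relval, 0, &addr);
	if (ret)
		return ret;

	return flat_put_addr_at_rp(rp, addr + delta, relval);
}
```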
diff --git a/include/asm-generic/ftrace.h b/include/asm-generic/ftrace.h
index 51abba9..3a23028 100644
--- a/include/asm-generic/ftrace.h
+++ b/include/asm-generic/ftrace.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * linux/include/asm-generic/ftrace.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #ifndef __ASM_GENERIC_FTRACE_H__
 #define __ASM_GENERIC_FTRACE_H__
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index fcb61b4..02970b1 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -23,7 +23,9 @@
  *
  * Return:
  * 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Operation not supported
  */
 static inline int
 arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
@@ -85,7 +87,9 @@
  *
  * Return:
  * 0 - On success
- * <0 - On error
+ * -EFAULT - User access resulted in a page fault
+ * -EAGAIN - Atomic operation was unable to complete due to contention
+ * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
 futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
@@ -114,26 +118,7 @@
 static inline int
 arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-	int oldval = 0, ret;
-
-	pagefault_disable();
-
-	switch (op) {
-	case FUTEX_OP_SET:
-	case FUTEX_OP_ADD:
-	case FUTEX_OP_OR:
-	case FUTEX_OP_ANDN:
-	case FUTEX_OP_XOR:
-	default:
-		ret = -ENOSYS;
-	}
-
-	pagefault_enable();
-
-	if (!ret)
-		*oval = oldval;
-
-	return ret;
+	return -ENOSYS;
 }
 
 static inline int
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7..e9f20b8 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
 #include <linux/compiler.h>
 #include <linux/log2.h>
 
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
-	int order;
-
-	size--;
-	size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
-	order = fls(size);
-#else
-	order = fls64(size);
-#endif
-	return order;
-}
-
 /**
  * get_order - Determine the allocation order of a memory size
  * @size: The size for which to get the order
@@ -43,19 +25,27 @@
  * to hold an object of the specified size.
  *
  * The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
  */
-#define get_order(n)						\
-(								\
-	__builtin_constant_p(n) ? (				\
-		((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT :	\
-		(((n) < (1UL << PAGE_SHIFT)) ? 0 :		\
-		 ilog2((n) - 1) - PAGE_SHIFT + 1)		\
-	) :							\
-	__get_order(n)						\
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+	if (__builtin_constant_p(size)) {
+		if (!size)
+			return BITS_PER_LONG - PAGE_SHIFT;
+
+		if (size < (1UL << PAGE_SHIFT))
+			return 0;
+
+		return ilog2((size) - 1) - PAGE_SHIFT + 1;
+	}
+
+	size--;
+	size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+	return fls(size);
+#else
+	return fls64(size);
+#endif
+}
 
 #endif	/* __ASSEMBLY__ */
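
To make the unified get_order() math concrete, here is a standalone userspace mirror of the runtime path, assuming 4 KiB pages (PAGE_SHIFT = 12); it is not kernel code, just a worked example of the arithmetic.

```c
/* Standalone mirror of the get_order() runtime math, 4 KiB pages assumed. */
#include <stdio.h>

#define PAGE_SHIFT 12

static int mirror_get_order(unsigned long size)
{
	int order = 0;

	if (size == 0)
		return 8 * sizeof(unsigned long) - PAGE_SHIFT;
	size--;
	size >>= PAGE_SHIFT;
	while (size) {		/* stands in for fls()/fls64() */
		size >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	/* 1..4096 bytes -> order 0, 4097..8192 -> order 1, 8193 -> order 2 */
	printf("%d %d %d\n", mirror_get_order(1),
	       mirror_get_order(4097), mirror_get_order(8193));
	return 0;
}
```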
 
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 9d0cde8..822f433 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -32,7 +32,7 @@
 	return pte_modify(pte, newprot);
 }
 
-#ifndef huge_pte_clear
+#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, unsigned long sz)
 {
@@ -40,4 +40,97 @@
 }
 #endif
 
+#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+		unsigned long addr, unsigned long end,
+		unsigned long floor, unsigned long ceiling)
+{
+	free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte)
+{
+	set_pte_at(mm, addr, ptep, pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	return ptep_get_and_clear(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep)
+{
+	ptep_clear_flush(vma, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_NONE
+static inline int huge_pte_none(pte_t pte)
+{
+	return pte_none(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+	return pte_wrprotect(pte);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
+static inline int prepare_hugepage_range(struct file *file,
+		unsigned long addr, unsigned long len)
+{
+	struct hstate *h = hstate_file(file);
+
+	if (len & ~huge_page_mask(h))
+		return -EINVAL;
+	if (addr & ~huge_page_mask(h))
+		return -EINVAL;
+
+	return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+		unsigned long addr, pte_t *ptep)
+{
+	ptep_set_wrprotect(mm, addr, ptep);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+		unsigned long addr, pte_t *ptep,
+		pte_t pte, int dirty)
+{
+	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+#endif
+
+#ifndef __HAVE_ARCH_HUGE_PTEP_GET
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+#endif
+
+#ifndef __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED
+static inline bool gigantic_page_runtime_supported(void)
+{
+	return IS_ENABLED(CONFIG_ARCH_HAS_GIGANTIC_PAGE);
+}
+#endif /* __HAVE_ARCH_GIGANTIC_PAGE_RUNTIME_SUPPORTED */
+
 #endif /* _ASM_GENERIC_HUGETLB_H */
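
The fallbacks above are selected per architecture through the __HAVE_ARCH_* guards. A hypothetical arch-side sketch (it would live in the arch's asm/hugetlb.h before including this file) could look like the following; the body is an assumption, not any particular architecture's code.

```c
/* Hypothetical override of one helper, inheriting the generic rest. */
#define __HAVE_ARCH_HUGE_PTEP_GET
static inline pte_t huge_ptep_get(pte_t *ptep)
{
	/* e.g. an arch that reads contiguous-PTE hugepages specially */
	return READ_ONCE(*ptep);
}

#include <asm-generic/hugetlb.h>
```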
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d356f80..d028065 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Generic I/O port emulation.
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 #ifndef __ASM_GENERIC_IO_H
 #define __ASM_GENERIC_IO_H
@@ -19,12 +15,9 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#include <asm/mmiowb.h>
 #include <asm-generic/pci_iomap.h>
 
-#ifndef mmiowb
-#define mmiowb() do {} while (0)
-#endif
-
 #ifndef __io_br
 #define __io_br()      barrier()
 #endif
@@ -32,9 +25,9 @@
 /* prevent prefetching of coherent DMA data ahead of a dma-complete */
 #ifndef __io_ar
 #ifdef rmb
-#define __io_ar()      rmb()
+#define __io_ar(v)      rmb()
 #else
-#define __io_ar()      barrier()
+#define __io_ar(v)      barrier()
 #endif
 #endif
 
@@ -49,7 +42,7 @@
 
 /* serialize device access against a spin_unlock, usually handled there. */
 #ifndef __io_aw
-#define __io_aw()      barrier()
+#define __io_aw()      mmiowb_set_pending()
 #endif
 
 #ifndef __io_pbw
@@ -65,7 +58,7 @@
 #endif
 
 #ifndef __io_par
-#define __io_par()     __io_ar()
+#define __io_par(v)     __io_ar(v)
 #endif
 
 
@@ -158,7 +151,7 @@
 
 	__io_br();
 	val = __raw_readb(addr);
-	__io_ar();
+	__io_ar(val);
 	return val;
 }
 #endif
@@ -171,7 +164,7 @@
 
 	__io_br();
 	val = __le16_to_cpu(__raw_readw(addr));
-	__io_ar();
+	__io_ar(val);
 	return val;
 }
 #endif
@@ -184,7 +177,7 @@
 
 	__io_br();
 	val = __le32_to_cpu(__raw_readl(addr));
-	__io_ar();
+	__io_ar(val);
 	return val;
 }
 #endif
@@ -198,7 +191,7 @@
 
 	__io_br();
 	val = __le64_to_cpu(__raw_readq(addr));
-	__io_ar();
+	__io_ar(val);
 	return val;
 }
 #endif
@@ -471,7 +464,7 @@
 
 	__io_pbr();
 	val = __raw_readb(PCI_IOBASE + addr);
-	__io_par();
+	__io_par(val);
 	return val;
 }
 #endif
@@ -484,7 +477,7 @@
 
 	__io_pbr();
 	val = __le16_to_cpu(__raw_readw(PCI_IOBASE + addr));
-	__io_par();
+	__io_par(val);
 	return val;
 }
 #endif
@@ -497,7 +490,7 @@
 
 	__io_pbr();
 	val = __le32_to_cpu(__raw_readl(PCI_IOBASE + addr));
-	__io_par();
+	__io_par(val);
 	return val;
 }
 #endif
@@ -970,15 +963,6 @@
 }
 #endif
 
-#ifndef __ioremap
-#define __ioremap __ioremap
-static inline void __iomem *__ioremap(phys_addr_t offset, size_t size,
-				      unsigned long flags)
-{
-	return ioremap(offset, size);
-}
-#endif
-
 #ifndef iounmap
 #define iounmap iounmap
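
With __io_aw() now recording a pending mmiowb(), an MMIO write issued under a spinlock is ordered by the unlock path, so the old explicit mmiowb() call in drivers becomes unnecessary. A hedged sketch, with a made-up device, lock and register offset:

```c
struct example_dev {			/* hypothetical device */
	spinlock_t lock;
	void __iomem *regs;
};

#define EXAMPLE_DOORBELL	0x10	/* hypothetical register offset */

static void example_ring_doorbell(struct example_dev *dev, u32 val)
{
	spin_lock(&dev->lock);
	writel(val, dev->regs + EXAMPLE_DOORBELL);
	/* no explicit mmiowb() needed here any more */
	spin_unlock(&dev->lock);
}
```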
 
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 5b63b94..a008f50 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -36,6 +36,17 @@
 extern u64 ioread64be(void __iomem *);
 #endif
 
+#ifdef readq
+#define ioread64_lo_hi ioread64_lo_hi
+#define ioread64_hi_lo ioread64_hi_lo
+#define ioread64be_lo_hi ioread64be_lo_hi
+#define ioread64be_hi_lo ioread64be_hi_lo
+extern u64 ioread64_lo_hi(void __iomem *addr);
+extern u64 ioread64_hi_lo(void __iomem *addr);
+extern u64 ioread64be_lo_hi(void __iomem *addr);
+extern u64 ioread64be_hi_lo(void __iomem *addr);
+#endif
+
 extern void iowrite8(u8, void __iomem *);
 extern void iowrite16(u16, void __iomem *);
 extern void iowrite16be(u16, void __iomem *);
@@ -46,6 +57,17 @@
 extern void iowrite64be(u64, void __iomem *);
 #endif
 
+#ifdef writeq
+#define iowrite64_lo_hi iowrite64_lo_hi
+#define iowrite64_hi_lo iowrite64_hi_lo
+#define iowrite64be_lo_hi iowrite64be_lo_hi
+#define iowrite64be_hi_lo iowrite64be_hi_lo
+extern void iowrite64_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64_hi_lo(u64 val, void __iomem *addr);
+extern void iowrite64be_lo_hi(u64 val, void __iomem *addr);
+extern void iowrite64be_hi_lo(u64 val, void __iomem *addr);
+#endif
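
The lo_hi/hi_lo variants split a 64-bit access into two 32-bit halves in a defined order, for devices that require a particular half ordering. A conceptual sketch of the "lo_hi" ordering only; this is not the kernel's implementation.

```c
static u64 example_ioread64_lo_hi(void __iomem *addr)
{
	u32 lo = ioread32(addr);			/* low half first */
	u32 hi = ioread32(addr + sizeof(u32));		/* then high half */

	return lo | ((u64)hi << 32);
}
```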
+
 /*
  * "string" versions of the above. Note that they
  * use native byte ordering for the accesses (on
diff --git a/include/asm-generic/irq_regs.h b/include/asm-generic/irq_regs.h
index 6bf9355..2e7c6e8 100644
--- a/include/asm-generic/irq_regs.h
+++ b/include/asm-generic/irq_regs.h
@@ -1,12 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Fallback per-CPU frame pointer holder
  *
  * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 
 #ifndef _ASM_GENERIC_IRQ_REGS_H
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 8ac4e68..6736ed2 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -18,7 +18,6 @@
 }
 
 static inline void arch_unmap(struct mm_struct *mm,
-			struct vm_area_struct *vma,
 			unsigned long start, unsigned long end)
 {
 }
diff --git a/include/asm-generic/mmiowb.h b/include/asm-generic/mmiowb.h
new file mode 100644
index 0000000..9439ff0
--- /dev/null
+++ b/include/asm-generic/mmiowb.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_H
+#define __ASM_GENERIC_MMIOWB_H
+
+/*
+ * Generic implementation of mmiowb() tracking for spinlocks.
+ *
+ * If your architecture doesn't ensure that writes to an I/O peripheral
+ * within two spinlocked sections on two different CPUs are seen by the
+ * peripheral in the order corresponding to the lock handover, then you
+ * need to follow these FIVE easy steps:
+ *
+ * 	1. Implement mmiowb() (and arch_mmiowb_state() if you're fancy)
+ *	   in asm/mmiowb.h, then #include this file
+ *	2. Ensure your I/O write accessors call mmiowb_set_pending()
+ *	3. Select ARCH_HAS_MMIOWB
+ *	4. Untangle the resulting mess of header files
+ *	5. Complain to your architects
+ */
+#ifdef CONFIG_MMIOWB
+
+#include <linux/compiler.h>
+#include <asm-generic/mmiowb_types.h>
+
+#ifndef arch_mmiowb_state
+#include <asm/percpu.h>
+#include <asm/smp.h>
+
+DECLARE_PER_CPU(struct mmiowb_state, __mmiowb_state);
+#define __mmiowb_state()	this_cpu_ptr(&__mmiowb_state)
+#else
+#define __mmiowb_state()	arch_mmiowb_state()
+#endif	/* arch_mmiowb_state */
+
+static inline void mmiowb_set_pending(void)
+{
+	struct mmiowb_state *ms = __mmiowb_state();
+	ms->mmiowb_pending = ms->nesting_count;
+}
+
+static inline void mmiowb_spin_lock(void)
+{
+	struct mmiowb_state *ms = __mmiowb_state();
+	ms->nesting_count++;
+}
+
+static inline void mmiowb_spin_unlock(void)
+{
+	struct mmiowb_state *ms = __mmiowb_state();
+
+	if (unlikely(ms->mmiowb_pending)) {
+		ms->mmiowb_pending = 0;
+		mmiowb();
+	}
+
+	ms->nesting_count--;
+}
+#else
+#define mmiowb_set_pending()		do { } while (0)
+#define mmiowb_spin_lock()		do { } while (0)
+#define mmiowb_spin_unlock()		do { } while (0)
+#endif	/* CONFIG_MMIOWB */
+#endif	/* __ASM_GENERIC_MMIOWB_H */
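
A rough sketch of where these hooks are expected to sit in the lock core; the real call sites live in the generic spinlock code, and the wrapper names below are made up.

```c
static inline void example_lock(arch_spinlock_t *l)
{
	arch_spin_lock(l);
	mmiowb_spin_lock();		/* open a nesting level */
}

static inline void example_unlock(arch_spinlock_t *l)
{
	mmiowb_spin_unlock();		/* issue mmiowb() if writes are pending */
	arch_spin_unlock(l);
}
```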
diff --git a/include/asm-generic/mmiowb_types.h b/include/asm-generic/mmiowb_types.h
new file mode 100644
index 0000000..8eb0095
--- /dev/null
+++ b/include/asm-generic/mmiowb_types.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_MMIOWB_TYPES_H
+#define __ASM_GENERIC_MMIOWB_TYPES_H
+
+#include <linux/types.h>
+
+struct mmiowb_state {
+	u16	nesting_count;
+	u16	mmiowb_pending;
+};
+
+#endif	/* __ASM_GENERIC_MMIOWB_TYPES_H */
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
new file mode 100644
index 0000000..18d8e2d
--- /dev/null
+++ b/include/asm-generic/mshyperv.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Linux-specific definitions for managing interactions with Microsoft's
+ * Hyper-V hypervisor. The definitions in this file are architecture
+ * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
+ * that are specific to architecture <arch>.
+ *
+ * Definitions that are specified in the Hyper-V Top Level Functional
+ * Spec (TLFS) should not go in this file, but should instead go in
+ * hyperv-tlfs.h.
+ *
+ * Copyright (C) 2019, Microsoft, Inc.
+ *
+ * Author : Michael Kelley <mikelley@microsoft.com>
+ */
+
+#ifndef _ASM_GENERIC_MSHYPERV_H
+#define _ASM_GENERIC_MSHYPERV_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/cpumask.h>
+#include <asm/ptrace.h>
+#include <asm/hyperv-tlfs.h>
+
+struct ms_hyperv_info {
+	u32 features;
+	u32 misc_features;
+	u32 hints;
+	u32 nested_features;
+	u32 max_vp_index;
+	u32 max_lp_index;
+};
+extern struct ms_hyperv_info ms_hyperv;
+
+extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
+extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+
+
+/* Generate the guest OS identifier as described in the Hyper-V TLFS */
+static inline  __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
+				       __u64 d_info2)
+{
+	__u64 guest_id = 0;
+
+	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
+	guest_id |= (d_info1 << 48);
+	guest_id |= (kernel_version << 16);
+	guest_id |= d_info2;
+
+	return guest_id;
+}
+
+
+/* Free the message slot and signal end-of-message if required */
+static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
+{
+	/*
+	 * On crash we're reading some other CPU's message page and we need
+	 * to be careful: this other CPU may have already cleared the header
+	 * and the host may have already delivered some other message there.
+	 * In case we blindly write msg->header.message_type we're going
+	 * to lose it. We can still lose a message of the same type but
+	 * we count on the fact that there can only be one
+	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
+	 * on crash.
+	 */
+	if (cmpxchg(&msg->header.message_type, old_msg_type,
+		    HVMSG_NONE) != old_msg_type)
+		return;
+
+	/*
+	 * The cmpxchg() above does an implicit memory barrier to
+	 * ensure the write to MessageType (i.e. set to
+	 * HVMSG_NONE) happens before we read the
+	 * MessagePending and EOMing. Otherwise, the EOMing
+	 * will not deliver any more messages since there is
+	 * no empty slot.
+	 */
+	if (msg->header.message_flags.msg_pending) {
+		/*
+		 * This will cause message queue rescan to
+		 * possibly deliver another msg from the
+		 * hypervisor
+		 */
+		hv_signal_eom();
+	}
+}
+
+void hv_setup_vmbus_irq(void (*handler)(void));
+void hv_remove_vmbus_irq(void);
+void hv_enable_vmbus_irq(void);
+void hv_disable_vmbus_irq(void);
+
+void hv_setup_kexec_handler(void (*handler)(void));
+void hv_remove_kexec_handler(void);
+void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
+void hv_remove_crash_handler(void);
+
+#if IS_ENABLED(CONFIG_HYPERV)
+/*
+ * Hypervisor's notion of virtual processor ID is different from
+ * Linux' notion of CPU ID. This information can only be retrieved
+ * in the context of the calling CPU. Set up a map for easy access
+ * to this information.
+ */
+extern u32 *hv_vp_index;
+extern u32 hv_max_vp_index;
+
+/* Sentinel value for an uninitialized entry in hv_vp_index array */
+#define VP_INVAL	U32_MAX
+
+/**
+ * hv_cpu_number_to_vp_number() - Map CPU to VP.
+ * @cpu_number: CPU number in Linux terms
+ *
+ * This function returns the mapping between the Linux processor
+ * number and the hypervisor's virtual processor number, useful
+ * in making hypercalls and such that talk about specific
+ * processors.
+ *
+ * Return: Virtual processor number in Hyper-V terms
+ */
+static inline int hv_cpu_number_to_vp_number(int cpu_number)
+{
+	return hv_vp_index[cpu_number];
+}
+
+static inline int cpumask_to_vpset(struct hv_vpset *vpset,
+				    const struct cpumask *cpus)
+{
+	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;
+
+	/* valid_bank_mask can represent up to 64 banks */
+	if (hv_max_vp_index / 64 >= 64)
+		return 0;
+
+	/*
+	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
+	 * structs are not cleared between calls, we risk flushing unneeded
+	 * vCPUs otherwise.
+	 */
+	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
+		vpset->bank_contents[vcpu_bank] = 0;
+
+	/*
+	 * Some banks may end up being empty but this is acceptable.
+	 */
+	for_each_cpu(cpu, cpus) {
+		vcpu = hv_cpu_number_to_vp_number(cpu);
+		if (vcpu == VP_INVAL)
+			return -1;
+		vcpu_bank = vcpu / 64;
+		vcpu_offset = vcpu % 64;
+		__set_bit(vcpu_offset, (unsigned long *)
+			  &vpset->bank_contents[vcpu_bank]);
+		if (vcpu_bank >= nr_bank)
+			nr_bank = vcpu_bank + 1;
+	}
+	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
+	return nr_bank;
+}
+
+void hyperv_report_panic(struct pt_regs *regs, long err);
+void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
+bool hv_is_hyperv_initialized(void);
+void hyperv_cleanup(void);
+void hv_setup_sched_clock(void *sched_clock);
+#else /* CONFIG_HYPERV */
+static inline bool hv_is_hyperv_initialized(void) { return false; }
+static inline void hyperv_cleanup(void) {}
+#endif /* CONFIG_HYPERV */
+
+#if IS_ENABLED(CONFIG_HYPERV)
+extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
+extern void hv_remove_stimer0_irq(int irq);
+#endif
+
+#endif
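
As an illustration of generate_guest_id(), a boot-time caller might compose the guest OS ID and hand it to an arch-specific MSR write. The wrapper below is a placeholder, and passing LINUX_VERSION_CODE with zeroed distro fields is an assumption about typical usage.

```c
static void example_register_guest_os_id(void)
{
	u64 guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);

	example_write_guest_os_id_msr(guest_id);	/* hypothetical wrapper */
}
```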
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 27bf337..fe801f0 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -7,7 +7,7 @@
  */
 
 #ifdef CONFIG_MMU
-#error need to prove a real asm/page.h
+#error need to provide a real asm/page.h
 #endif
 
 
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 1817a84..c2de013 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -62,10 +62,6 @@
 #define PER_CPU_ATTRIBUTES
 #endif
 
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
-#endif
-
 #define raw_cpu_generic_read(pcp)					\
 ({									\
 	*raw_cpu_ptr(&(pcp));						\
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 948714c..73f7421 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -1,13 +1,107 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef __ASM_GENERIC_PGALLOC_H
 #define __ASM_GENERIC_PGALLOC_H
-/*
- * an empty file is enough for a nommu architecture
- */
+
 #ifdef CONFIG_MMU
-#error need to implement an architecture specific asm/pgalloc.h
+
+#define GFP_PGTABLE_KERNEL	(GFP_KERNEL | __GFP_ZERO)
+#define GFP_PGTABLE_USER	(GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
+
+/**
+ * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation.
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
+/**
+ * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * @mm: the mm_struct of the current context
+ *
+ * Return: pointer to the allocated memory or %NULL on error
+ */
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
+{
+	return __pte_alloc_one_kernel(mm);
+}
 #endif
 
-#define check_pgt_cache()          do { } while (0)
+/**
+ * pte_free_kernel - free PTE-level kernel page table page
+ * @mm: the mm_struct of the current context
+ * @pte: pointer to the memory containing the page table
+ */
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+/**
+ * __pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ * @gfp: GFP flags to use for the allocation
+ *
+ * Allocates a page and runs the pgtable_pte_page_ctor().
+ *
+ * This function is intended for architectures that need
+ * anything beyond simple page allocation or must have custom GFP flags.
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
+{
+	struct page *pte;
+
+	pte = alloc_page(gfp);
+	if (!pte)
+		return NULL;
+	if (!pgtable_pte_page_ctor(pte)) {
+		__free_page(pte);
+		return NULL;
+	}
+
+	return pte;
+}
+
+#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
+/**
+ * pte_alloc_one - allocate a page for PTE-level user page table
+ * @mm: the mm_struct of the current context
+ *
+ * Allocates a page and runs the pgtable_pte_page_ctor().
+ *
+ * Return: `struct page` initialized as page table or %NULL on error
+ */
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
+{
+	return __pte_alloc_one(mm, GFP_PGTABLE_USER);
+}
+#endif
+
+/*
+ * Should really implement gc for free page table pages. This could be
+ * done with a reference count in struct page.
+ */
+
+/**
+ * pte_free - free PTE-level user page table page
+ * @mm: the mm_struct of the current context
+ * @pte_page: the `struct page` representing the page table
+ */
+static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
+{
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
+}
+
+#endif /* CONFIG_MMU */
 
 #endif /* __ASM_GENERIC_PGALLOC_H */
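A hypothetical arch override showing what the __pte_alloc_one()/pte_alloc_one() split is meant to allow: keep the generic allocation but pass arch-chosen GFP flags. The __GFP_HIGHMEM flag here is only an example.

```c
#define __HAVE_ARCH_PTE_ALLOC_ONE
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
	/* arch-specific extra flag; the choice is illustrative only */
	return __pte_alloc_one(mm, GFP_PGTABLE_USER | __GFP_HIGHMEM);
}
```
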
diff --git a/include/asm-generic/pgtable-nop4d-hack.h b/include/asm-generic/pgtable-nop4d-hack.h
index 1d6dd38..829bdb0 100644
--- a/include/asm-generic/pgtable-nop4d-hack.h
+++ b/include/asm-generic/pgtable-nop4d-hack.h
@@ -31,6 +31,7 @@
 #define pud_ERROR(pud)				(pgd_ERROR((pud).pgd))
 
 #define pgd_populate(mm, pgd, pud)		do { } while (0)
+#define pgd_populate_safe(mm, pgd, pud)		do { } while (0)
 /*
  * (puds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index 04cb913..aebab90 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -26,6 +26,7 @@
 #define p4d_ERROR(p4d)				(pgd_ERROR((p4d).pgd))
 
 #define pgd_populate(mm, pgd, p4d)		do { } while (0)
+#define pgd_populate_safe(mm, pgd, p4d)		do { } while (0)
 /*
  * (p4ds are folded into pgds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index 9bef475..c77a1d3 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -35,6 +35,7 @@
 #define pud_ERROR(pud)				(p4d_ERROR((pud).p4d))
 
 #define p4d_populate(mm, p4d, pud)		do { } while (0)
+#define p4d_populate_safe(mm, p4d, pud)		do { } while (0)
 /*
  * (puds are folded into p4ds so this doesn't get actually called,
  * but the define is needed for a generic inline function.)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 15fd027..8186918 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -375,7 +375,6 @@
 #endif
 
 #ifndef __HAVE_ARCH_PMD_SAME
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 {
 	return pmd_val(pmd_a) == pmd_val(pmd_b);
@@ -385,21 +384,60 @@
 {
 	return pud_val(pud_a) == pud_val(pud_b);
 }
-#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
-{
-	BUILD_BUG();
-	return 0;
-}
-
-static inline int pud_same(pud_t pud_a, pud_t pud_b)
-{
-	BUILD_BUG();
-	return 0;
-}
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
+#ifndef __HAVE_ARCH_P4D_SAME
+static inline int p4d_same(p4d_t p4d_a, p4d_t p4d_b)
+{
+	return p4d_val(p4d_a) == p4d_val(p4d_b);
+}
+#endif
+
+#ifndef __HAVE_ARCH_PGD_SAME
+static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b)
+{
+	return pgd_val(pgd_a) == pgd_val(pgd_b);
+}
+#endif
+
+/*
+ * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
+ * TLB flush will be required as a result of the "set". For example, use
+ * in scenarios where it is known ahead of time that the routine is
+ * setting non-present entries, or re-setting an existing entry to the
+ * same value. Otherwise, use the typical "set" helpers and flush the
+ * TLB.
+ */
+#define set_pte_safe(ptep, pte) \
+({ \
+	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
+	set_pte(ptep, pte); \
+})
+
+#define set_pmd_safe(pmdp, pmd) \
+({ \
+	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
+	set_pmd(pmdp, pmd); \
+})
+
+#define set_pud_safe(pudp, pud) \
+({ \
+	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
+	set_pud(pudp, pud); \
+})
+
+#define set_p4d_safe(p4dp, p4d) \
+({ \
+	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
+	set_p4d(p4dp, p4d); \
+})
+
+#define set_pgd_safe(pgdp, pgd) \
+({ \
+	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
+	set_pgd(pgdp, pgd); \
+})
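
A hedged usage sketch for the _safe variants (the function name is made up): populating an entry known to be non-present, so no TLB flush is required and the helper both documents and WARNs on that assumption.

```c
static void example_map_new_page(pte_t *ptep, unsigned long pfn, pgprot_t prot)
{
	/* entry must be non-present; WARNs if that ever stops being true */
	set_pte_safe(ptep, pfn_pte(pfn, prot));
}
```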
+
 #ifndef __HAVE_ARCH_DO_SWAP_PAGE
 /*
  * Some architectures support metadata associated with a page. When a
@@ -568,7 +606,7 @@
 	return 0;
 }
 
-static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+static inline pte_t __ptep_modify_prot_start(struct vm_area_struct *vma,
 					     unsigned long addr,
 					     pte_t *ptep)
 {
@@ -577,10 +615,10 @@
 	 * non-present, preventing the hardware from asynchronously
 	 * updating it.
 	 */
-	return ptep_get_and_clear(mm, addr, ptep);
+	return ptep_get_and_clear(vma->vm_mm, addr, ptep);
 }
 
-static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+static inline void __ptep_modify_prot_commit(struct vm_area_struct *vma,
 					     unsigned long addr,
 					     pte_t *ptep, pte_t pte)
 {
@@ -588,7 +626,7 @@
 	 * The pte is non-present, so there's no hardware state to
 	 * preserve.
 	 */
-	set_pte_at(mm, addr, ptep, pte);
+	set_pte_at(vma->vm_mm, addr, ptep, pte);
 }
 
 #ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -606,22 +644,22 @@
  * queue the update to be done at some later time.  The update must be
  * actually committed before the pte lock is released, however.
  */
-static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
 					   unsigned long addr,
 					   pte_t *ptep)
 {
-	return __ptep_modify_prot_start(mm, addr, ptep);
+	return __ptep_modify_prot_start(vma, addr, ptep);
 }
 
 /*
  * Commit an update to a pte, leaving any hardware-controlled bits in
  * the PTE unmodified.
  */
-static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
 					   unsigned long addr,
-					   pte_t *ptep, pte_t pte)
+					   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
-	__ptep_modify_prot_commit(mm, addr, ptep, pte);
+	__ptep_modify_prot_commit(vma, addr, ptep, pte);
 }
 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
 #endif /* CONFIG_MMU */
@@ -757,7 +795,7 @@
 /*
  * Interfaces that can be used by architecture code to keep track of
  * memory type of pfn mappings specified by the remap_pfn_range,
- * vm_insert_pfn.
+ * vmf_insert_pfn.
  */
 
 /*
@@ -773,7 +811,7 @@
 
 /*
  * track_pfn_insert is called when a _new_ single pfn is established
- * by vm_insert_pfn().
+ * by vmf_insert_pfn().
  */
 static inline void track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 				    pfn_t pfn)
@@ -964,9 +1002,8 @@
  * need this). If THP is not enabled, the pmd can't go away under the
  * code even if MADV_DONTNEED runs, but if THP is enabled we need to
  * run a pmd_trans_unstable before walking the ptes after
- * split_huge_page_pmd returns (because it may have run when the pmd
- * become null, but then a page fault can map in a THP and not a
- * regular page).
+ * split_huge_pmd returns (because it may have run when the pmd become
+ * null, but then a page fault can map in a THP and not a regular page).
  */
 static inline int pmd_trans_unstable(pmd_t *pmd)
 {
@@ -1019,6 +1056,7 @@
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
 int pud_clear_huge(pud_t *pud);
 int pmd_clear_huge(pmd_t *pmd);
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr);
 int pud_free_pmd_page(pud_t *pud, unsigned long addr);
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
 #else	/* !CONFIG_HAVE_ARCH_HUGE_VMAP */
@@ -1046,6 +1084,10 @@
 {
 	return 0;
 }
+static inline int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+	return 0;
+}
 static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
 	return 0;
@@ -1083,6 +1125,8 @@
 static inline void init_espfix_bsp(void) { }
 #endif
 
+extern void __init pgtable_cache_init(void);
+
 #ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
 static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
 {
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index c3046c9..d683f5e 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -78,11 +78,11 @@
 			tif_need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
 extern asmlinkage void preempt_schedule_notrace(void);
 #define __preempt_schedule_notrace() preempt_schedule_notrace()
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
deleted file mode 100644
index 82e674f..0000000
--- a/include/asm-generic/ptrace.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Common low level (register) ptrace helpers
- *
- * Copyright 2004-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef __ASM_GENERIC_PTRACE_H__
-#define __ASM_GENERIC_PTRACE_H__
-
-#ifndef __ASSEMBLY__
-
-/* Helpers for working with the instruction pointer */
-#ifndef GET_IP
-#define GET_IP(regs) ((regs)->pc)
-#endif
-#ifndef SET_IP
-#define SET_IP(regs, val) (GET_IP(regs) = (val))
-#endif
-
-static inline unsigned long instruction_pointer(struct pt_regs *regs)
-{
-	return GET_IP(regs);
-}
-static inline void instruction_pointer_set(struct pt_regs *regs,
-                                           unsigned long val)
-{
-	SET_IP(regs, val);
-}
-
-#ifndef profile_pc
-#define profile_pc(regs) instruction_pointer(regs)
-#endif
-
-/* Helpers for working with the user stack pointer */
-#ifndef GET_USP
-#define GET_USP(regs) ((regs)->usp)
-#endif
-#ifndef SET_USP
-#define SET_USP(regs, val) (GET_USP(regs) = (val))
-#endif
-
-static inline unsigned long user_stack_pointer(struct pt_regs *regs)
-{
-	return GET_USP(regs);
-}
-static inline void user_stack_pointer_set(struct pt_regs *regs,
-                                          unsigned long val)
-{
-	SET_USP(regs, val);
-}
-
-/* Helpers for working with the frame pointer */
-#ifndef GET_FP
-#define GET_FP(regs) ((regs)->fp)
-#endif
-#ifndef SET_FP
-#define SET_FP(regs, val) (GET_FP(regs) = (val))
-#endif
-
-static inline unsigned long frame_pointer(struct pt_regs *regs)
-{
-	return GET_FP(regs);
-}
-static inline void frame_pointer_set(struct pt_regs *regs,
-                                     unsigned long val)
-{
-	SET_FP(regs, val);
-}
-
-#endif /* __ASSEMBLY__ */
-
-#endif
diff --git a/include/asm-generic/qrwlock.h b/include/asm-generic/qrwlock.h
index 0f7062b..3aefde2 100644
--- a/include/asm-generic/qrwlock.h
+++ b/include/asm-generic/qrwlock.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Queue read/write lock
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
  *
  * Authors: Waiman Long <waiman.long@hp.com>
@@ -71,8 +62,8 @@
 	if (unlikely(cnts))
 		return 0;
 
-	return likely(atomic_cmpxchg_acquire(&lock->cnts,
-					     cnts, cnts | _QW_LOCKED) == cnts);
+	return likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts,
+				_QW_LOCKED));
 }
 /**
  * queued_read_lock - acquire read lock of a queue rwlock
@@ -96,8 +87,9 @@
  */
 static inline void queued_write_lock(struct qrwlock *lock)
 {
+	u32 cnts = 0;
 	/* Optimize for the unfair lock case where the fair flag is 0. */
-	if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
+	if (likely(atomic_try_cmpxchg_acquire(&lock->cnts, &cnts, _QW_LOCKED)))
 		return;
 
 	queued_write_lock_slowpath(lock);
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 9cc4575..fde943d 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Queued spinlock
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
  * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
  *
@@ -66,10 +57,12 @@
  */
 static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 {
-	if (!atomic_read(&lock->val) &&
-	   (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
-		return 1;
-	return 0;
+	u32 val = atomic_read(&lock->val);
+
+	if (unlikely(val))
+		return 0;
+
+	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
 }
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
@@ -80,11 +73,11 @@
  */
 static __always_inline void queued_spin_lock(struct qspinlock *lock)
 {
-	u32 val;
+	u32 val = 0;
 
-	val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
-	if (likely(val == 0))
+	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
 		return;
+
 	queued_spin_lock_slowpath(lock, val);
 }
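
The conversions above rely on the try_cmpxchg() calling convention: on failure the expected value is updated in place, so a retry loop needs no extra atomic_read(). A minimal illustration with a made-up helper:

```c
static inline void example_set_flag_once(atomic_t *v, int flag)
{
	int old = atomic_read(v);

	do {
		if (old & flag)
			return;		/* already set by someone else */
	} while (!atomic_try_cmpxchg_acquire(v, &old, old | flag));
}
```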
 
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index d10f1e7..56d1309 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -1,16 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * Queued spinlock
  *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
  * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
  *
  * Authors: Waiman Long <waiman.long@hp.com>
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
deleted file mode 100644
index 93e67a0..0000000
--- a/include/asm-generic/rwsem.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_GENERIC_RWSEM_H
-#define _ASM_GENERIC_RWSEM_H
-
-#ifndef _LINUX_RWSEM_H
-#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
-#endif
-
-#ifdef __KERNEL__
-
-/*
- * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
- * Adapted largely from include/asm-i386/rwsem.h
- * by Paul Mackerras <paulus@samba.org>.
- */
-
-/*
- * the semaphore definition
- */
-#ifdef CONFIG_64BIT
-# define RWSEM_ACTIVE_MASK		0xffffffffL
-#else
-# define RWSEM_ACTIVE_MASK		0x0000ffffL
-#endif
-
-#define RWSEM_UNLOCKED_VALUE		0x00000000L
-#define RWSEM_ACTIVE_BIAS		0x00000001L
-#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
-#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
-#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
-
-/*
- * lock for reading
- */
-static inline void __down_read(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
-		rwsem_down_read_failed(sem);
-}
-
-static inline int __down_read_killable(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
-		if (IS_ERR(rwsem_down_read_failed_killable(sem)))
-			return -EINTR;
-	}
-
-	return 0;
-}
-
-static inline int __down_read_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	while ((tmp = atomic_long_read(&sem->count)) >= 0) {
-		if (tmp == atomic_long_cmpxchg_acquire(&sem->count, tmp,
-				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/*
- * lock for writing
- */
-static inline void __down_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-					     &sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		rwsem_down_write_failed(sem);
-}
-
-static inline int __down_write_killable(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
-					     &sem->count);
-	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
-		if (IS_ERR(rwsem_down_write_failed_killable(sem)))
-			return -EINTR;
-	return 0;
-}
-
-static inline int __down_write_trylock(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_cmpxchg_acquire(&sem->count, RWSEM_UNLOCKED_VALUE,
-		      RWSEM_ACTIVE_WRITE_BIAS);
-	return tmp == RWSEM_UNLOCKED_VALUE;
-}
-
-/*
- * unlock after reading
- */
-static inline void __up_read(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	tmp = atomic_long_dec_return_release(&sem->count);
-	if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
-		rwsem_wake(sem);
-}
-
-/*
- * unlock after writing
- */
-static inline void __up_write(struct rw_semaphore *sem)
-{
-	if (unlikely(atomic_long_sub_return_release(RWSEM_ACTIVE_WRITE_BIAS,
-						    &sem->count) < 0))
-		rwsem_wake(sem);
-}
-
-/*
- * downgrade write lock to read lock
- */
-static inline void __downgrade_write(struct rw_semaphore *sem)
-{
-	long tmp;
-
-	/*
-	 * When downgrading from exclusive to shared ownership,
-	 * anything inside the write-locked region cannot leak
-	 * into the read side. In contrast, anything in the
-	 * read-locked region is ok to be re-ordered into the
-	 * write side. As such, rely on RELEASE semantics.
-	 */
-	tmp = atomic_long_add_return_release(-RWSEM_WAITING_BIAS, &sem->count);
-	if (tmp < 0)
-		rwsem_downgrade_wake(sem);
-}
-
-#endif	/* __KERNEL__ */
-#endif	/* _ASM_GENERIC_RWSEM_H */
diff --git a/include/asm-generic/seccomp.h b/include/asm-generic/seccomp.h
index e74072d..1321ac7 100644
--- a/include/asm-generic/seccomp.h
+++ b/include/asm-generic/seccomp.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * include/asm-generic/seccomp.h
  *
  * Copyright (C) 2014 Linaro Limited
  * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
  */
 #ifndef _ASM_GENERIC_SECCOMP_H
 #define _ASM_GENERIC_SECCOMP_H
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 849cd8e..d1779d4 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -77,6 +77,20 @@
 }
 #endif
 
+/*
+ * Check if an address is part of freed initmem. This is needed on architectures
+ * with virt == phys kernel mapping, for code that wants to check if an address
+ * is part of a static object within [_stext, _end]. After initmem is freed,
+ * memory can be allocated from it, and such allocations would then have
+ * addresses within the range [_stext, _end].
+ */
+#ifndef arch_is_kernel_initmem_freed
+static inline int arch_is_kernel_initmem_freed(unsigned long addr)
+{
+	return 0;
+}
+#endif
+
 /**
  * memory_contains - checks if an object is contained within a memory region
  * @begin: virtual address of the beginning of the memory region
@@ -141,4 +155,18 @@
 	return memory_intersects(__init_begin, __init_end, virt, size);
 }
 
+/**
+ * is_kernel_rodata - checks if the pointer address is located in the
+ *                    .rodata section
+ *
+ * @addr: address to check
+ *
+ * Returns: true if the address is located in .rodata, false otherwise.
+ */
+static inline bool is_kernel_rodata(unsigned long addr)
+{
+	return addr >= (unsigned long)__start_rodata &&
+	       addr < (unsigned long)__end_rodata;
+}
+
 #endif /* _ASM_GENERIC_SECTIONS_H_ */
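
An illustrative sketch of the kind of check is_kernel_rodata() enables, mirroring kstrdup_const()-style helpers; the function name here is hypothetical.

```c
static const char *example_maybe_dup(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;		/* safe to reference directly */

	return kstrdup(s, gfp);
}
```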
diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h
deleted file mode 100644
index 5580eac..0000000
--- a/include/asm-generic/segment.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef __ASM_GENERIC_SEGMENT_H
-#define __ASM_GENERIC_SEGMENT_H
-/*
- * Only here because we have some old header files that expect it...
- *
- * New architectures probably don't want to have their own version.
- */
-
-#endif /* __ASM_GENERIC_SEGMENT_H */
diff --git a/include/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
new file mode 100644
index 0000000..b8f9035
--- /dev/null
+++ b/include/asm-generic/shmparam.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_SHMPARAM_H
+#define __ASM_GENERIC_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE	 /* attach addr a multiple of this */
+
+#endif /* __ASM_GENERIC_SHMPARAM_H */
diff --git a/include/asm-generic/sizes.h b/include/asm-generic/sizes.h
deleted file mode 100644
index 1dcfad9..0000000
--- a/include/asm-generic/sizes.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* This is a placeholder, to be removed over time */
-#include <linux/sizes.h>
diff --git a/include/asm-generic/switch_to.h b/include/asm-generic/switch_to.h
index 986acc9..5897d10 100644
--- a/include/asm-generic/switch_to.h
+++ b/include/asm-generic/switch_to.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* Generic task switch macro wrapper.
  *
  * It should be possible to use these on really simple architectures,
@@ -5,11 +6,6 @@
  *
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
  */
 #ifndef __ASM_GENERIC_SWITCH_TO_H
 #define __ASM_GENERIC_SWITCH_TO_H
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 0c938a4..f3135e7 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -1,12 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Access to user system call parameters and results
  *
  * Copyright (C) 2008-2009 Red Hat, Inc.  All rights reserved.
  *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License v.2.
- *
  * This file is a stub providing documentation for what functions
  * asm-ARCH/syscall.h files need to define.  Most arch definitions
  * will be simple inlines.
@@ -105,53 +102,43 @@
  * syscall_get_arguments - extract system call parameter values
  * @task:	task of interest, must be blocked
  * @regs:	task_pt_regs() of @task
- * @i:		argument index [0,5]
- * @n:		number of arguments; n+i must be [1,6].
  * @args:	array filled with argument values
  *
- * Fetches @n arguments to the system call starting with the @i'th argument
- * (from 0 through 5).  Argument @i is stored in @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Fetches 6 arguments to the system call.  First argument is stored in
+ * @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
-			   unsigned int i, unsigned int n, unsigned long *args);
+			   unsigned long *args);
 
 /**
  * syscall_set_arguments - change system call parameter value
  * @task:	task of interest, must be in system call entry tracing
  * @regs:	task_pt_regs() of @task
- * @i:		argument index [0,5]
- * @n:		number of arguments; n+i must be [1,6].
  * @args:	array of argument values to store
  *
- * Changes @n arguments to the system call starting with the @i'th argument.
- * Argument @i gets value @args[0], and so on.
- * An arch inline version is probably optimal when @i and @n are constants.
+ * Changes 6 arguments to the system call.
+ * The first argument gets value @args[0], and so on.
  *
  * It's only valid to call this when @task is stopped for tracing on
  * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
- * It's invalid to call this with @i + @n > 6; we only support system calls
- * taking up to 6 arguments.
  */
 void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
-			   unsigned int i, unsigned int n,
 			   const unsigned long *args);
 
 /**
  * syscall_get_arch - return the AUDIT_ARCH for the current system call
+ * @task:	task of interest, must be blocked
  *
  * Returns the AUDIT_ARCH_* based on the system call convention in use.
  *
- * It's only valid to call this when current is stopped on entry to a system
+ * It's only valid to call this when @task is stopped on entry to a system
  * call, due to %TIF_SYSCALL_TRACE, %TIF_SYSCALL_AUDIT, or %TIF_SECCOMP.
  *
  * Architectures which permit CONFIG_HAVE_ARCH_SECCOMP_FILTER must
  * provide an implementation of this.
  */
-int syscall_get_arch(void);
+int syscall_get_arch(struct task_struct *task);
 #endif	/* _ASM_SYSCALL_H */
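
A sketch of a tracer-style caller under the new signature: all six arguments are always fetched into a fixed-size array. The function name is hypothetical.

```c
static void example_dump_syscall_args(struct task_struct *task,
				      struct pt_regs *regs)
{
	unsigned long args[6];
	int i;

	syscall_get_arguments(task, regs, args);
	for (i = 0; i < 6; i++)
		pr_debug("arg%d = %#lx\n", i, args[i]);
}
```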
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b3353e2..04c0644 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /* include/asm-generic/tlb.h
  *
  *	Generic TLB shootdown code
@@ -6,11 +7,6 @@
  * Based on code from mm/memory.c Copyright Linus Torvalds and others.
  *
  * Copyright 2011 Red Hat, Inc., Peter Zijlstra
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
  */
 #ifndef _ASM_GENERIC__TLB_H
 #define _ASM_GENERIC__TLB_H
@@ -19,6 +15,139 @@
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+/*
+ * Blindly accessing user memory from NMI context can be dangerous
+ * if we're in the middle of switching the current user task or switching
+ * the loaded mm.
+ */
+#ifndef nmi_uaccess_okay
+# define nmi_uaccess_okay() true
+#endif
+
+#ifdef CONFIG_MMU
+
+/*
+ * Generic MMU-gather implementation.
+ *
+ * The mmu_gather data structure is used by the mm code to implement the
+ * correct and efficient ordering of freeing pages and TLB invalidations.
+ *
+ * This correct ordering is:
+ *
+ *  1) unhook page
+ *  2) TLB invalidate page
+ *  3) free page
+ *
+ * That is, we must never free a page before we have ensured there are no live
+ * translations left to it. Otherwise it might be possible to observe (or
+ * worse, change) the page content after it has been reused.
+ *
+ * The mmu_gather API consists of:
+ *
+ *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
+ *
+ *    Finish in particular will issue a (final) TLB invalidate and free
+ *    all (remaining) queued pages.
+ *
+ *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
+ *
+ *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
+ *    there are large holes between the VMAs.
+ *
+ *  - tlb_remove_page() / __tlb_remove_page()
+ *  - tlb_remove_page_size() / __tlb_remove_page_size()
+ *
+ *    __tlb_remove_page_size() is the basic primitive that queues a page for
+ *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
+ *    boolean indicating if the queue is (now) full and a call to
+ *    tlb_flush_mmu() is required.
+ *
+ *    tlb_remove_page() and tlb_remove_page_size() imply the call to
+ *    tlb_flush_mmu() when required and has no return value.
+ *
+ *  - tlb_change_page_size()
+ *
+ *    call before __tlb_remove_page*() to set the current page-size; implies a
+ *    possible tlb_flush_mmu() call.
+ *
+ *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
+ *
+ *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
+ *                              related state, like the range)
+ *
+ *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
+ *			whatever pages are still batched.
+ *
+ *  - mmu_gather::fullmm
+ *
+ *    A flag set by tlb_gather_mmu() to indicate we're going to free
+ *    the entire mm; this allows a number of optimizations.
+ *
+ *    - We can ignore tlb_{start,end}_vma(); because we don't
+ *      care about ranges. Everything will be shot down.
+ *
+ *    - (RISC) architectures that use ASIDs can cycle to a new ASID
+ *      and delay the invalidation until ASID space runs out.
+ *
+ *  - mmu_gather::need_flush_all
+ *
+ *    A flag that can be set by the arch code if it wants to force
+ *    flush the entire TLB irrespective of the range. For instance
+ *    x86-PAE needs this when changing top-level entries.
+ *
+ * And allows the architecture to provide and implement tlb_flush():
+ *
+ * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
+ * use of:
+ *
+ *  - mmu_gather::start / mmu_gather::end
+ *
+ *    which provides the range that needs to be flushed to cover the pages to
+ *    be freed.
+ *
+ *  - mmu_gather::freed_tables
+ *
+ *    set when we freed page table pages
+ *
+ *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
+ *
+ *    returns the smallest TLB entry size unmapped in this range.
+ *
+ * If an architecture does not provide tlb_flush() a default implementation
+ * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
+ * specified, in which case we'll default to flush_tlb_mm().
+ *
+ * Additionally there are a few opt-in features:
+ *
+ *  HAVE_MMU_GATHER_PAGE_SIZE
+ *
+ *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
+ *  changes the size and provides mmu_gather::page_size to tlb_flush().
+ *
+ *  HAVE_RCU_TABLE_FREE
+ *
+ *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
+ *  for page directories (__p*_free_tlb()). This provides separate freeing of
+ *  the page-table pages themselves in a semi-RCU fashion (see comment below).
+ *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ *  and therefore doesn't naturally serialize with software page-table walkers.
+ *
+ *  When used, an architecture is expected to provide __tlb_remove_table()
+ *  which does the actual freeing of these pages.
+ *
+ *  HAVE_RCU_TABLE_NO_INVALIDATE
+ *
+ *  This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
+ *  freeing the page-table pages. This can be avoided if you use
+ *  HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
+ *  page-tables natively.
+ *
+ *  MMU_GATHER_NO_RANGE
+ *
+ *  Use this if your architecture lacks an efficient flush_tlb_range().
+ */
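
A condensed sketch of the pattern described above, using the tlb_gather_mmu()/tlb_finish_mmu() signatures as of this kernel version; the actual unmap step is elided and the function name is made up.

```c
static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	/* ... unhook page table entries, tlb_remove_page() each page ... */
	tlb_finish_mmu(&tlb, start, end);	/* final invalidate + free */
}
```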
 
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 /*
@@ -58,11 +187,11 @@
 #define MAX_TABLE_BATCH		\
 	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
 
-extern void tlb_table_flush(struct mmu_gather *tlb);
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
 
 #endif
 
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 /*
  * If we can't allocate a page to make a big batch of page pointers
  * to work on, then just handle a few from the on-stack structure.
@@ -87,39 +216,72 @@
  */
 #define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)
 
-/* struct mmu_gather is an opaque type used by the mm code for passing around
+extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
+				   int page_size);
+#endif
+
+/*
+ * struct mmu_gather is an opaque type used by the mm code for passing around
  * any data needed by arch specific code for tlb_remove_page.
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
+
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	struct mmu_table_batch	*batch;
 #endif
+
 	unsigned long		start;
 	unsigned long		end;
-	/* we are in the middle of an operation to clear
-	 * a full mm and can make some optimizations */
-	unsigned int		fullmm : 1,
-	/* we have performed an operation which
-	 * requires a complete flush of the tlb */
-				need_flush_all : 1;
+	/*
+	 * we are in the middle of an operation to clear
+	 * a full mm and can make some optimizations
+	 */
+	unsigned int		fullmm : 1;
 
+	/*
+	 * we have performed an operation which
+	 * requires a complete flush of the tlb
+	 */
+	unsigned int		need_flush_all : 1;
+
+	/*
+	 * we have removed page directories
+	 */
+	unsigned int		freed_tables : 1;
+
+	/*
+	 * at which levels have we cleared entries?
+	 */
+	unsigned int		cleared_ptes : 1;
+	unsigned int		cleared_pmds : 1;
+	unsigned int		cleared_puds : 1;
+	unsigned int		cleared_p4ds : 1;
+
+	/*
+	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
+	 */
+	unsigned int		vma_exec : 1;
+	unsigned int		vma_huge : 1;
+
+	unsigned int		batch_count;
+
+#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
 	struct mmu_gather_batch *active;
 	struct mmu_gather_batch	local;
 	struct page		*__pages[MMU_GATHER_BUNDLE];
-	unsigned int		batch_count;
-	int page_size;
-};
 
-#define HAVE_GENERIC_MMU_GATHER
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	unsigned int page_size;
+#endif
+#endif
+};
 
 void arch_tlb_gather_mmu(struct mmu_gather *tlb,
 	struct mm_struct *mm, unsigned long start, unsigned long end);
 void tlb_flush_mmu(struct mmu_gather *tlb);
 void arch_tlb_finish_mmu(struct mmu_gather *tlb,
 			 unsigned long start, unsigned long end, bool force);
-extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
-				   int page_size);
 
 static inline void __tlb_adjust_range(struct mmu_gather *tlb,
 				      unsigned long address,
@@ -137,8 +299,99 @@
 		tlb->start = TASK_SIZE;
 		tlb->end = 0;
 	}
+	tlb->freed_tables = 0;
+	tlb->cleared_ptes = 0;
+	tlb->cleared_pmds = 0;
+	tlb->cleared_puds = 0;
+	tlb->cleared_p4ds = 0;
+	/*
+	 * Do not reset mmu_gather::vma_* fields here, we do not
+	 * call into tlb_start_vma() again to set them if there is an
+	 * intermediate flush.
+	 */
 }
 
+#ifdef CONFIG_MMU_GATHER_NO_RANGE
+
+#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
+#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not have efficient means of range flushing TLBs
+ * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
+ * range small. We equally don't have to worry about page granularity or other
+ * things.
+ *
+ * All we need to do is issue a full flush for any !0 range.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->end)
+		flush_tlb_mm(tlb->mm);
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#define tlb_end_vma tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#else /* CONFIG_MMU_GATHER_NO_RANGE */
+
+#ifndef tlb_flush
+
+#if defined(tlb_start_vma) || defined(tlb_end_vma)
+#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
+#endif
+
+/*
+ * When an architecture does not provide its own tlb_flush() implementation
+ * but does have a reasonably efficient flush_tlb_range() implementation,
+ * use that.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || tlb->need_flush_all) {
+		flush_tlb_mm(tlb->mm);
+	} else if (tlb->end) {
+		struct vm_area_struct vma = {
+			.vm_mm = tlb->mm,
+			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
+				    (tlb->vma_huge ? VM_HUGETLB : 0),
+		};
+
+		flush_tlb_range(&vma, tlb->start, tlb->end);
+	}
+}
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	/*
+	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
+	 * mips-4k) flush only large pages.
+	 *
+	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
+	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
+	 * range.
+	 *
+	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
+	 * these values the batch is empty.
+	 */
+	tlb->vma_huge = !!(vma->vm_flags & VM_HUGETLB);
+	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
+}
+
+#else
+
+static inline void
+tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
+
+#endif
+
+#endif /* CONFIG_MMU_GATHER_NO_RANGE */
+
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	if (!tlb->end)
@@ -170,21 +423,37 @@
 	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
 }
 
-#ifndef tlb_remove_check_page_size_change
-#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
-static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+static inline void tlb_change_page_size(struct mmu_gather *tlb,
 						     unsigned int page_size)
 {
-	/*
-	 * We don't care about page size change, just update
-	 * mmu_gather page size here so that debug checks
-	 * doesn't throw false warning.
-	 */
-#ifdef CONFIG_DEBUG_VM
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+	if (tlb->page_size && tlb->page_size != page_size) {
+		if (!tlb->fullmm)
+			tlb_flush_mmu(tlb);
+	}
+
 	tlb->page_size = page_size;
 #endif
 }
-#endif
+
+static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
+{
+	if (tlb->cleared_ptes)
+		return PAGE_SHIFT;
+	if (tlb->cleared_pmds)
+		return PMD_SHIFT;
+	if (tlb->cleared_puds)
+		return PUD_SHIFT;
+	if (tlb->cleared_p4ds)
+		return P4D_SHIFT;
+
+	return PAGE_SHIFT;
+}
+
+static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
+{
+	return 1UL << tlb_get_unmap_shift(tlb);
+}
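As an illustration of how these helpers are meant to be consumed, here is a hedged sketch of an architecture-specific tlb_flush() that derives a per-entry invalidation stride from the unmap granule (loosely inspired by arm64; __flush_tlb_range_stride() is a hypothetical arch helper, not something this header provides).

/* Sketch only: stride selection in a hypothetical arch tlb_flush(). */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	unsigned long stride = tlb_get_unmap_size(tlb);

	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
		return;
	}

	/* Invalidate in steps of the smallest entry size actually cleared. */
	__flush_tlb_range_stride(tlb->mm, tlb->start, tlb->end, stride);
}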
 
 /*
  * In the case of tlb vma handling, we can optimise these away in the
@@ -192,17 +461,30 @@
  * the vmas are adjusted to only cover the region to be torn down.
  */
 #ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
+static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (tlb->fullmm)
+		return;
+
+	tlb_update_vma_flags(tlb, vma);
+	flush_cache_range(vma, vma->vm_start, vma->vm_end);
+}
 #endif
 
-#define __tlb_end_vma(tlb, vma)					\
-	do {							\
-		if (!tlb->fullmm)				\
-			tlb_flush_mmu_tlbonly(tlb);		\
-	} while (0)
-
 #ifndef tlb_end_vma
-#define tlb_end_vma	__tlb_end_vma
+static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
+{
+	if (tlb->fullmm)
+		return;
+
+	/*
+	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
+	 * the ranges growing with the unused space between consecutive VMAs,
+	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
+	 * this.
+	 */
+	tlb_flush_mmu_tlbonly(tlb);
+}
 #endif
 
 #ifndef __tlb_remove_tlb_entry
@@ -219,13 +501,19 @@
 #define tlb_remove_tlb_entry(tlb, ptep, address)		\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->cleared_ptes = 1;				\
 		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
-#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	     \
-	do {							     \
-		__tlb_adjust_range(tlb, address, huge_page_size(h)); \
-		__tlb_remove_tlb_entry(tlb, ptep, address);	     \
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	do {							\
+		unsigned long _sz = huge_page_size(h);		\
+		__tlb_adjust_range(tlb, address, _sz);		\
+		if (_sz == PMD_SIZE)				\
+			tlb->cleared_pmds = 1;			\
+		else if (_sz == PUD_SIZE)			\
+			tlb->cleared_puds = 1;			\
+		__tlb_remove_tlb_entry(tlb, ptep, address);	\
 	} while (0)
 
 /**
@@ -239,6 +527,7 @@
 #define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
 	do {								\
 		__tlb_adjust_range(tlb, address, HPAGE_PMD_SIZE);	\
+		tlb->cleared_pmds = 1;					\
 		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
 	} while (0)
 
@@ -253,6 +542,7 @@
 #define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
 	do {								\
 		__tlb_adjust_range(tlb, address, HPAGE_PUD_SIZE);	\
+		tlb->cleared_puds = 1;					\
 		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
 	} while (0)
 
@@ -278,6 +568,8 @@
 #define pte_free_tlb(tlb, ptep, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_pmds = 1;				\
 		__pte_free_tlb(tlb, ptep, address);		\
 	} while (0)
 #endif
@@ -285,7 +577,9 @@
 #ifndef pmd_free_tlb
 #define pmd_free_tlb(tlb, pmdp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address, PAGE_SIZE);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_puds = 1;				\
 		__pmd_free_tlb(tlb, pmdp, address);		\
 	} while (0)
 #endif
@@ -295,6 +589,8 @@
 #define pud_free_tlb(tlb, pudp, address)			\
 	do {							\
 		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
+		tlb->cleared_p4ds = 1;				\
 		__pud_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
@@ -304,12 +600,13 @@
 #ifndef p4d_free_tlb
 #define p4d_free_tlb(tlb, pudp, address)			\
 	do {							\
-		__tlb_adjust_range(tlb, address, PAGE_SIZE);		\
+		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
+		tlb->freed_tables = 1;				\
 		__p4d_free_tlb(tlb, pudp, address);		\
 	} while (0)
 #endif
 #endif
 
-#define tlb_migrate_finish(mm) do {} while (0)
+#endif /* CONFIG_MMU */
 
 #endif /* _ASM_GENERIC__TLB_H */
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 6b2e63d..e935318 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -9,7 +9,63 @@
  */
 #include <linux/string.h>
 
-#include <asm/segment.h>
+#ifdef CONFIG_UACCESS_MEMCPY
+static inline __must_check unsigned long
+raw_copy_from_user(void *to, const void __user * from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		switch(n) {
+		case 1:
+			*(u8 *)to = *(u8 __force *)from;
+			return 0;
+		case 2:
+			*(u16 *)to = *(u16 __force *)from;
+			return 0;
+		case 4:
+			*(u32 *)to = *(u32 __force *)from;
+			return 0;
+#ifdef CONFIG_64BIT
+		case 8:
+			*(u64 *)to = *(u64 __force *)from;
+			return 0;
+#endif
+		}
+	}
+
+	memcpy(to, (const void __force *)from, n);
+	return 0;
+}
+
+static inline __must_check unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	if (__builtin_constant_p(n)) {
+		switch(n) {
+		case 1:
+			*(u8 __force *)to = *(u8 *)from;
+			return 0;
+		case 2:
+			*(u16 __force *)to = *(u16 *)from;
+			return 0;
+		case 4:
+			*(u32 __force *)to = *(u32 *)from;
+			return 0;
+#ifdef CONFIG_64BIT
+		case 8:
+			*(u64 __force *)to = *(u64 *)from;
+			return 0;
+#endif
+		default:
+			break;
+		}
+	}
+
+	memcpy((void __force *)to, from, n);
+	return 0;
+}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
+#endif /* CONFIG_UACCESS_MEMCPY */
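For context, a minimal sketch of a caller on a configuration that selects CONFIG_UACCESS_MEMCPY; copy_from_user() then resolves (via INLINE_COPY_FROM_USER) to the memcpy-based raw_copy_from_user() above. The demo_cmd structure and demo_read_cmd() helper are hypothetical.

/* Sketch only: a typical consumer of the memcpy-based uaccess path. */
struct demo_cmd {
	u32 opcode;
	u32 arg;
};

static long demo_read_cmd(struct demo_cmd *cmd, const void __user *uptr)
{
	/* copy_from_user() returns the number of bytes left uncopied. */
	if (copy_from_user(cmd, uptr, sizeof(*cmd)))
		return -EFAULT;
	return 0;
}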
 
 #define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
 
@@ -22,7 +78,6 @@
 #endif
 
 #ifndef get_fs
-#define get_ds()	(KERNEL_DS)
 #define get_fs()	(current_thread_info()->addr_limit)
 
 static inline void set_fs(mm_segment_t fs)
@@ -35,7 +90,7 @@
 #define segment_eq(a, b) ((a).seg == (b).seg)
 #endif
 
-#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
+#define access_ok(addr, size) __access_ok((unsigned long)(addr),(size))
 
 /*
  * The architecture should really override this if possible, at least
@@ -78,7 +133,7 @@
 ({								\
 	void __user *__p = (ptr);				\
 	might_fault();						\
-	access_ok(VERIFY_WRITE, __p, sizeof(*ptr)) ?		\
+	access_ok(__p, sizeof(*ptr)) ?		\
 		__put_user((x), ((__typeof__(*(ptr)) __user *)__p)) :	\
 		-EFAULT;					\
 })
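The hunks above and below also reflect the removal of the VERIFY_READ/VERIFY_WRITE argument; a minimal sketch of an updated caller follows (demo_check_user_buf() is hypothetical).

/* Sketch only: access_ok() now takes just the pointer and the size. */
static int demo_check_user_buf(void __user *ubuf, size_t len)
{
	/* Previously: if (!access_ok(VERIFY_WRITE, ubuf, len)) ... */
	if (!access_ok(ubuf, len))
		return -EFAULT;
	return 0;
}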
@@ -140,7 +195,7 @@
 ({								\
 	const void __user *__p = (ptr);				\
 	might_fault();						\
-	access_ok(VERIFY_READ, __p, sizeof(*ptr)) ?		\
+	access_ok(__p, sizeof(*ptr)) ?		\
 		__get_user((x), (__typeof__(*(ptr)) __user *)__p) :\
 		((x) = (__typeof__(*(ptr)))0,-EFAULT);		\
 })
@@ -175,7 +230,7 @@
 static inline long
 strncpy_from_user(char *dst, const char __user *src, long count)
 {
-	if (!access_ok(VERIFY_READ, src, 1))
+	if (!access_ok(src, 1))
 		return -EFAULT;
 	return __strncpy_from_user(dst, src, count);
 }
@@ -196,7 +251,7 @@
  */
 static inline long strnlen_user(const char __user *src, long n)
 {
-	if (!access_ok(VERIFY_READ, src, 1))
+	if (!access_ok(src, 1))
 		return 0;
 	return __strnlen_user(src, n);
 }
@@ -217,7 +272,7 @@
 clear_user(void __user *to, unsigned long n)
 {
 	might_fault();
-	if (!access_ok(VERIFY_WRITE, to, n))
+	if (!access_ok(to, n))
 		return n;
 
 	return __clear_user(to, n);
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
deleted file mode 100644
index cdf9042..0000000
--- a/include/asm-generic/unistd.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#include <uapi/asm-generic/unistd.h>
-#include <linux/export.h>
-
-/*
- * These are required system calls, we should
- * invert the logic eventually and let them
- * be selected by default.
- */
-#if __BITS_PER_LONG == 32
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_LLSEEK
-#endif
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
new file mode 100644
index 0000000..ce41032
--- /dev/null
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_GENERIC_VSYSCALL_H
+#define __ASM_GENERIC_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#ifndef __arch_get_k_vdso_data
+static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
+{
+	return NULL;
+}
+#endif /* __arch_get_k_vdso_data */
+
+#ifndef __arch_update_vdso_data
+static __always_inline int __arch_update_vdso_data(void)
+{
+	return 0;
+}
+#endif /* __arch_update_vdso_data */
+
+#ifndef __arch_get_clock_mode
+static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
+{
+	return 0;
+}
+#endif /* __arch_get_clock_mode */
+
+#ifndef __arch_update_vsyscall
+static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
+						   struct timekeeper *tk)
+{
+}
+#endif /* __arch_update_vsyscall */
+
+#ifndef __arch_sync_vdso_data
+static __always_inline void __arch_sync_vdso_data(struct vdso_data *vdata)
+{
+}
+#endif /* __arch_sync_vdso_data */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_VSYSCALL_H */
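A hedged sketch of how an architecture header can override one of the hooks above: define the inline and the matching macro before including the generic header, so the corresponding #ifndef default is skipped. The arch_vdso_data symbol is hypothetical.

/* Sketch only: arch-side override of __arch_get_k_vdso_data(). */
extern struct vdso_data arch_vdso_data[];	/* hypothetical per-arch storage */

static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
{
	return arch_vdso_data;
}
#define __arch_get_k_vdso_data __arch_get_k_vdso_data

#include <asm-generic/vdso/vsyscall.h>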
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index d7701d4..dae6460 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -110,10 +110,17 @@
 #endif
 
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define MCOUNT_REC()	. = ALIGN(8);				\
+			__start_mcount_loc = .;			\
+			KEEP(*(__patchable_function_entries))	\
+			__stop_mcount_loc = .;
+#else
 #define MCOUNT_REC()	. = ALIGN(8);				\
 			__start_mcount_loc = .;			\
 			KEEP(*(__mcount_loc))			\
 			__stop_mcount_loc = .;
+#endif
 #else
 #define MCOUNT_REC()
 #endif
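Whichever branch is used, the result is the same: an array of call-site addresses bounded by __start_mcount_loc/__stop_mcount_loc. A hedged sketch of a consumer (ftrace walks these at init; demo_walk_mcount_records() is hypothetical):

/* Sketch only: walking the recorded mcount/patchable-entry locations. */
extern unsigned long __start_mcount_loc[];
extern unsigned long __stop_mcount_loc[];

static void __init demo_walk_mcount_records(void)
{
	unsigned long *p;

	for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
		pr_debug("traceable call site at %px\n", (void *)*p);
}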
@@ -203,6 +210,20 @@
 #define EARLYCON_TABLE()
 #endif
 
+#ifdef CONFIG_SECURITY
+#define LSM_TABLE()	. = ALIGN(8);					\
+			__start_lsm_info = .;				\
+			KEEP(*(.lsm_info.init))				\
+			__end_lsm_info = .;
+#define EARLY_LSM_TABLE()	. = ALIGN(8);				\
+			__start_early_lsm_info = .;			\
+			KEEP(*(.early_lsm_info.init))			\
+			__end_early_lsm_info = .;
+#else
+#define LSM_TABLE()
+#define EARLY_LSM_TABLE()
+#endif
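Both LSM_TABLE() and THERMAL_TABLE() below follow the same pattern: KEEP() an init-time array between start/end markers that the owning subsystem walks at boot. A hedged sketch of the LSM side (demo_walk_lsms() is hypothetical; struct lsm_info comes from linux/lsm_hooks.h):

/* Sketch only: iterating the .lsm_info.init entries gathered above. */
extern struct lsm_info __start_lsm_info[];
extern struct lsm_info __end_lsm_info[];

static void __init demo_walk_lsms(void)
{
	struct lsm_info *lsm;

	for (lsm = __start_lsm_info; lsm < __end_lsm_info; lsm++)
		pr_info("LSM available: %s\n", lsm->name);
}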
+
 #define ___OF_TABLE(cfg, name)	_OF_TABLE_##cfg(name)
 #define __OF_TABLE(cfg, name)	___OF_TABLE(cfg, name)
 #define OF_TABLE(cfg, name)	__OF_TABLE(IS_ENABLED(cfg), name)
@@ -230,6 +251,16 @@
 #define ACPI_PROBE_TABLE(name)
 #endif
 
+#ifdef CONFIG_THERMAL
+#define THERMAL_TABLE(name)						\
+	. = ALIGN(8);							\
+	__##name##_thermal_table = .;					\
+	KEEP(*(__##name##_thermal_table))				\
+	__##name##_thermal_table_end = .;
+#else
+#define THERMAL_TABLE(name)
+#endif
+
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	__dtb_start = .;						\
@@ -253,10 +284,6 @@
 	STRUCT_ALIGN();							\
 	*(__tracepoints)						\
 	/* implement dynamic printk debug */				\
-	. = ALIGN(8);                                                   \
-	__start___jump_table = .;					\
-	KEEP(*(__jump_table))                                           \
-	__stop___jump_table = .;					\
 	. = ALIGN(8);							\
 	__start___verbose = .;						\
 	KEEP(*(__verbose))                                              \
@@ -300,6 +327,12 @@
 	. = __start_init_task + THREAD_SIZE;				\
 	__end_init_task = .;
 
+#define JUMP_TABLE_DATA							\
+	. = ALIGN(8);							\
+	__start___jump_table = .;					\
+	KEEP(*(__jump_table))						\
+	__stop___jump_table = .;
+
 /*
  * Allow architectures to handle ro_after_init data on their
  * own by defining an empty RO_AFTER_INIT_DATA.
@@ -308,6 +341,7 @@
 #define RO_AFTER_INIT_DATA						\
 	__start_ro_after_init = .;					\
 	*(.data..ro_after_init)						\
+	JUMP_TABLE_DATA							\
 	__end_ro_after_init = .;
 #endif
 
@@ -320,7 +354,6 @@
 		__start_rodata = .;					\
 		*(.rodata) *(.rodata.*)					\
 		RO_AFTER_INIT_DATA	/* Read only after init */	\
-		KEEP(*(__vermagic))	/* Kernel version magic */	\
 		. = ALIGN(8);						\
 		__start___tracepoints_ptrs = .;				\
 		KEEP(*(__tracepoints_ptrs)) /* Tracepoints: pointer array */ \
@@ -473,13 +506,6 @@
 #define RODATA          RO_DATA_SECTION(4096)
 #define RO_DATA(align)  RO_DATA_SECTION(align)
 
-#define SECURITY_INIT							\
-	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
-		__security_initcall_start = .;				\
-		KEEP(*(.security_initcall.init))			\
-		__security_initcall_end = .;				\
-	}
-
 /*
  * .text section. Map to function alignment to avoid address changes
  * during second ld run in second ld pass when generating System.map
@@ -604,7 +630,10 @@
 	IRQCHIP_OF_MATCH_TABLE()					\
 	ACPI_PROBE_TABLE(irqchip)					\
 	ACPI_PROBE_TABLE(timer)						\
-	EARLYCON_TABLE()
+	THERMAL_TABLE(governor)						\
+	EARLYCON_TABLE()						\
+	LSM_TABLE()							\
+	EARLY_LSM_TABLE()
 
 #define INIT_TEXT							\
 	*(.init.text .init.text.*)					\
@@ -727,7 +756,7 @@
 		KEEP(*(.orc_unwind_ip))					\
 		__stop_orc_unwind_ip = .;				\
 	}								\
-	. = ALIGN(6);							\
+	. = ALIGN(2);							\
 	.orc_unwind : AT(ADDR(.orc_unwind) - LOAD_OFFSET) {		\
 		__start_orc_unwind = .;					\
 		KEEP(*(.orc_unwind))					\
@@ -793,11 +822,6 @@
 		KEEP(*(.con_initcall.init))				\
 		__con_initcall_end = .;
 
-#define SECURITY_INITCALL						\
-		__security_initcall_start = .;				\
-		KEEP(*(.security_initcall.init))			\
-		__security_initcall_end = .;
-
 #ifdef CONFIG_BLK_DEV_INITRD
 #define INIT_RAM_FS							\
 	. = ALIGN(4);							\
@@ -843,6 +867,7 @@
 	EXIT_CALL							\
 	*(.discard)							\
 	*(.discard.*)							\
+	*(.modinfo)							\
 	}
 
 /**
@@ -964,7 +989,6 @@
 		INIT_SETUP(initsetup_align)				\
 		INIT_CALLS						\
 		CON_INITCALL						\
-		SECURITY_INITCALL					\
 		INIT_RAM_FS						\
 	}
 
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index b4d8432..b62a2a5 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -1,16 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  * include/asm-generic/xor.h
  *
  * Generic optimized RAID-5 checksumming functions.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * You should have received a copy of the GNU General Public License
- * (for example /usr/src/linux/COPYING); if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
 #include <linux/prefetch.h>