Update Linux to v5.4.2
Change-Id: Idf6911045d9d382da2cfe01b1edff026404ac8fd
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 5451e1f..98c6b91 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* arch/arm/include/asm/uaccess.h
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef _ASMARM_UACCESS_H
#define _ASMARM_UACCESS_H
@@ -25,7 +22,7 @@
* perform such accesses (eg, via list poison values) which could then
* be exploited for privilege escalation.
*/
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
unsigned int old_domain = get_domain();
@@ -40,7 +37,7 @@
#endif
}
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
{
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/* Restore the user access mask */
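For illustration, a minimal sketch of the intended caller pattern (the
copy routine name is a stand-in, not part of this header): every user
access is bracketed by the save/restore pair, so the domain or PAN
state is only opened for the duration of the access.

	unsigned int __ua_flags = uaccess_save_and_enable();

	/* user memory may only be touched inside this window */
	n = __do_user_copy(to, from, n);	/* stand-in accessor */
	uaccess_restore(__ua_flags);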
@@ -59,7 +56,6 @@
* Note that this is actually 0x1,0000,0000
*/
#define KERNEL_DS 0x00000000
-#define get_ds() (KERNEL_DS)
#ifdef CONFIG_MMU
@@ -69,6 +65,14 @@
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+
+ /*
+ * Prevent a mispredicted conditional call to set_fs from forwarding
+ * the wrong address limit to access_ok under speculation.
+ */
+ dsb(nsh);
+ isb();
+
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
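A sketch of the classic address-limit override pattern these barriers
protect, assuming the usual get_fs() counterpart defined elsewhere in
this header:

	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* widen the limit; barriers order the store */
	/* ...access a kernel buffer through the user accessors... */
	set_fs(old_fs);		/* restore the caller's limit */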
@@ -78,7 +82,8 @@
#define __range_ok(addr, size) ({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \
+ __asm__(".syntax unified\n" \
+ "adds %1, %2, %3; sbcscc %1, %1, %0; movcc %0, #0" \
: "=&r" (flag), "=&r" (roksum) \
: "r" (addr), "Ir" (size), "0" (current_thread_info()->addr_limit) \
: "cc"); \
@@ -92,6 +97,33 @@
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size) \
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+ size_t size)
+{
+ void __user *safe_ptr = (void __user *)ptr;
+ unsigned long tmp;
+
+ asm volatile(
+ " .syntax unified\n"
+ " sub %1, %3, #1\n"
+ " subs %1, %1, %0\n"
+ " addhs %1, %1, #1\n"
+ " subshs %1, %1, %2\n"
+ " movlo %0, #0\n"
+ : "+r" (safe_ptr), "=&r" (tmp)
+ : "r" (size), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
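A hypothetical call site (sketch; names other than the helper itself
are illustrative): the sanitised pointer either covers the full range
or collapses to NULL, which the unchecked copy routines then fault on
harmlessly instead of speculatively dereferencing an out-of-range
address.

	void __user *masked = uaccess_mask_range_ptr(uptr, len);

	/* masked == NULL when uptr + len exceeds addr_limit */
	ret = __do_raw_copy_to_user(masked, kbuf, len);	/* stand-in */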
+/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
* which read from user space (*get_*) need to take care not to leak
@@ -245,7 +277,7 @@
#endif /* CONFIG_MMU */
-#define access_ok(type, addr, size) (__range_ok(addr, size) == 0)
+#define access_ok(addr, size) (__range_ok(addr, size) == 0)
#define user_addr_max() \
(uaccess_kernel() ? ~0UL : get_fs())
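The dropped type argument ripples to every call site; a before/after
sketch:

	/* before: access_ok(VERIFY_WRITE, buf, len) */
	if (!access_ok(buf, len))
		return -EFAULT;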
@@ -315,6 +347,13 @@
#define __get_user_asm_byte(x, addr, err) \
__get_user_asm(x, addr, err, ldrb)
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __get_user_asm_half(x, addr, err) \
+ __get_user_asm(x, addr, err, ldrh)
+
+#else
+
#ifndef __ARMEB__
#define __get_user_asm_half(x, __gu_addr, err) \
({ \
@@ -333,6 +372,8 @@
})
#endif
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#define __get_user_asm_word(x, addr, err) \
__get_user_asm(x, addr, err, ldr)
#endif
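For reference, a sketch of what the pre-v6 fallback does (the exact
macro bodies are elided by the hunk context above): lacking a reliable
unaligned ldrh, the halfword is assembled from two byte loads, e.g. on
little-endian:

	x = (unsigned short)b0 | ((unsigned short)b1 << 8);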
@@ -362,6 +403,14 @@
__pu_err; \
})
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
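A sketch of the contract difference the alias above removes (uptr and
val are illustrative):

	err = put_user(val, uptr);	/* always validates uptr itself */
	err = __put_user(val, uptr);	/* unchecked unless CONFIG_CPU_SPECTRE;
					 * the caller must have called
					 * access_ok() beforehand */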
@@ -369,12 +418,6 @@
__pu_err; \
})
-#define __put_user_error(x, ptr, err) \
-({ \
- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
- (void) 0; \
-})
-
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
@@ -406,6 +449,13 @@
#define __put_user_asm_byte(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, strb)
+#if __LINUX_ARM_ARCH__ >= 6
+
+#define __put_user_asm_half(x, __pu_addr, err) \
+ __put_user_asm(x, __pu_addr, err, strh)
+
+#else
+
#ifndef __ARMEB__
#define __put_user_asm_half(x, __pu_addr, err) \
({ \
@@ -422,6 +472,8 @@
})
#endif
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
#define __put_user_asm_word(x, __pu_addr, err) \
__put_user_asm(x, __pu_addr, err, str)
@@ -454,6 +506,7 @@
: "r" (x), "i" (-EFAULT) \
: "cc")
+#endif /* !CONFIG_CPU_SPECTRE */
#ifdef CONFIG_MMU
extern unsigned long __must_check
@@ -523,7 +576,7 @@
static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
- if (access_ok(VERIFY_WRITE, to, n))
+ if (access_ok(to, n))
n = __clear_user(to, n);
return n;
}