/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
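
/*
 * Illustrative only (not part of this file): the classic pattern for
 * temporarily widening the address limit so the uaccess routines may be
 * pointed at kernel buffers. 'do_kernel_access()' is a hypothetical helper.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	do_kernel_access();
 *	set_fs(old_fs);
 */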

#define segment_eq(a, b)	((a) == (b))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    test_thread_flag(TIF_TAGGED_ADDR))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs

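/*
 * Emit an exception table entry for the instruction at 'from': both fields
 * are 32-bit offsets relative to the entry itself, locating the faulting
 * instruction and the fixup code the fault handler should branch to.
 */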
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

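/*
 * When software TTBR0 PAN is not in use, user access is gated by toggling
 * PSTATE.PAN instead, patched in via the given alternative capability.
 */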
#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");

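	/*
	 * CSDB speculation barrier: make sure the NULL selected above
	 * cannot be ignored under speculation before the sanitised
	 * pointer is consumed.
	 */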
	csdb();
	return safe_ptr;
}

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
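/*
 * Each accessor below is patched at runtime: on CPUs with UAO the normal
 * load/store is replaced by its unprivileged ldtr/sttr form, and set_fs()
 * then uses PSTATE.UAO (see above) to decide whether those instructions may
 * also reach kernel memory.
 */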
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_user(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
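
/*
 * Illustrative use (hypothetical 'uptr' argument, not part of this file):
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */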

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
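
/*
 * Note that, unlike on some architectures, the __get_user()/__put_user()
 * variants above already perform the access_ok() check themselves, which is
 * why get_user()/put_user() are plain aliases here.
 */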

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_enable_not_uao();					\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_enable_not_uao();					\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				      (from), (n));			\
	uaccess_disable_not_uao();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_enable_not_uao();					\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				     __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
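
/*
 * The raw_copy_*() helpers above are consumed by the generic
 * copy_{from,to,in}_user() wrappers in <linux/uaccess.h>; the INLINE_*
 * defines ask for those wrappers to be inlined. The return value is the
 * number of bytes left uncopied (0 on success).
 */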

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_enable_not_uao();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_disable_not_uao();
	}
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */