/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}
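/*
 * Typical (historical) usage of the get_fs()/set_fs() pair above, e.g.
 * when kernel code wants to drive a uaccess-based API on a kernel
 * buffer (illustrative sketch only):
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... uaccess routines may now be handed kernel addresses ...
 *	set_fs(old_fs);
 *
 * The save/restore discipline matters: TIF_FSCHECK exists precisely to
 * catch an addr_limit left widened on the way back to userspace.
 */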

#define uaccess_kernel()	(get_fs() == KERNEL_DS)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
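/*
 * Illustrative trace of the sequence above for an in-range access
 * (hypothetical values): addr = 0x1000, size = 0x10, limit = 2^48 - 1.
 * Step 1 yields X' = 0x1010 with the carry clear, so steps 2 and 3
 * leave limit and X' untouched; step 4 computes X' - limit - 1, which
 * borrows (carry clear), so "ls" holds and __range_ok() returns 1. An
 * addr/size pair that wraps past 2^64 instead forces limit to 0 and X'
 * to remain nonzero, making the final comparison fail.
 */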

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs
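/*
 * Typical guard in a syscall or ioctl handler before using the __xxx
 * accessors further down (sketch):
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 */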

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
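/*
 * Each entry is a pair of 32-bit offsets relative to the entry itself
 * (hence the "- ."): the address of the faulting instruction and the
 * address of its fixup. This matches the exception_table_entry layout
 * consumed by the fault handler via asm/extable.h.
 */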

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}
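/*
 * Between the two helpers above, TTBR0_EL1 points at the empty
 * reserved_pg_dir with the reserved ASID, so every userspace access
 * faults. This is the software emulation of PAN for CPUs that lack the
 * ARMv8.1 PSTATE.PAN bit.
 */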

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}
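/*
 * Callers that open a user-access window bracket it like this
 * (illustrative sketch):
 *
 *	uaccess_enable();
 *	... loads/stores through a masked __user pointer ...
 *	uaccess_disable();
 *
 * SW TTBR0 PAN takes priority when enabled; otherwise the PSTATE.PAN
 * bit is toggled via the boot-time alternative.
 */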

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
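/*
 * The BICS/CSEL pair clears the pointer to NULL, without a branch, if
 * the untagged address has bits above addr_limit (which is always of
 * the form 2^n - 1), and the trailing CSDB keeps a mispredicted caller
 * from dereferencing an out-of-range pointer under speculation: this
 * is the Spectre-v1 style mitigation for the uaccess routines.
 */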

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))
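/*
 * For a 4-byte load the macro above expands (roughly) to a boot-time
 * choice between "ldr %w1, [%2]" and the unprivileged "ldtr %w1, [%2]",
 * patched by the ARM64_HAS_UAO alternative; on a fault, the extable
 * entry redirects to the fixup, which sets err = -EFAULT and zeroes
 * the destination before resuming at label 2.
 */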

#define __raw_get_user(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
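/*
 * Example: fetch a u32 argument from userspace, failing cleanly on a
 * bad pointer (sketch; "uarg" is a hypothetical __user pointer):
 *
 *	u32 val;
 *
 *	if (get_user(val, uarg))
 *		return -EFAULT;
 */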

#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_enable_not_uao();					\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_disable_not_uao();					\
	__acfu_ret;							\
})
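/*
 * The raw copy helpers follow the usual kernel convention: they return
 * the number of bytes that could NOT be copied, 0 on complete success.
 * Callers of the generic wrappers therefore look like (sketch):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */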

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_enable_not_uao();					\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
					 (from), (n));			\
	uaccess_disable_not_uao();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_enable_not_uao();					\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
					 __uaccess_mask_ptr(from), (n));\
	uaccess_disable_not_uao();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
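/*
 * Defining INLINE_COPY_{TO,FROM}_USER makes the generic
 * copy_{to,from}_user() wrappers in linux/uaccess.h inline wrappers
 * around the raw helpers above, instead of out-of-line functions.
 */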

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_enable_not_uao();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_disable_not_uao();
	}
	return n;
}
#define clear_user	__clear_user
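/*
 * As with the copy helpers, clear_user() returns the number of bytes
 * that could not be zeroed (sketch):
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */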

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif
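/*
 * The flushcache variant additionally ensures the copied destination
 * lines are written back past the caches; it exists for persistent
 * memory (DAX) write paths, where a plain copy_from_user() would leave
 * data dirty in cache.
 */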

#endif /* __ASM_UACCESS_H */