/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

#ifdef CONFIG_SET_FS
/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around. Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}
#else /* CONFIG_SET_FS */
typedef struct {
	/* empty dummy */
} mm_segment_t;

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX		TASK_SIZE
#endif

#define uaccess_kernel()	(false)
#define user_addr_max()		(TASK_SIZE_MAX)

static inline mm_segment_t force_uaccess_begin(void)
{
	return (mm_segment_t) { };
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
#endif /* CONFIG_SET_FS */
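
/*
 * Illustrative sketch only (not part of this header's API): code running
 * in kernel context that must access a user address on a task's behalf
 * would bracket the access with force_uaccess_begin()/force_uaccess_end().
 * The helper name do_user_read() and its arguments are hypothetical.
 *
 *	static int do_user_read(void __user *uptr, void *buf, size_t len)
 *	{
 *		mm_segment_t old_fs = force_uaccess_begin();
 *		int ret = 0;
 *
 *		if (copy_from_user(buf, uptr, len))
 *			ret = -EFAULT;
 *		force_uaccess_end(old_fs);
 *		return ret;
 *	}
 */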

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy. They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to. They must not fetch or store anything
 * outside of those areas. Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from. All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0. If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied. In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user() the destination (to) always points to kernel
 * memory, so no faults on store should happen. Interpretation of from is
 * affected by set_fs(). For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that. They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead. Out of those, the __... ones are inlined. Plain
 * copy_{to,from}_user() might or might not be inlined. If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
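
/*
 * Hedged sketch of what an architecture might supply, assuming a no-MMU
 * style port where user and kernel share one flat address space and user
 * accesses cannot fault; ports with an MMU need exception-table based
 * fixups instead and must return the number of bytes left uncopied.
 * Illustrative only:
 *
 *	static inline unsigned long
 *	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		memcpy(to, (const void __force *)from, n);
 *		return 0;
 *	}
 */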

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that the copy does not trigger a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
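
/*
 * Minimal usage sketch, assuming a hypothetical handler and a hypothetical
 * struct foo_args; the part being illustrated is that a non-zero return
 * value from copy_from_user() is treated as -EFAULT:
 *
 *	static long foo_set_args(struct foo_args __user *uarg)
 *	{
 *		struct foo_args karg;
 *
 *		if (copy_from_user(&karg, uarg, sizeof(karg)))
 *			return -EFAULT;
 *		// ... validate and use karg ...
 *		return 0;
 *	}
 */
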
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
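
/*
 * Hedged sketch of the non-sleeping access pattern described above,
 * using a hypothetical helper peek_user_word() and assuming the caller
 * has already checked access_ok() on uaddr.  Between pagefault_disable()
 * and pagefault_enable() a faulting user access fails fast instead of
 * sleeping:
 *
 *	static int peek_user_word(unsigned long __user *uaddr,
 *				  unsigned long *val)
 *	{
 *		unsigned long left;
 *
 *		pagefault_disable();
 *		left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
 *		pagefault_enable();
 *		return left ? -EFAULT : 0;
 *	}
 */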

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is a no-op, so the handler won't
 * actually be disabled, and in_atomic() will report different values
 * depending on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif /* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
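
/*
 * Usage sketch: reading a word from a kernel address that may be bogus,
 * e.g. while formatting diagnostics.  The variable names (word, addr)
 * are illustrative only:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		pr_warn("address %px not readable\n", (void *)addr);
 */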

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
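
/*
 * Hedged sketch of the begin/unsafe/end pattern, using a hypothetical
 * helper put_user_flags(); the error-label handling is the part being
 * illustrated:
 *
 *	static int put_user_flags(u32 __user *uptr, u32 flags)
 *	{
 *		if (!user_write_access_begin(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		unsafe_put_user(flags, uptr, Efault);
 *		user_write_access_end();
 *		return 0;
 *	Efault:
 *		user_write_access_end();
 *		return -EFAULT;
 *	}
 */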

#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif /* __LINUX_UACCESS_H__ */