/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kasan-checks.h>

#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  The return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

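/*
 * As an illustration of the contract above (not something this header
 * provides), a minimal sketch of an architecture's raw_copy_from_user(),
 * assuming a configuration where user pointers are directly dereferenceable
 * (e.g. a nommu setup).  Real implementations use exception-table-protected
 * accesses and return the number of bytes left uncopied on a fault:
 *
 *	static inline unsigned long
 *	raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 *	{
 *		memcpy(to, (const void __force *)from, n);
 *		return 0;	// everything copied, nothing left
 *	}
 */
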
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	kasan_check_write(to, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check the
 * specified block with access_ok() before calling this function, and must
 * also make sure the user space pages are pinned so that the copy does not
 * take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

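/*
 * Illustrative only (names are hypothetical): a caller honouring the
 * __copy_to_user_inatomic() contract validates the range with access_ok()
 * first and disables page faults so the copy cannot sleep; a non-zero
 * return means the atomic fast path failed and a sleeping fallback such as
 * copy_to_user() is needed.
 *
 *	if (!access_ok(uaddr, len))
 *		return -EFAULT;
 *	pagefault_disable();
 *	left = __copy_to_user_inatomic(uaddr, kbuf, len);
 *	pagefault_enable();
 *	if (left)
 *		left = copy_to_user(uaddr, kbuf, len);
 */
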
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	kasan_check_read(from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (likely(access_ok(from, n))) {
		kasan_check_write(to, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		kasan_check_read(from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

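/*
 * A typical (hypothetical) caller of copy_from_user(): any non-zero return
 * is the number of bytes that could not be copied and is normally turned
 * into -EFAULT.  On a short copy the uncopied tail of the kernel buffer has
 * already been zero-filled, so stale kernel data is never left behind.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */
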
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

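/*
 * A minimal sketch of the intended pairing (illustrative only): between
 * pagefault_disable() and pagefault_enable() only the atomic user access
 * helpers should be used, and a fault makes them fail instead of sleeping.
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, uaddr, len);
 *	pagefault_enable();
 *	if (ret)
 *		// fault or short copy; retry from a context that may sleep
 */
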
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

/*
 * probe_kernel_read(): safely attempt to read from a location
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from address @src to the buffer at @dst.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long probe_kernel_read(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);

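/*
 * Hypothetical use: peeking at a kernel address that may be bogus (as a
 * debugger or tracer might) without risking an oops on a bad pointer:
 *
 *	unsigned long val;
 *
 *	if (probe_kernel_read(&val, maybe_bad_ptr, sizeof(val)))
 *		return -EFAULT;	// address was not readable
 */
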
/*
 * probe_user_read(): safely attempt to read from a location in user space
 * @dst: pointer to the buffer that shall take the data
 * @src: address to read from
 * @size: size of the data chunk
 *
 * Safely read from user space address @src to the buffer at @dst.  If a
 * fault happens, handle that and return -EFAULT.
 */
extern long probe_user_read(void *dst, const void __user *src, size_t size);
extern long __probe_user_read(void *dst, const void __user *src, size_t size);

/*
 * probe_kernel_write(): safely attempt to write to a location
 * @dst: address to write to
 * @src: pointer to the data that shall be written
 * @size: size of the data chunk
 *
 * Safely write to address @dst from the buffer at @src.  If a kernel fault
 * happens, handle that and return -EFAULT.
 */
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);

extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
				     long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);

/**
 * probe_kernel_address(): safely attempt to read from a location
 * @addr: address to read from
 * @retval: read into this variable
 *
 * Returns 0 on success, or -EFAULT.
 */
#define probe_kernel_address(addr, retval)		\
	probe_kernel_read(&retval, addr, sizeof(retval))

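/*
 * Equivalent (hypothetical) use of the wrapper above; note that @retval is
 * passed by name rather than by address:
 *
 *	unsigned long insn;
 *
 *	if (probe_kernel_address(pc, insn))
 *		return -EFAULT;
 */
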
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif

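/*
 * A sketch of the intended unsafe_*() pattern (all names illustrative): the
 * whole range is validated once by user_access_begin(), the unsafe accessors
 * then skip the per-access checks and branch to the label on a fault, and
 * user_access_end() must run on every exit path from the region.
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_get_user(a, &uptr->a, efault);
 *	unsafe_get_user(b, &uptr->b, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
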
#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif	/* __LINUX_UACCESS_H__ */