/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com),
 *	       Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
#ifndef __S390_UACCESS_H
#define __S390_UACCESS_H

/*
 * User space memory access functions
 */
#include <asm/processor.h>
#include <asm/ctl_reg.h>
#include <asm/extable.h>
#include <asm/facility.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define KERNEL_DS	(0)
#define KERNEL_DS_SACF	(1)
#define USER_DS		(2)
#define USER_DS_SACF	(3)

#define get_fs()	(current->thread.mm_segment)
#define uaccess_kernel()	((get_fs() & 2) == KERNEL_DS)

void set_fs(mm_segment_t fs);
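
/*
 * Illustrative sketch (not part of this header): older kernel code that
 * wanted the uaccess helpers to accept kernel pointers used the classic
 * save/override/restore pattern around get_fs()/set_fs():
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... helpers that honour get_fs() now accept kernel pointers ...
 *	set_fs(old_fs);
 */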
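
/*
 * Address ranges never need to be checked here: s390 runs user space in
 * its own address space, so any address/length pair is acceptable and
 * __range_ok() can unconditionally report success.
 */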
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}

#define __access_ok(addr, size) \
({ \
	__chk_user_ptr(addr); \
	__range_ok((unsigned long)(addr), (size)); \
})

#define access_ok(addr, size) __access_ok(addr, size)
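
/*
 * Illustrative sketch (not part of this header): validating a user
 * pointer before using the __-prefixed (unchecked) accessors:
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))	// uptr is hypothetical
 *		return -EFAULT;
 */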

unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n);

unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n);

#ifndef CONFIG_KASAN
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
#endif
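
/*
 * With INLINE_COPY_{FROM,TO}_USER defined, <linux/uaccess.h> generates
 * inline copy_{from,to}_user() wrappers around the raw routines above.
 * Under KASAN the out-of-line variants are used instead, presumably to
 * keep the copies fully instrumented.
 */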

int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));

#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES

#define __put_get_user_asm(to, from, size, spec) \
({ \
	register unsigned long __reg0 asm("0") = spec; \
	int __rc; \
	\
	asm volatile( \
		"0: mvcos %1,%3,%2\n" \
		"1: xr %0,%0\n" \
		"2:\n" \
		".pushsection .fixup, \"ax\"\n" \
		"3: lhi %0,%5\n" \
		"   jg 2b\n" \
		".popsection\n" \
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
		: "=d" (__rc), "+Q" (*(to)) \
		: "d" (size), "Q" (*(from)), \
		  "d" (__reg0), "K" (-EFAULT) \
		: "cc"); \
	__rc; \
})
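
/*
 * MVCOS ("move with optional specifications") copies data across address
 * spaces in a single instruction; register 0 holds its operand-access
 * control, which the spec arguments below use to mark the user-space
 * operand (the destination for put_user, the source for get_user).
 */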

static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x010000UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char __user *)ptr,
					(unsigned char *)x,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short __user *)ptr,
					(unsigned short *)x,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int __user *)ptr,
					(unsigned int *)x,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long __user *)ptr,
					(unsigned long *)x,
					size, spec);
		break;
	default:
		__put_user_bad();
		break;
	}
	return rc;
}

static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	unsigned long spec = 0x01UL;
	int rc;

	switch (size) {
	case 1:
		rc = __put_get_user_asm((unsigned char *)x,
					(unsigned char __user *)ptr,
					size, spec);
		break;
	case 2:
		rc = __put_get_user_asm((unsigned short *)x,
					(unsigned short __user *)ptr,
					size, spec);
		break;
	case 4:
		rc = __put_get_user_asm((unsigned int *)x,
					(unsigned int __user *)ptr,
					size, spec);
		break;
	case 8:
		rc = __put_get_user_asm((unsigned long *)x,
					(unsigned long __user *)ptr,
					size, spec);
		break;
	default:
		__get_user_bad();
		break;
	}
	return rc;
}

#else /* CONFIG_HAVE_MARCH_Z10_FEATURES */

static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = raw_copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}

static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = raw_copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}

#endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr) \
({ \
	__typeof__(*(ptr)) __x = (x); \
	int __pu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: \
	case 2: \
	case 4: \
	case 8: \
		__pu_err = __put_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		break; \
	default: \
		__put_user_bad(); \
		break; \
	} \
	__builtin_expect(__pu_err, 0); \
})

#define put_user(x, ptr) \
({ \
	might_fault(); \
	__put_user(x, ptr); \
})
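
/*
 * Illustrative sketch (not part of this header): storing one value into
 * user space and propagating a fault, e.g. from an ioctl handler:
 *
 *	int __user *uptr = (int __user *)arg;	// arg is hypothetical
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */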

#define __get_user(x, ptr) \
({ \
	int __gu_err = -EFAULT; \
	__chk_user_ptr(ptr); \
	switch (sizeof(*(ptr))) { \
	case 1: { \
		unsigned char __x = 0; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	case 2: { \
		unsigned short __x = 0; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	case 4: { \
		unsigned int __x = 0; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	case 8: { \
		unsigned long long __x = 0; \
		__gu_err = __get_user_fn(&__x, ptr, \
					 sizeof(*(ptr))); \
		(x) = *(__force __typeof__(*(ptr)) *) &__x; \
		break; \
	} \
	default: \
		__get_user_bad(); \
		break; \
	} \
	__builtin_expect(__gu_err, 0); \
})

#define get_user(x, ptr) \
({ \
	might_fault(); \
	__get_user(x, ptr); \
})
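
/*
 * Illustrative sketch (not part of this header): fetching one value from
 * user space:
 *
 *	u32 val;
 *
 *	if (get_user(val, (u32 __user *)buf))	// buf is hypothetical
 *		return -EFAULT;
 */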

unsigned long __must_check
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);

/*
 * Copy a null-terminated string from userspace.
 */

long __strncpy_from_user(char *dst, const char __user *src, long count);

static inline long __must_check
strncpy_from_user(char *dst, const char __user *src, long count)
{
	might_fault();
	return __strncpy_from_user(dst, src, count);
}
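
/*
 * As with the generic helper, strncpy_from_user() is expected to return
 * the length of the copied string (excluding the terminating NUL), or
 * -EFAULT on an unhandled fault.
 */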

unsigned long __must_check __strnlen_user(const char __user *src, unsigned long count);

static inline unsigned long strnlen_user(const char __user *src, unsigned long n)
{
	might_fault();
	return __strnlen_user(src, n);
}

/*
 * Zero Userspace
 */
unsigned long __must_check __clear_user(void __user *to, unsigned long size);

static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
{
	might_fault();
	return __clear_user(to, n);
}
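
/*
 * Illustrative sketch (not part of this header): clear_user() returns the
 * number of bytes that could not be cleared, so callers usually map any
 * non-zero result to -EFAULT:
 *
 *	if (clear_user(ubuf, len))	// ubuf and len are hypothetical
 *		return -EFAULT;
 */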

int copy_to_user_real(void __user *dest, void *src, unsigned long count);
void *s390_kernel_write(void *dst, const void *src, size_t size);

#define HAVE_GET_KERNEL_NOFAULT
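
/*
 * HAVE_GET_KERNEL_NOFAULT signals to the generic maccess code that this
 * architecture supplies its own __get_kernel_nofault()/__put_kernel_nofault()
 * primitives (defined below) rather than relying on the set_fs()-based
 * fallback.
 */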

int __noreturn __put_kernel_bad(void);

#define __put_kernel_asm(val, to, insn) \
({ \
	int __rc; \
	\
	asm volatile( \
		"0: " insn " %2,%1\n" \
		"1: xr %0,%0\n" \
		"2:\n" \
		".pushsection .fixup, \"ax\"\n" \
		"3: lhi %0,%3\n" \
		"   jg 2b\n" \
		".popsection\n" \
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
		: "=d" (__rc), "+Q" (*(to)) \
		: "d" (val), "K" (-EFAULT) \
		: "cc"); \
	__rc; \
})

#define __put_kernel_nofault(dst, src, type, err_label) \
do { \
	u64 __x = (u64)(*((type *)(src))); \
	int __pk_err; \
	\
	switch (sizeof(type)) { \
	case 1: \
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break; \
	case 2: \
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break; \
	case 4: \
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st"); \
		break; \
	case 8: \
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break; \
	default: \
		__pk_err = __put_kernel_bad(); \
		break; \
	} \
	if (unlikely(__pk_err)) \
		goto err_label; \
} while (0)
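
/*
 * Illustrative sketch (not part of this header): the nofault helpers
 * branch to a caller-supplied label on a fault instead of returning an
 * error code:
 *
 *	long write_word_nofault(u32 *dst, u32 val)	// hypothetical helper
 *	{
 *		__put_kernel_nofault(dst, &val, u32, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */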

int __noreturn __get_kernel_bad(void);

#define __get_kernel_asm(val, from, insn) \
({ \
	int __rc; \
	\
	asm volatile( \
		"0: " insn " %1,%2\n" \
		"1: xr %0,%0\n" \
		"2:\n" \
		".pushsection .fixup, \"ax\"\n" \
		"3: lhi %0,%3\n" \
		"   jg 2b\n" \
		".popsection\n" \
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
		: "=d" (__rc), "+d" (val) \
		: "Q" (*(from)), "K" (-EFAULT) \
		: "cc"); \
	__rc; \
})

#define __get_kernel_nofault(dst, src, type, err_label) \
do { \
	int __gk_err; \
	\
	switch (sizeof(type)) { \
	case 1: { \
		u8 __x = 0; \
		\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic"); \
		*((type *)(dst)) = (type)__x; \
		break; \
	} \
	case 2: { \
		u16 __x = 0; \
		\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh"); \
		*((type *)(dst)) = (type)__x; \
		break; \
	} \
	case 4: { \
		u32 __x = 0; \
		\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l"); \
		*((type *)(dst)) = (type)__x; \
		break; \
	} \
	case 8: { \
		u64 __x = 0; \
		\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg"); \
		*((type *)(dst)) = (type)__x; \
		break; \
	} \
	default: \
		__gk_err = __get_kernel_bad(); \
		break; \
	} \
	if (unlikely(__gk_err)) \
		goto err_label; \
} while (0)
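
/*
 * Illustrative sketch (not part of this header): safely probing a kernel
 * address that may be unmapped:
 *
 *	long read_word_nofault(u32 *src, u32 *val)	// hypothetical helper
 *	{
 *		__get_kernel_nofault(val, src, u32, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */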

#endif /* __S390_UACCESS_H */