/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/uaccess.h"
 */
10 #ifndef __S390_UACCESS_H
11 #define __S390_UACCESS_H
12
/*
 * User space memory access functions
 */
16 #include <asm/processor.h>
17 #include <asm/ctl_reg.h>
18 #include <asm/extable.h>
19 #include <asm/facility.h>
20
21 void debug_user_asce(int exit);
22
/*
 * s390 needs no user address range check: kernel and user space live in
 * separate address spaces selected by distinct ASCEs, so any user
 * pointer value is acceptable here.  Always reports success.
 */
static inline int __range_ok(unsigned long addr, unsigned long size)
{
	return 1;
}
27
/*
 * Validate a user pointer/length pair.  __chk_user_ptr() is a sparse
 * no-op that enforces the __user annotation at build time; the runtime
 * check degenerates to __range_ok(), which always succeeds on s390.
 */
#define __access_ok(addr, size)				\
({							\
	__chk_user_ptr(addr);				\
	__range_ok((unsigned long)(addr), (size));	\
})

#define access_ok(addr, size) __access_ok(addr, size)
35
36 unsigned long __must_check
37 raw_copy_from_user(void *to, const void __user *from, unsigned long n);
38
39 unsigned long __must_check
40 raw_copy_to_user(void __user *to, const void *from, unsigned long n);
41
42 #ifndef CONFIG_KASAN
43 #define INLINE_COPY_FROM_USER
44 #define INLINE_COPY_TO_USER
45 #endif
46
47 int __put_user_bad(void) __attribute__((noreturn));
48 int __get_user_bad(void) __attribute__((noreturn));
49
50 #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
51
/*
 * Move @size bytes between kernel and user space with a single MVCOS
 * instruction.  @insn loads the operand-access spec 0x81 into general
 * register 0: "llilh" places it in the high halfword (user-space
 * destination, used by put_user), "lghi" in the low word (user-space
 * source, used by get_user) — presumably per the MVCOS spec encoding;
 * confirm against the z/Architecture Principles of Operation.
 * Faults are redirected through the .fixup/EX_TABLE machinery and
 * yield -EFAULT; success yields 0.
 */
#define __put_get_user_asm(to, from, size, insn)		\
({								\
	int __rc;						\
								\
	asm volatile(						\
		insn " 0,%[spec]\n"				\
		"0: mvcos %[_to],%[_from],%[_size]\n"		\
		"1: xr %[rc],%[rc]\n"				\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3: lhi	%[rc],%[retval]\n"			\
		"   jg	2b\n"					\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: [rc] "=&d" (__rc), [_to] "+Q" (*(to))		\
		: [_size] "d" (size), [_from] "Q" (*(from)),	\
		  [retval] "K" (-EFAULT), [spec] "K" (0x81UL)	\
		: "cc", "0");					\
	__rc;							\
})
72
__put_user_fn(void * x,void __user * ptr,unsigned long size)73 static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
74 {
75 int rc;
76
77 switch (size) {
78 case 1:
79 rc = __put_get_user_asm((unsigned char __user *)ptr,
80 (unsigned char *)x,
81 size, "llilh");
82 break;
83 case 2:
84 rc = __put_get_user_asm((unsigned short __user *)ptr,
85 (unsigned short *)x,
86 size, "llilh");
87 break;
88 case 4:
89 rc = __put_get_user_asm((unsigned int __user *)ptr,
90 (unsigned int *)x,
91 size, "llilh");
92 break;
93 case 8:
94 rc = __put_get_user_asm((unsigned long __user *)ptr,
95 (unsigned long *)x,
96 size, "llilh");
97 break;
98 default:
99 __put_user_bad();
100 break;
101 }
102 return rc;
103 }
104
__get_user_fn(void * x,const void __user * ptr,unsigned long size)105 static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
106 {
107 int rc;
108
109 switch (size) {
110 case 1:
111 rc = __put_get_user_asm((unsigned char *)x,
112 (unsigned char __user *)ptr,
113 size, "lghi");
114 break;
115 case 2:
116 rc = __put_get_user_asm((unsigned short *)x,
117 (unsigned short __user *)ptr,
118 size, "lghi");
119 break;
120 case 4:
121 rc = __put_get_user_asm((unsigned int *)x,
122 (unsigned int __user *)ptr,
123 size, "lghi");
124 break;
125 case 8:
126 rc = __put_get_user_asm((unsigned long *)x,
127 (unsigned long __user *)ptr,
128 size, "lghi");
129 break;
130 default:
131 __get_user_bad();
132 break;
133 }
134 return rc;
135 }
136
137 #else /* CONFIG_HAVE_MARCH_Z10_FEATURES */
138
/*
 * Pre-z10 fallback: no MVCOS, so route single-value stores through the
 * generic copy path.  A non-zero remainder means the copy faulted.
 */
static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
	size = raw_copy_to_user(ptr, x, size);
	return size ? -EFAULT : 0;
}
144
/*
 * Pre-z10 fallback: no MVCOS, so route single-value fetches through the
 * generic copy path.  A non-zero remainder means the copy faulted.
 */
static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
{
	size = raw_copy_from_user(x, ptr, size);
	return size ? -EFAULT : 0;
}
150
151 #endif /* CONFIG_HAVE_MARCH_Z10_FEATURES */
152
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
/*
 * Store (x) at user address (ptr), sized by the pointee type.  The
 * value is copied into a local first so (x) is evaluated exactly once;
 * unsupported sizes become a link-time error via __put_user_bad().
 * Evaluates to 0 on success or -EFAULT.
 */
#define __put_user(x, ptr)					\
({								\
	__typeof__(*(ptr)) __x = (x);				\
	int __pu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof (*(ptr))) {				\
	case 1:							\
	case 2:							\
	case 4:							\
	case 8:							\
		__pu_err = __put_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		break;						\
	default:						\
		__put_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__pu_err, 0);				\
})
176
/* Like __put_user(), but may sleep: annotate the potential fault. */
#define put_user(x, ptr)					\
({								\
	might_fault();						\
	__put_user(x, ptr);					\
})
182
183
/*
 * Fetch a value from user address (ptr) into (x), sized by the pointee
 * type.  The transfer goes through a zero-initialized local of matching
 * width so (x) is fully defined even when the access faults, then the
 * bits are reinterpreted as the target type.  Evaluates to 0 on success
 * or -EFAULT; unsupported sizes become a link-time error.
 */
#define __get_user(x, ptr)					\
({								\
	int __gu_err = -EFAULT;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1: {						\
		unsigned char __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 2: {						\
		unsigned short __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 4: {						\
		unsigned int __x = 0;				\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	case 8: {						\
		unsigned long long __x = 0;			\
		__gu_err = __get_user_fn(&__x, ptr,		\
					 sizeof(*(ptr)));	\
		(x) = *(__force __typeof__(*(ptr)) *) &__x;	\
		break;						\
	};							\
	default:						\
		__get_user_bad();				\
		break;						\
	}							\
	__builtin_expect(__gu_err, 0);				\
})
223
/* Like __get_user(), but may sleep: annotate the potential fault. */
#define get_user(x, ptr)					\
({								\
	might_fault();						\
	__get_user(x, ptr);					\
})
229
/*
 * Copy a null terminated string from userspace.
 */
233 long __must_check strncpy_from_user(char *dst, const char __user *src, long count);
234
235 long __must_check strnlen_user(const char __user *src, long count);
236
/*
 * Zero Userspace
 */
240 unsigned long __must_check __clear_user(void __user *to, unsigned long size);
241
clear_user(void __user * to,unsigned long n)242 static inline unsigned long __must_check clear_user(void __user *to, unsigned long n)
243 {
244 might_fault();
245 return __clear_user(to, n);
246 }
247
248 int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count);
249 void *s390_kernel_write(void *dst, const void *src, size_t size);
250
251 #define HAVE_GET_KERNEL_NOFAULT
252
253 int __noreturn __put_kernel_bad(void);
254
/*
 * Store @val to the kernel address @to with the store instruction
 * @insn (stc/sth/st/stg), protected by an exception-table fixup so a
 * fault returns -EFAULT instead of oopsing.  Evaluates to 0 on success.
 */
#define __put_kernel_asm(val, to, insn)				\
({								\
	int __rc;						\
								\
	asm volatile(						\
		"0:   " insn "  %2,%1\n"			\
		"1:	xr	%0,%0\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%0,%3\n"			\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+Q" (*(to))			\
		: "d" (val), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
273
/*
 * Fault-safe kernel-space store: write *(type *)(src) to (dst) and
 * branch to err_label if the store faults.  The value is widened to u64
 * and the store instruction chosen by sizeof(type); other sizes are a
 * link-time error via __put_kernel_bad().
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	u64 __x = (u64)(*((type *)(src)));				\
	int __pk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stc"); \
		break;							\
	case 2:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "sth"); \
		break;							\
	case 4:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "st");	\
		break;							\
	case 8:								\
		__pk_err = __put_kernel_asm(__x, (type *)(dst), "stg"); \
		break;							\
	default:							\
		__pk_err = __put_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
299
300 int __noreturn __get_kernel_bad(void);
301
/*
 * Load @val from the kernel address @from with the load instruction
 * @insn (ic/lh/l/lg), protected by an exception-table fixup so a fault
 * returns -EFAULT instead of oopsing.  Evaluates to 0 on success.
 */
#define __get_kernel_asm(val, from, insn)			\
({								\
	int __rc;						\
								\
	asm volatile(						\
		"0:   " insn "  %1,%2\n"			\
		"1:	xr	%0,%0\n"			\
		"2:\n"						\
		".pushsection .fixup, \"ax\"\n"			\
		"3:	lhi	%0,%3\n"			\
		"	jg	2b\n"				\
		".popsection\n"					\
		EX_TABLE(0b,3b) EX_TABLE(1b,3b)			\
		: "=d" (__rc), "+d" (val)			\
		: "Q" (*(from)), "K" (-EFAULT)			\
		: "cc");					\
	__rc;							\
})
320
/*
 * Fault-safe kernel-space load: read *(type *)(src) into (dst) and
 * branch to err_label if the load faults.  Each width goes through a
 * zero-initialized fixed-width local before being cast to the target
 * type; other sizes are a link-time error via __get_kernel_bad().
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __gk_err;							\
									\
	switch (sizeof(type)) {						\
	case 1: {							\
		u8 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "ic");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 2: {							\
		u16 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lh");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 4: {							\
		u32 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "l");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	case 8: {							\
		u64 __x = 0;						\
									\
		__gk_err = __get_kernel_asm(__x, (type *)(src), "lg");	\
		*((type *)(dst)) = (type)__x;				\
		break;							\
	};								\
	default:							\
		__gk_err = __get_kernel_bad();				\
		break;							\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)
361
362 #endif /* __S390_UACCESS_H */
363