/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}
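
/*
 * Illustrative sketch (editorial, not part of the original header): the
 * historical pattern that KERNEL_DS/set_fs() enabled, i.e. temporarily
 * lifting the address limit so a kernel buffer can be handed to code that
 * expects a __user pointer.  kernel_read_routine() is a hypothetical callee.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = kernel_read_routine(file, (char __user *)kbuf, len);
 *	set_fs(old_fs);
 */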

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()	(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
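
/*
 * Worked example (editorial, with assumed numbers): for a runtime size the
 * check must catch address wrap-around.  On a 64-bit kernel, addr =
 * 0xffffffffffff0000 with size = 0x20000 wraps to 0x10000 on addition, which
 * is below the limit; the "addr < size" test after the addition is what
 * rejects it.  For a constant sizeof() size, "addr > limit - size" cannot
 * underflow because such a size is far smaller than the limit.
 */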

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)					\
({								\
	WARN_ON_IN_IRQ();					\
	likely(!__range_not_ok(addr, size, user_addr_max()));	\
})
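
/*
 * Usage sketch (editorial, hypothetical ioctl handler): access_ok() only
 * validates the range, so the subsequent __put_user() can still fault and
 * its return value must be checked by the caller.
 *
 *	static long my_ioctl_get(struct my_dev *dev, u32 __user *argp)
 *	{
 *		if (!access_ok(argp, sizeof(*argp)))
 *			return -EFAULT;
 *		return __put_user(dev->val, argp);
 *	}
 */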

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to pass the result back through a pointer, because
 * that is too much of a performance impact. Thus we have a few rather ugly
 * macros here, and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
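
/*
 * Editorial note with an assumed example: on a 32-bit kernel,
 * __inttype(*(u64 __user *)p) is unsigned long long (a u64 does not fit in
 * unsigned long there), while for an int pointer it is plain unsigned long.
 * On 64-bit kernels both cases resolve to unsigned long.
 */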

/**
 * get_user - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
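
/*
 * Usage sketch (editorial, assumed helper): get_user() does the range check
 * itself, so it can be used on an unvalidated user pointer; its nonzero
 * return must be propagated as -EFAULT.
 *
 *	static int read_user_flag(unsigned int __user *uflag, unsigned int *out)
 *	{
 *		unsigned int val;
 *
 *		if (get_user(val, uflag))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */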

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. Clobbers %rbx.
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
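
/*
 * Usage sketch (editorial, assumed helper): put_user() performs its own
 * range check, so no separate access_ok() is required, and its return value
 * is already 0 or -EFAULT.
 *
 *	static int report_status(int status, int __user *ustatus)
 *	{
 *		return put_user(status, ustatus);
 *	}
 */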

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm_volatile_goto("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     _ASM_EXTABLE_UA(1b, %l[label])			\
		     : CC_OUT(z) (success),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory"						\
		     : label);						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})
#endif // CONFIG_X86_32
#else  // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label)	({ \
	int __err = 0;							\
	bool success;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
		     CC_SET(z)						\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG,	\
					   %[errout])			\
		     : CC_OUT(z) (success),				\
		       [errout] "+r" (__err),				\
		       [ptr] "+m" (*_ptr),				\
		       [old] "+a" (__old)				\
		     : [new] ltype (__new)				\
		     : "memory");					\
	if (unlikely(__err))						\
		goto label;						\
	if (unlikely(!success))						\
		*_old = __old;						\
	likely(success);					})

#ifdef CONFIG_X86_32
/*
 * Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
 * There are only six GPRs available and four (EAX, EBX, ECX, and EDX) are
 * hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
 * both ESI and EDI for the memory operand, compilation will fail if the error
 * is an input+output as there will be no register available for input.
 */
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label)	({	\
	int __result;							\
	__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold);		\
	__typeof__(*(_ptr)) __old = *_old;				\
	__typeof__(*(_ptr)) __new = (_new);				\
	asm volatile("\n"						\
		     "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n"		\
		     "mov $0, %%ecx\n\t"				\
		     "setz %%cl\n"					\
		     "2:\n"						\
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
		     : [result]"=c" (__result),				\
		       "+A" (__old),					\
		       [ptr] "+m" (*_ptr)				\
		     : "b" ((u32)__new),				\
		       "c" ((u32)((u64)__new >> 32))			\
		     : "memory", "cc");					\
	if (unlikely(__result < 0))					\
		goto label;						\
	if (unlikely(!__result))					\
		*_old = __old;						\
	likely(__result);					})
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %"rtype"0,%1\n"		\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x,addr,itype,rtype,ltype,__puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do { \
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
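
/*
 * Usage sketch (editorial, assumed helper): with a single access_ok() check
 * up front, the cheaper __get_user()/__put_user() variants can be used for
 * several accesses to the same user location.
 *
 *	static int bump_counter(u32 __user *ucnt)
 *	{
 *		u32 val;
 *
 *		if (!access_ok(ucnt, sizeof(*ucnt)))
 *			return -EFAULT;
 *		if (__get_user(val, ucnt))
 *			return -EFAULT;
 *		return __put_user(val + 1, ucnt);
 *	}
 */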

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
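
/*
 * Usage sketch (editorial; the structure and fields are hypothetical): the
 * _ex accessors batch several accesses under one exception-handling region,
 * with the address check done by the caller and the accumulated error only
 * becoming visible in the catch block.
 *
 *	int err = 0;
 *
 *	if (!access_ok(uinfo, sizeof(*uinfo)))
 *		return -EFAULT;
 *	get_user_try {
 *		get_user_ex(info.pid, &uinfo->pid);
 *		get_user_ex(info.uid, &uinfo->uid);
 *	} get_user_catch(err);
 *	if (err)
 *		return -EFAULT;
 */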

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*(uval) = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
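
/*
 * Usage sketch (editorial, futex-style update of a user word; names are
 * hypothetical): the macro returns 0 on success and -EFAULT on a fault, and
 * stores the value actually found at uaddr in curval so the caller can
 * detect a lost race.
 *
 *	u32 curval;
 *	int ret;
 *
 *	ret = user_atomic_cmpxchg_inatomic(&curval, uaddr, expected, newval);
 *	if (ret)
 *		return ret;
 *	if (curval != expected)
 *		return -EAGAIN;
 */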

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
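
/*
 * Usage sketch (editorial, hypothetical copy-out of two fields): the unsafe
 * accessors must be bracketed by user_access_begin()/user_access_end(), and
 * every exit path out of the region, including the fault label, has to go
 * through user_access_end().
 *
 *	if (!user_access_begin(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	unsafe_put_user(lo, &uarg->lo, Efault);
 *	unsafe_put_user(hi, &uarg->hi, Efault);
 *	user_access_end();
 *	return 0;
 *  Efault:
 *	user_access_end();
 *	return -EFAULT;
 */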

extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label)		\
	__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
 * Force the pointer to u<size> to match the size expected by the asm helper.
 * clang/LLVM compiles all cases and only discards the unused paths after
 * processing errors, which breaks i386 if the pointer is an 8-byte value.
 */
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({			\
	bool __ret;								\
	__chk_user_ptr(_ptr);							\
	switch (sizeof(*(_ptr))) {						\
	case 1:	__ret = __try_cmpxchg_user_asm("b", "q",			\
					       (__force u8 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 2:	__ret = __try_cmpxchg_user_asm("w", "r",			\
					       (__force u16 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 4:	__ret = __try_cmpxchg_user_asm("l", "r",			\
					       (__force u32 *)(_ptr), (_oldp),	\
					       (_nval), _label);		\
		break;								\
	case 8:	__ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
						 (_nval), _label);		\
		break;								\
	default: __try_cmpxchg_user_wrong_size();				\
	}									\
	__ret;						})

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label)	({		\
	int __ret = -EFAULT;						\
	__uaccess_begin_nospec();					\
	__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label);	\
_label:									\
	__uaccess_end();						\
	__ret;								\
						})
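
/*
 * Usage sketch (editorial, assumed emulation-style helper): the unsafe
 * cmpxchg variant jumps to the supplied label on a fault, and on a clean
 * failure it updates *old with the value actually found so the caller can
 * retry or report the race.
 *
 *	u32 old = expected;
 *
 *	if (!user_access_begin(uaddr, sizeof(u32)))
 *		return -EFAULT;
 *	if (!unsafe_try_cmpxchg_user(uaddr, &old, new, Efault)) {
 *		user_access_end();
 *		return -EAGAIN;	// lost the race; old holds the current value
 *	}
 *	user_access_end();
 *	return 0;
 *  Efault:
 *	user_access_end();
 *	return -EFAULT;
 */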

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)src, (type __user *)dst, label); \
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst, _src, _len, label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
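
/*
 * Usage sketch (editorial, hypothetical variable-length record copy-out):
 * unsafe_copy_to_user() is meant for callers that already hold a
 * user_access_begin() region open and handle faults via a label.
 *
 *	if (!user_access_begin(ubuf, reclen))
 *		return -EFAULT;
 *	unsafe_put_user(reclen, &ubuf->reclen, Efault);
 *	unsafe_copy_to_user(ubuf->name, name, namelen, Efault);
 *	user_access_end();
 *	return 0;
 *  Efault:
 *	user_access_end();
 *	return -EFAULT;
 */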

#endif /* _ASM_X86_UACCESS_H */