/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
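/* All of these return the number of bytes left uncopied; 0 means success. */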
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
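	/*
	 * The arguments are passed in %rdi/%rsi/%rdx and the count of
	 * bytes left uncopied comes back in %rax, so 'to', 'from' and
	 * 'len' are declared as in/out operands ("=D", "=S", "=d" tied
	 * to "1"/"2"/"3") and the remaining caller-saved registers are
	 * listed as clobbers.
	 */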
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
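
/*
 * Neither helper checks the user pointer; callers must have validated the
 * range first.  As a rough, hand-written sketch (not the actual generic
 * implementation, and access_ok()'s argument list varies across kernel
 * versions), the copy_from_user() fast path boils down to:
 *
 *	unsigned long example_copy_from_user(void *to,
 *					     const void __user *from,
 *					     unsigned long n)
 *	{
 *		if (access_ok(from, n))
 *			n = raw_copy_from_user(to, from, n);
 *		return n;	// bytes left uncopied, 0 on success
 *	}
 */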

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
				   size_t len);
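
/*
 * The routines above copy with non-temporal (cache-bypassing) stores where
 * possible.  The plain _nocache variant just avoids polluting the cache;
 * the _flushcache variants additionally make sure the destination ends up
 * flushed out of the CPU caches, which is what persistent-memory (pmem)
 * users rely on.  __copy_user_nocache()'s 'zerorest' argument is named for
 * whether the destination tail should be zeroed if the copy faults; the
 * wrapper below passes 0.
 */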
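/*
 * Cache-avoiding copy usable from atomic context: KASAN is told about the
 * write to the kernel destination, then the copy runs without zeroing the
 * destination tail on a fault.
 */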
static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_nocache(dst, src, size, 0);
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif /* _ASM_X86_UACCESS_64_H */