#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/lockdep.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/page.h>

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_string(void *to, const void *from, unsigned len);
__must_check unsigned long
copy_user_generic_unrolled(void *to, const void *from, unsigned len);
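
/*
 * All three variants return the number of bytes that could *not* be
 * copied (0 on complete success).  None of them checks access_ok(),
 * so callers must validate the user pointer first or go through the
 * checked copy_{to,from}_user() wrappers.
 */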

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned len)
{
	unsigned ret;

	/*
	 * If CPU has ERMS feature, use copy_user_enhanced_fast_string.
	 * Otherwise, if CPU has rep_good feature, use copy_user_generic_string.
	 * Otherwise, use copy_user_generic_unrolled.
	 */
	alternative_call_2(copy_user_generic_unrolled,
			 copy_user_generic_string,
			 X86_FEATURE_REP_GOOD,
			 copy_user_enhanced_fast_string,
			 X86_FEATURE_ERMS,
			 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
				     "=d" (len)),
			 "1" (to), "2" (from), "3" (len)
			 : "memory", "rcx", "r8", "r9", "r10", "r11");
	return ret;
}
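
/*
 * Minimal caller sketch (illustrative only; 'kbuf', 'ubuf' and 'len'
 * are hypothetical, and real code normally goes through the
 * copy_{to,from}_user() family below rather than calling this
 * directly):
 *
 *	unsigned long left;
 *
 *	left = copy_user_generic(kbuf, (__force void *)ubuf, len);
 *	if (left)
 *		return -EFAULT;		// 'left' bytes were not copied
 */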

__must_check unsigned long
copy_in_user(void __user *to, const void __user *from, unsigned len);

static __always_inline __must_check
int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	check_object_size(dst, size, false);
	if (!__builtin_constant_p(size))
		return copy_user_generic(dst, (__force void *)src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__get_user_asm(*(u8 *)dst, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__get_user_asm(*(u16 *)dst, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__get_user_asm(*(u32 *)dst, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 10);
		if (likely(!ret))
			__get_user_asm(*(u16 *)(8 + (char *)dst),
				       (u16 __user *)(8 + (char __user *)src),
				       ret, "w", "w", "=r", 2);
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__get_user_asm(*(u64 *)dst, (u64 __user *)src,
			       ret, "q", "", "=r", 16);
		if (likely(!ret))
			__get_user_asm(*(u64 *)(8 + (char *)dst),
				       (u64 __user *)(8 + (char __user *)src),
				       ret, "q", "", "=r", 8);
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic(dst, (__force void *)src, size);
	}
}
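
/*
 * Note on the constant-size cases above: the final argument to
 * __get_user_asm() is the value stored in 'ret' if the access faults,
 * and it is chosen to be the number of bytes still uncopied at that
 * point.  The fixed-size paths thus keep the same contract as
 * copy_user_generic(): the return value is the count of uncopied bytes.
 */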

static __always_inline __must_check
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_from_user_nocheck(dst, src, size);
}
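
/*
 * Usage sketch (hypothetical handler; 'kbuf', 'ubuf' and 'len' are
 * assumptions, not kernel code).  Because the access_ok() check is
 * skipped, the range must have been validated beforehand:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */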

static __always_inline __must_check
int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size)
{
	int ret = 0;

	check_object_size(src, size, true);
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst, src, size);
	switch (size) {
	case 1:
		__uaccess_begin();
		__put_user_asm(*(u8 *)src, (u8 __user *)dst,
			       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	case 2:
		__uaccess_begin();
		__put_user_asm(*(u16 *)src, (u16 __user *)dst,
			       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	case 4:
		__uaccess_begin();
		__put_user_asm(*(u32 *)src, (u32 __user *)dst,
			       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	case 8:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	case 10:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 10);
		if (likely(!ret)) {
			asm("":::"memory");	/* compiler barrier between the two stores */
			__put_user_asm(((u16 *)src)[4], 4 + (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		}
		__uaccess_end();
		return ret;
	case 16:
		__uaccess_begin();
		__put_user_asm(*(u64 *)src, (u64 __user *)dst,
			       ret, "q", "", "er", 16);
		if (likely(!ret)) {
			asm("":::"memory");	/* compiler barrier between the two stores */
			__put_user_asm(((u64 *)src)[1], 1 + (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		}
		__uaccess_end();
		return ret;
	default:
		return copy_user_generic((__force void *)dst, src, size);
	}
}

static __always_inline __must_check
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
	might_fault();
	return __copy_to_user_nocheck(dst, src, size);
}
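
/*
 * Usage sketch, mirroring __copy_from_user() ('st' and 'ubuf' are
 * hypothetical):
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(st)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &st, sizeof(st)))
 *		return -EFAULT;
 */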

static __always_inline __must_check
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
	int ret = 0;

	might_fault();
	if (!__builtin_constant_p(size))
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	switch (size) {
	case 1: {
		u8 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u8 __user *)src,
			       ret, "b", "b", "=q", 1);
		if (likely(!ret))
			__put_user_asm(tmp, (u8 __user *)dst,
				       ret, "b", "b", "iq", 1);
		__uaccess_end();
		return ret;
	}
	case 2: {
		u16 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u16 __user *)src,
			       ret, "w", "w", "=r", 2);
		if (likely(!ret))
			__put_user_asm(tmp, (u16 __user *)dst,
				       ret, "w", "w", "ir", 2);
		__uaccess_end();
		return ret;
	}
	case 4: {
		u32 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u32 __user *)src,
			       ret, "l", "k", "=r", 4);
		if (likely(!ret))
			__put_user_asm(tmp, (u32 __user *)dst,
				       ret, "l", "k", "ir", 4);
		__uaccess_end();
		return ret;
	}
	case 8: {
		u64 tmp;
		__uaccess_begin();
		__get_user_asm(tmp, (u64 __user *)src,
			       ret, "q", "", "=r", 8);
		if (likely(!ret))
			__put_user_asm(tmp, (u64 __user *)dst,
				       ret, "q", "", "er", 8);
		__uaccess_end();
		return ret;
	}
	default:
		return copy_user_generic((__force void *)dst,
					 (__force void *)src, size);
	}
}
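
/*
 * __copy_in_user() moves data between two *user* buffers, bouncing
 * each fixed-size chunk through a kernel temporary as the switch
 * above shows; a typical caller is compat code translating between
 * 32-bit and 64-bit user structure layouts.  Both pointers must
 * already have passed access_ok().
 */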

static __must_check __always_inline int
__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
{
	return __copy_from_user_nocheck(dst, src, size);
}

static __must_check __always_inline int
__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
{
	return __copy_to_user_nocheck(dst, src, size);
}
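
/*
 * The _inatomic variants omit might_fault() and so may be called with
 * page faults disabled.  Illustrative pattern ('kbuf', 'ubuf' and
 * 'len' are hypothetical):
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(kbuf, ubuf, len);
 *	pagefault_enable();
 *	if (ret)
 *		... fall back to a sleeping copy outside the atomic region
 */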

extern long __copy_user_nocache(void *dst, const void __user *src,
				unsigned size, int zerorest);

static inline int
__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
{
	might_fault();
	return __copy_user_nocache(dst, src, size, 1);
}

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	return __copy_user_nocache(dst, src, size, 0);
}
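
/*
 * The nocache variants copy with non-temporal stores to avoid
 * polluting the CPU caches, which can pay off for large destinations
 * that will not be re-read soon.  'zerorest' selects whether
 * __copy_user_nocache() zeroes the remaining destination bytes after
 * a fault, as the might_fault() variant above requests.
 */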

unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
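
/*
 * copy_user_handle_tail() is the fault fixup used by the assembly
 * copy routines: after an exception it retries the remaining bytes
 * one at a time to determine exactly how many could not be copied,
 * zeroing the rest of the destination when 'zerorest' is set, and
 * returns that uncopied count.
 */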

#endif /* _ASM_X86_UACCESS_64_H */