/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 *
 * The fs/ds values are now the highest legal address in the "segment".
 * This simplifies the checking in the routines below.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(~0UL)
#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_USER64 - 1)
#else
#define USER_DS		MAKE_MM_SEG(TASK_SIZE - 1)
#endif

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return check addr_limit (fs) is correct */
	set_thread_flag(TIF_FSCHECK);
}

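/*
 * Illustrative sketch (not part of this header): the classic pattern for
 * temporarily widening the address limit so that user-access helpers can
 * be pointed at a kernel buffer.  "old_fs" is an assumed caller-side name.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	... user-access helpers now accept kernel addresses ...
 *	set_fs(old_fs);
 *
 * The saved limit must be restored before returning to user mode;
 * TIF_FSCHECK is set above so a stale addr_limit is caught on the way out.
 */
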
#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(get_fs().seg)

#ifdef __powerpc64__
/*
 * This check is sufficient because there is a large enough
 * gap between user addresses and the kernel addresses
 */
#define __access_ok(addr, size, segment)	\
	(((addr) <= (segment).seg) && ((size) <= (segment).seg))

#else

static inline int __access_ok(unsigned long addr, unsigned long size,
			mm_segment_t seg)
{
	if (addr > seg.seg)
		return 0;
	return (size == 0 || size - 1 <= seg.seg - addr);
}

#endif

#define access_ok(type, addr, size)		\
	(__chk_user_ptr(addr), (void)(type),		\
	 __access_ok((__force unsigned long)(addr), (size), get_fs()))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space; that must have been done previously with a separate
 * "access_ok()" call (this is used when we do multiple accesses to the
 * same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
#define __put_user_allowed(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

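/*
 * Illustrative sketch (not part of the original header): typical use of the
 * checking and non-checking variants.  "uptr", "vals", "val" and "i" are
 * assumed caller-side names.
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))		// checks access_ok() itself
 *		return -EFAULT;
 *
 *	if (!access_ok(VERIFY_READ, vals, 4 * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < 4; i++)
 *		if (__get_user(val, vals + i))	// range already verified above
 *			return -EFAULT;
 */
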
extern long __put_user_bad(void);

/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm(x, addr, err, op)			\
	__asm__ __volatile__(					\
		"1:	" op " %1,0(%2)	# put_user\n"		\
		"2:\n"						\
		".section .fixup,\"ax\"\n"			\
		"3:	li %0,%3\n"				\
		"	b 2b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 3b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval)				\
	  __put_user_asm(x, ptr, retval, "std")
#else /* __powerpc64__ */
#define __put_user_asm2(x, addr, err)				\
	__asm__ __volatile__(					\
		"1:	stw %1,0(%2)\n"				\
		"2:	stw %1+1,4(%2)\n"			\
		"3:\n"						\
		".section .fixup,\"ax\"\n"			\
		"4:	li %0,%3\n"				\
		"	b 3b\n"					\
		".previous\n"					\
		EX_TABLE(1b, 4b)				\
		EX_TABLE(2b, 4b)				\
		: "=r" (err)					\
		: "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	switch (size) {						\
	  case 1: __put_user_asm(x, ptr, retval, "stb"); break;	\
	  case 2: __put_user_asm(x, ptr, retval, "sth"); break;	\
	  case 4: __put_user_asm(x, ptr, retval, "stw"); break;	\
	  case 8: __put_user_asm2(x, ptr, retval); break;	\
	  default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size, do_allow)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(__pu_addr);				\
	if (do_allow)								\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
	else									\
		__put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
								\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size))			\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
								\
	__pu_err;						\
})


extern long __get_user_bad(void);

/*
 * This does an atomic 128-bit (16-byte) aligned load from userspace.
 * Up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op" %1,0(%2)	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz %1,0(%2)\n"			\
		"2:	lwz %1+1,4(%2)\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "b" (addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

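/*
 * Illustrative note (not from the original source): __long_type() matters
 * mainly on 32-bit, where the temporary used by the get_user() family must
 * be wide enough to hold an 8-byte value so __get_user_asm2() can fill both
 * halves.  "p" is an assumed caller-side u64 __user pointer.
 *
 *	u64 val;
 *
 *	if (get_user(val, p))	// internal temporary is __long_type(*(p)), 64 bits wide
 *		return -EFAULT;
 */
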
#define __get_user_nocheck(x, ptr, size, do_allow)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	if (!is_kernel_addr((unsigned long)__gu_addr))		\
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)								\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else									\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) {		\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
									\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_user_access(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_user_access(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

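/*
 * Illustrative sketch (not defined in this file): raw_copy_from_user() and
 * raw_copy_to_user() are the arch backends for the generic copy_from_user()
 * and copy_to_user() helpers in <linux/uaccess.h>.  A typical caller looks
 * like this; "struct foo", "karg" and "uarg" are assumed caller-side names.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 *	... operate on the kernel copy ...
 *	if (copy_to_user(uarg, &karg, sizeof(karg)))
 *		return -EFAULT;
 */
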
unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(VERIFY_WRITE, addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
#define user_access_end()		  prevent_user_access(NULL, NULL, ~0ul)

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
#define unsafe_copy_to_user(d, s, l, e) \
	unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)

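/*
 * Illustrative sketch (not from the original source): the unsafe_*()
 * accessors must be bracketed by user_access_begin()/user_access_end(),
 * and each one jumps to the supplied label on a fault.  "uptr", "a", "b"
 * and the "efault" label are assumed caller-side names.
 *
 *	if (!user_access_begin(VERIFY_WRITE, uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uptr[0], efault);
 *	unsafe_put_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 *
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
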
#endif	/* _ARCH_POWERPC_UACCESS_H */