1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
7  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8  * Copyright (C) 2007  Maciej W. Rozycki
9  * Copyright (C) 2014, Imagination Technologies Ltd.
10  */
11 #ifndef _ASM_UACCESS_H
12 #define _ASM_UACCESS_H
13 
14 #include <linux/kernel.h>
15 #include <linux/errno.h>
16 #include <linux/thread_info.h>
17 #include <linux/string.h>
18 #include <asm/asm-eva.h>
19 
20 /*
21  * The fs value determines whether argument validity checking should be
22  * performed or not.  If get_fs() == USER_DS, checking is performed; with
23  * get_fs() == KERNEL_DS, checking is bypassed.
24  *
25  * For historical reasons, these macros are grossly misnamed.
26  */
27 #ifdef CONFIG_32BIT
28 
29 #ifdef CONFIG_KVM_GUEST
30 #define __UA_LIMIT 0x40000000UL
31 #else
32 #define __UA_LIMIT 0x80000000UL
33 #endif
34 
35 #define __UA_ADDR	".word"
36 #define __UA_LA		"la"
37 #define __UA_ADDU	"addu"
38 #define __UA_t0		"$8"
39 #define __UA_t1		"$9"
40 
41 #endif /* CONFIG_32BIT */
42 
43 #ifdef CONFIG_64BIT
44 
45 extern u64 __ua_limit;
46 
47 #define __UA_LIMIT	__ua_limit
48 
49 #define __UA_ADDR	".dword"
50 #define __UA_LA		"dla"
51 #define __UA_ADDU	"daddu"
52 #define __UA_t0		"$12"
53 #define __UA_t1		"$13"
54 
55 #endif /* CONFIG_64BIT */
56 
57 /*
58  * USER_DS is a bitmask with the bits set that must not be set in a valid
59  * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
60  * the arithmetic we're doing only works if the limit is a power of two, so
61  * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
62  * address in this range it's the process's problem, not ours :-)
63  */
64 
65 #ifdef CONFIG_KVM_GUEST
66 #define KERNEL_DS	((mm_segment_t) { 0x80000000UL })
67 #define USER_DS		((mm_segment_t) { 0xC0000000UL })
68 #else
69 #define KERNEL_DS	((mm_segment_t) { 0UL })
70 #define USER_DS		((mm_segment_t) { __UA_LIMIT })
71 #endif
72 
73 #define VERIFY_READ    0
74 #define VERIFY_WRITE   1
75 
76 #define get_ds()	(KERNEL_DS)
77 #define get_fs()	(current_thread_info()->addr_limit)
78 #define set_fs(x)	(current_thread_info()->addr_limit = (x))
79 
80 #define segment_eq(a, b)	((a).seg == (b).seg)
81 
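/*
 * Illustrative sketch, not part of this header: the classic pattern for
 * temporarily widening the address limit so that the uaccess helpers below
 * accept kernel pointers.  The helper called in the middle is hypothetical;
 * the important part is saving and restoring the old segment.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	ret = do_foo_with_uaccess(kbuf, len);
 *	set_fs(old_fs);
 */
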
82 /*
83  * eva_kernel_access() - determine whether kernel memory access on an EVA system
84  *
85  * Determines whether memory accesses should be performed to kernel memory
86  * on a system using Extended Virtual Addressing (EVA).
87  *
88  * Return: true if a kernel memory access on an EVA system, else false.
89  */
90 static inline bool eva_kernel_access(void)
91 {
92 	if (!config_enabled(CONFIG_EVA))
93 		return false;
94 
95 	return segment_eq(get_fs(), get_ds());
96 }
97 
98 /*
99  * Is an address valid? This does a straightforward calculation rather
100  * than tests.
101  *
102  * Address valid if:
103  *  - "addr" doesn't have any high-bits set
104  *  - AND "size" doesn't have any high-bits set
105  *  - AND "addr+size" doesn't have any high-bits set
106  *  - OR we are in kernel mode.
107  *
108  * __ua_size() is a trick to avoid runtime checking of positive constant
109  * sizes; for those we already know at compile time that the size is ok.
110  */
111 #define __ua_size(size)							\
112 	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))
113 
114 /*
115  * access_ok: - Checks if a user space pointer is valid
116  * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
117  *	  %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
118  *	  to write to a block, it is always safe to read from it.
119  * @addr: User space pointer to start of block to check
120  * @size: Size of block to check
121  *
122  * Context: User context only.	This function may sleep.
123  *
124  * Checks if a pointer to a block of memory in user space is valid.
125  *
126  * Returns true (nonzero) if the memory block may be valid, false (zero)
127  * if it is definitely invalid.
128  *
129  * Note that, depending on architecture, this function probably just
130  * checks that the pointer is in the user space range - after calling
131  * this function, memory access functions may still return -EFAULT.
132  */
133 
134 static inline int __access_ok(const void __user *p, unsigned long size)
135 {
136 	unsigned long addr = (unsigned long)p;
137 	return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0;
138 }
139 
140 #define access_ok(type, addr, size)					\
141 	likely(__access_ok((addr), (size)))
142 
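/*
 * Worked example of the check above on a plain 32-bit kernel (no
 * CONFIG_KVM_GUEST), where USER_DS.seg is 0x80000000: for addr == 0x7fff0000
 * and size == 0x20000, addr + size is 0x80010000, which has the high bit set,
 * so __access_ok() fails; with size == 0x8000 the sum is 0x7fff8000 and the
 * check passes.  A hypothetical caller that wants to use the unchecked
 * accessors later might do:
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */
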
143 /*
144  * put_user: - Write a simple value into user space.
145  * @x:	 Value to copy to user space.
146  * @ptr: Destination address, in user space.
147  *
148  * Context: User context only.	This function may sleep.
149  *
150  * This macro copies a single simple value from kernel space to user
151  * space.  It supports simple types like char and int, but not larger
152  * data types like structures or arrays.
153  *
154  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
155  * to the result of dereferencing @ptr.
156  *
157  * Returns zero on success, or -EFAULT on error.
158  */
159 #define put_user(x,ptr) \
160 	__put_user_check((x), (ptr), sizeof(*(ptr)))
161 
162 /*
163  * get_user: - Get a simple variable from user space.
164  * @x:	 Variable to store result.
165  * @ptr: Source address, in user space.
166  *
167  * Context: User context only.	This function may sleep.
168  *
169  * This macro copies a single simple variable from user space to kernel
170  * space.  It supports simple types like char and int, but not larger
171  * data types like structures or arrays.
172  *
173  * @ptr must have pointer-to-simple-variable type, and the result of
174  * dereferencing @ptr must be assignable to @x without a cast.
175  *
176  * Returns zero on success, or -EFAULT on error.
177  * On error, the variable @x is set to zero.
178  */
179 #define get_user(x,ptr) \
180 	__get_user_check((x), (ptr), sizeof(*(ptr)))
181 
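/*
 * Usage sketch (hypothetical driver code, not part of this header): a
 * read-modify-write of a single int in user space.  Both macros perform
 * their own access_ok() check and return -EFAULT on failure; FOO_FLAG, arg
 * and uptr are made up.
 *
 *	int __user *uptr = (int __user *)arg;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	val |= FOO_FLAG;
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 *	return 0;
 */
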
182 /*
183  * __put_user: - Write a simple value into user space, with less checking.
184  * @x:	 Value to copy to user space.
185  * @ptr: Destination address, in user space.
186  *
187  * Context: User context only.	This function may sleep.
188  *
189  * This macro copies a single simple value from kernel space to user
190  * space.  It supports simple types like char and int, but not larger
191  * data types like structures or arrays.
192  *
193  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
194  * to the result of dereferencing @ptr.
195  *
196  * Caller must check the pointer with access_ok() before calling this
197  * function.
198  *
199  * Returns zero on success, or -EFAULT on error.
200  */
201 #define __put_user(x,ptr) \
202 	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
203 
204 /*
205  * __get_user: - Get a simple variable from user space, with less checking.
206  * @x:	 Variable to store result.
207  * @ptr: Source address, in user space.
208  *
209  * Context: User context only.	This function may sleep.
210  *
211  * This macro copies a single simple variable from user space to kernel
212  * space.  It supports simple types like char and int, but not larger
213  * data types like structures or arrays.
214  *
215  * @ptr must have pointer-to-simple-variable type, and the result of
216  * dereferencing @ptr must be assignable to @x without a cast.
217  *
218  * Caller must check the pointer with access_ok() before calling this
219  * function.
220  *
221  * Returns zero on success, or -EFAULT on error.
222  * On error, the variable @x is set to zero.
223  */
224 #define __get_user(x,ptr) \
225 	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
226 
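/*
 * Usage sketch (hypothetical, not part of this header): when many small
 * accesses hit one user buffer, the range can be validated once with
 * access_ok() and the cheaper __get_user() used for each element.
 *
 *	if (!access_ok(VERIFY_READ, uarray, count * sizeof(u32)))
 *		return -EFAULT;
 *	for (i = 0; i < count; i++) {
 *		u32 tmp;
 *
 *		if (__get_user(tmp, &uarray[i]))
 *			return -EFAULT;
 *		sum += tmp;
 *	}
 */
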
227 struct __large_struct { unsigned long buf[100]; };
228 #define __m(x) (*(struct __large_struct __user *)(x))
229 
230 /*
231  * Yuck.  We need two variants, one for 64bit operation and one
232  * for 32 bit mode and old iron.
233  */
234 #ifndef CONFIG_EVA
235 #define __get_kernel_common(val, size, ptr) __get_user_common(val, size, ptr)
236 #else
237 /*
238  * Kernel specific functions for EVA. We need to use normal load instructions
239  * to read data from kernel when operating in EVA mode. We use these macros to
240  * avoid redefining __get_user_asm for EVA.
241  */
242 #undef _loadd
243 #undef _loadw
244 #undef _loadh
245 #undef _loadb
246 #ifdef CONFIG_32BIT
247 #define _loadd			_loadw
248 #else
249 #define _loadd(reg, addr)	"ld " reg ", " addr
250 #endif
251 #define _loadw(reg, addr)	"lw " reg ", " addr
252 #define _loadh(reg, addr)	"lh " reg ", " addr
253 #define _loadb(reg, addr)	"lb " reg ", " addr
254 
255 #define __get_kernel_common(val, size, ptr)				\
256 do {									\
257 	switch (size) {							\
258 	case 1: __get_data_asm(val, _loadb, ptr); break;		\
259 	case 2: __get_data_asm(val, _loadh, ptr); break;		\
260 	case 4: __get_data_asm(val, _loadw, ptr); break;		\
261 	case 8: __GET_DW(val, _loadd, ptr); break;			\
262 	default: __get_user_unknown(); break;				\
263 	}								\
264 } while (0)
265 #endif
266 
267 #ifdef CONFIG_32BIT
268 #define __GET_DW(val, insn, ptr) __get_data_asm_ll32(val, insn, ptr)
269 #endif
270 #ifdef CONFIG_64BIT
271 #define __GET_DW(val, insn, ptr) __get_data_asm(val, insn, ptr)
272 #endif
273 
274 extern void __get_user_unknown(void);
275 
276 #define __get_user_common(val, size, ptr)				\
277 do {									\
278 	switch (size) {							\
279 	case 1: __get_data_asm(val, user_lb, ptr); break;		\
280 	case 2: __get_data_asm(val, user_lh, ptr); break;		\
281 	case 4: __get_data_asm(val, user_lw, ptr); break;		\
282 	case 8: __GET_DW(val, user_ld, ptr); break;			\
283 	default: __get_user_unknown(); break;				\
284 	}								\
285 } while (0)
286 
287 #define __get_user_nocheck(x, ptr, size)				\
288 ({									\
289 	int __gu_err;							\
290 									\
291 	if (eva_kernel_access()) {					\
292 		__get_kernel_common((x), size, ptr);			\
293 	} else {							\
294 		__chk_user_ptr(ptr);					\
295 		__get_user_common((x), size, ptr);			\
296 	}								\
297 	__gu_err;							\
298 })
299 
300 #define __get_user_check(x, ptr, size)					\
301 ({									\
302 	int __gu_err = -EFAULT;						\
303 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
304 									\
305 	might_fault();							\
306 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size))) {		\
307 		if (eva_kernel_access())				\
308 			__get_kernel_common((x), size, __gu_ptr);	\
309 		else							\
310 			__get_user_common((x), size, __gu_ptr);		\
311 	} else								\
312 		(x) = 0;						\
313 									\
314 	__gu_err;							\
315 })
316 
317 #define __get_data_asm(val, insn, addr)					\
318 {									\
319 	long __gu_tmp;							\
320 									\
321 	__asm__ __volatile__(						\
322 	"1:	"insn("%1", "%3")"				\n"	\
323 	"2:							\n"	\
324 	"	.insn						\n"	\
325 	"	.section .fixup,\"ax\"				\n"	\
326 	"3:	li	%0, %4					\n"	\
327 	"	move	%1, $0					\n"	\
328 	"	j	2b					\n"	\
329 	"	.previous					\n"	\
330 	"	.section __ex_table,\"a\"			\n"	\
331 	"	"__UA_ADDR "\t1b, 3b				\n"	\
332 	"	.previous					\n"	\
333 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
334 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
335 									\
336 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
337 }
338 
339 /*
340  * Get a long long 64 using 32 bit registers.
341  */
342 #define __get_data_asm_ll32(val, insn, addr)				\
343 {									\
344 	union {								\
345 		unsigned long long	l;				\
346 		__typeof__(*(addr))	t;				\
347 	} __gu_tmp;							\
348 									\
349 	__asm__ __volatile__(						\
350 	"1:	" insn("%1", "(%3)")"				\n"	\
351 	"2:	" insn("%D1", "4(%3)")"				\n"	\
352 	"3:							\n"	\
353 	"	.insn						\n"	\
354 	"	.section	.fixup,\"ax\"			\n"	\
355 	"4:	li	%0, %4					\n"	\
356 	"	move	%1, $0					\n"	\
357 	"	move	%D1, $0					\n"	\
358 	"	j	3b					\n"	\
359 	"	.previous					\n"	\
360 	"	.section	__ex_table,\"a\"		\n"	\
361 	"	" __UA_ADDR "	1b, 4b				\n"	\
362 	"	" __UA_ADDR "	2b, 4b				\n"	\
363 	"	.previous					\n"	\
364 	: "=r" (__gu_err), "=&r" (__gu_tmp.l)				\
365 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
366 									\
367 	(val) = __gu_tmp.t;						\
368 }
369 
370 #ifndef CONFIG_EVA
371 #define __put_kernel_common(ptr, size) __put_user_common(ptr, size)
372 #else
373 /*
374  * Kernel specific functions for EVA. We need to use normal store instructions
375  * to write data to kernel when operating in EVA mode. We use these macros to
376  * avoid redefining __put_data_asm for EVA.
377  */
378 #undef _stored
379 #undef _storew
380 #undef _storeh
381 #undef _storeb
382 #ifdef CONFIG_32BIT
383 #define _stored			_storew
384 #else
385 #define _stored(reg, addr)	"sd " reg ", " addr
386 #endif
387 
388 #define _storew(reg, addr)	"sw " reg ", " addr
389 #define _storeh(reg, addr)	"sh " reg ", " addr
390 #define _storeb(reg, addr)	"sb " reg ", " addr
391 
392 #define __put_kernel_common(ptr, size)					\
393 do {									\
394 	switch (size) {							\
395 	case 1: __put_data_asm(_storeb, ptr); break;			\
396 	case 2: __put_data_asm(_storeh, ptr); break;			\
397 	case 4: __put_data_asm(_storew, ptr); break;			\
398 	case 8: __PUT_DW(_stored, ptr); break;				\
399 	default: __put_user_unknown(); break;				\
400 	}								\
401 } while(0)
402 #endif
403 
404 /*
405  * Yuck.  We need two variants, one for 64bit operation and one
406  * for 32 bit mode and old iron.
407  */
408 #ifdef CONFIG_32BIT
409 #define __PUT_DW(insn, ptr) __put_data_asm_ll32(insn, ptr)
410 #endif
411 #ifdef CONFIG_64BIT
412 #define __PUT_DW(insn, ptr) __put_data_asm(insn, ptr)
413 #endif
414 
415 #define __put_user_common(ptr, size)					\
416 do {									\
417 	switch (size) {							\
418 	case 1: __put_data_asm(user_sb, ptr); break;			\
419 	case 2: __put_data_asm(user_sh, ptr); break;			\
420 	case 4: __put_data_asm(user_sw, ptr); break;			\
421 	case 8: __PUT_DW(user_sd, ptr); break;				\
422 	default: __put_user_unknown(); break;				\
423 	}								\
424 } while (0)
425 
426 #define __put_user_nocheck(x, ptr, size)				\
427 ({									\
428 	__typeof__(*(ptr)) __pu_val;					\
429 	int __pu_err = 0;						\
430 									\
431 	__pu_val = (x);							\
432 	if (eva_kernel_access()) {					\
433 		__put_kernel_common(ptr, size);				\
434 	} else {							\
435 		__chk_user_ptr(ptr);					\
436 		__put_user_common(ptr, size);				\
437 	}								\
438 	__pu_err;							\
439 })
440 
441 #define __put_user_check(x, ptr, size)					\
442 ({									\
443 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
444 	__typeof__(*(ptr)) __pu_val = (x);				\
445 	int __pu_err = -EFAULT;						\
446 									\
447 	might_fault();							\
448 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size))) {	\
449 		if (eva_kernel_access())				\
450 			__put_kernel_common(__pu_addr, size);		\
451 		else							\
452 			__put_user_common(__pu_addr, size);		\
453 	}								\
454 									\
455 	__pu_err;							\
456 })
457 
458 #define __put_data_asm(insn, ptr)					\
459 {									\
460 	__asm__ __volatile__(						\
461 	"1:	"insn("%z2", "%3")"	# __put_data_asm	\n"	\
462 	"2:							\n"	\
463 	"	.insn						\n"	\
464 	"	.section	.fixup,\"ax\"			\n"	\
465 	"3:	li	%0, %4					\n"	\
466 	"	j	2b					\n"	\
467 	"	.previous					\n"	\
468 	"	.section	__ex_table,\"a\"		\n"	\
469 	"	" __UA_ADDR "	1b, 3b				\n"	\
470 	"	.previous					\n"	\
471 	: "=r" (__pu_err)						\
472 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
473 	  "i" (-EFAULT));						\
474 }
475 
476 #define __put_data_asm_ll32(insn, ptr)					\
477 {									\
478 	__asm__ __volatile__(						\
479 	"1:	"insn("%2", "(%3)")"	# __put_data_asm_ll32	\n"	\
480 	"2:	"insn("%D2", "4(%3)")"				\n"	\
481 	"3:							\n"	\
482 	"	.insn						\n"	\
483 	"	.section	.fixup,\"ax\"			\n"	\
484 	"4:	li	%0, %4					\n"	\
485 	"	j	3b					\n"	\
486 	"	.previous					\n"	\
487 	"	.section	__ex_table,\"a\"		\n"	\
488 	"	" __UA_ADDR "	1b, 4b				\n"	\
489 	"	" __UA_ADDR "	2b, 4b				\n"	\
490 	"	.previous"						\
491 	: "=r" (__pu_err)						\
492 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
493 	  "i" (-EFAULT));						\
494 }
495 
496 extern void __put_user_unknown(void);
497 
498 /*
499  * ul{b,h,w} are macros and there are no equivalent macros for EVA.
500  * EVA unaligned access is handled in the ADE exception handler.
501  */
502 #ifndef CONFIG_EVA
503 /*
504  * put_user_unaligned: - Write a simple value into user space.
505  * @x:	 Value to copy to user space.
506  * @ptr: Destination address, in user space.
507  *
508  * Context: User context only.	This function may sleep.
509  *
510  * This macro copies a single simple value from kernel space to user
511  * space.  It supports simple types like char and int, but not larger
512  * data types like structures or arrays.
513  *
514  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
515  * to the result of dereferencing @ptr.
516  *
517  * Returns zero on success, or -EFAULT on error.
518  */
519 #define put_user_unaligned(x,ptr)	\
520 	__put_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
521 
522 /*
523  * get_user_unaligned: - Get a simple variable from user space.
524  * @x:	 Variable to store result.
525  * @ptr: Source address, in user space.
526  *
527  * Context: User context only.	This function may sleep.
528  *
529  * This macro copies a single simple variable from user space to kernel
530  * space.  It supports simple types like char and int, but not larger
531  * data types like structures or arrays.
532  *
533  * @ptr must have pointer-to-simple-variable type, and the result of
534  * dereferencing @ptr must be assignable to @x without a cast.
535  *
536  * Returns zero on success, or -EFAULT on error.
537  * On error, the variable @x is set to zero.
538  */
539 #define get_user_unaligned(x,ptr) \
540 	__get_user_unaligned_check((x),(ptr),sizeof(*(ptr)))
541 
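/*
 * Usage sketch (hypothetical, not part of this header): reading a 32-bit
 * field from a packed, potentially misaligned user structure, e.g. while
 * decoding a wire-format header handed in by user space.
 *
 *	u32 seq;
 *
 *	if (get_user_unaligned(seq, (u32 __user *)(ubuf + 1)))
 *		return -EFAULT;
 */
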
542 /*
543  * __put_user_unaligned: - Write a simple value into user space, with less checking.
544  * @x:	 Value to copy to user space.
545  * @ptr: Destination address, in user space.
546  *
547  * Context: User context only.	This function may sleep.
548  *
549  * This macro copies a single simple value from kernel space to user
550  * space.  It supports simple types like char and int, but not larger
551  * data types like structures or arrays.
552  *
553  * @ptr must have pointer-to-simple-variable type, and @x must be assignable
554  * to the result of dereferencing @ptr.
555  *
556  * Caller must check the pointer with access_ok() before calling this
557  * function.
558  *
559  * Returns zero on success, or -EFAULT on error.
560  */
561 #define __put_user_unaligned(x,ptr) \
562 	__put_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
563 
564 /*
565  * __get_user_unaligned: - Get a simple variable from user space, with less checking.
566  * @x:	 Variable to store result.
567  * @ptr: Source address, in user space.
568  *
569  * Context: User context only.	This function may sleep.
570  *
571  * This macro copies a single simple variable from user space to kernel
572  * space.  It supports simple types like char and int, but not larger
573  * data types like structures or arrays.
574  *
575  * @ptr must have pointer-to-simple-variable type, and the result of
576  * dereferencing @ptr must be assignable to @x without a cast.
577  *
578  * Caller must check the pointer with access_ok() before calling this
579  * function.
580  *
581  * Returns zero on success, or -EFAULT on error.
582  * On error, the variable @x is set to zero.
583  */
584 #define __get_user_unaligned(x,ptr) \
585 	__get_user_unaligned_nocheck((x),(ptr),sizeof(*(ptr)))
586 
587 /*
588  * Yuck.  We need two variants, one for 64bit operation and one
589  * for 32 bit mode and old iron.
590  */
591 #ifdef CONFIG_32BIT
592 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
593 	__get_user_unaligned_asm_ll32(val, ptr)
594 #endif
595 #ifdef CONFIG_64BIT
596 #define __GET_USER_UNALIGNED_DW(val, ptr)				\
597 	__get_user_unaligned_asm(val, "uld", ptr)
598 #endif
599 
600 extern void __get_user_unaligned_unknown(void);
601 
602 #define __get_user_unaligned_common(val, size, ptr)			\
603 do {									\
604 	switch (size) {							\
605 	case 1: __get_data_asm(val, "lb", ptr); break;			\
606 	case 2: __get_user_unaligned_asm(val, "ulh", ptr); break;	\
607 	case 4: __get_user_unaligned_asm(val, "ulw", ptr); break;	\
608 	case 8: __GET_USER_UNALIGNED_DW(val, ptr); break;		\
609 	default: __get_user_unaligned_unknown(); break;			\
610 	}								\
611 } while (0)
612 
613 #define __get_user_unaligned_nocheck(x,ptr,size)			\
614 ({									\
615 	int __gu_err;							\
616 									\
617 	__get_user_unaligned_common((x), size, ptr);			\
618 	__gu_err;							\
619 })
620 
621 #define __get_user_unaligned_check(x,ptr,size)				\
622 ({									\
623 	int __gu_err = -EFAULT;						\
624 	const __typeof__(*(ptr)) __user * __gu_ptr = (ptr);		\
625 									\
626 	if (likely(access_ok(VERIFY_READ,  __gu_ptr, size)))		\
627 		__get_user_unaligned_common((x), size, __gu_ptr);	\
628 									\
629 	__gu_err;							\
630 })
631 
632 #define __get_data_unaligned_asm(val, insn, addr)			\
633 {									\
634 	long __gu_tmp;							\
635 									\
636 	__asm__ __volatile__(						\
637 	"1:	" insn "	%1, %3				\n"	\
638 	"2:							\n"	\
639 	"	.insn						\n"	\
640 	"	.section .fixup,\"ax\"				\n"	\
641 	"3:	li	%0, %4					\n"	\
642 	"	move	%1, $0					\n"	\
643 	"	j	2b					\n"	\
644 	"	.previous					\n"	\
645 	"	.section __ex_table,\"a\"			\n"	\
646 	"	"__UA_ADDR "\t1b, 3b				\n"	\
647 	"	"__UA_ADDR "\t1b + 4, 3b			\n"	\
648 	"	.previous					\n"	\
649 	: "=r" (__gu_err), "=r" (__gu_tmp)				\
650 	: "0" (0), "o" (__m(addr)), "i" (-EFAULT));			\
651 									\
652 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
653 }
654 
655 /*
656  * Get a long long 64 using 32 bit registers.
657  */
658 #define __get_user_unaligned_asm_ll32(val, addr)			\
659 {									\
660 	unsigned long long __gu_tmp;					\
661 									\
662 	__asm__ __volatile__(						\
663 	"1:	ulw	%1, (%3)				\n"	\
664 	"2:	ulw	%D1, 4(%3)				\n"	\
665 	"	move	%0, $0					\n"	\
666 	"3:							\n"	\
667 	"	.insn						\n"	\
668 	"	.section	.fixup,\"ax\"			\n"	\
669 	"4:	li	%0, %4					\n"	\
670 	"	move	%1, $0					\n"	\
671 	"	move	%D1, $0					\n"	\
672 	"	j	3b					\n"	\
673 	"	.previous					\n"	\
674 	"	.section	__ex_table,\"a\"		\n"	\
675 	"	" __UA_ADDR "	1b, 4b				\n"	\
676 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
677 	"	" __UA_ADDR "	2b, 4b				\n"	\
678 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
679 	"	.previous					\n"	\
680 	: "=r" (__gu_err), "=&r" (__gu_tmp)				\
681 	: "0" (0), "r" (addr), "i" (-EFAULT));				\
682 	(val) = (__typeof__(*(addr))) __gu_tmp;				\
683 }
684 
685 /*
686  * Yuck.  We need two variants, one for 64bit operation and one
687  * for 32 bit mode and old iron.
688  */
689 #ifdef CONFIG_32BIT
690 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm_ll32(ptr)
691 #endif
692 #ifdef CONFIG_64BIT
693 #define __PUT_USER_UNALIGNED_DW(ptr) __put_user_unaligned_asm("usd", ptr)
694 #endif
695 
696 #define __put_user_unaligned_common(ptr, size)				\
697 do {									\
698 	switch (size) {							\
699 	case 1: __put_data_asm("sb", ptr); break;			\
700 	case 2: __put_user_unaligned_asm("ush", ptr); break;		\
701 	case 4: __put_user_unaligned_asm("usw", ptr); break;		\
702 	case 8: __PUT_USER_UNALIGNED_DW(ptr); break;			\
703 	default: __put_user_unaligned_unknown(); break;			\
	}								\
704 } while (0)
705 
706 #define __put_user_unaligned_nocheck(x,ptr,size)			\
707 ({									\
708 	__typeof__(*(ptr)) __pu_val;					\
709 	int __pu_err = 0;						\
710 									\
711 	__pu_val = (x);							\
712 	__put_user_unaligned_common(ptr, size);				\
713 	__pu_err;							\
714 })
715 
716 #define __put_user_unaligned_check(x,ptr,size)				\
717 ({									\
718 	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
719 	__typeof__(*(ptr)) __pu_val = (x);				\
720 	int __pu_err = -EFAULT;						\
721 									\
722 	if (likely(access_ok(VERIFY_WRITE,  __pu_addr, size)))		\
723 		__put_user_unaligned_common(__pu_addr, size);		\
724 									\
725 	__pu_err;							\
726 })
727 
728 #define __put_user_unaligned_asm(insn, ptr)				\
729 {									\
730 	__asm__ __volatile__(						\
731 	"1:	" insn "	%z2, %3		# __put_user_unaligned_asm\n" \
732 	"2:							\n"	\
733 	"	.insn						\n"	\
734 	"	.section	.fixup,\"ax\"			\n"	\
735 	"3:	li	%0, %4					\n"	\
736 	"	j	2b					\n"	\
737 	"	.previous					\n"	\
738 	"	.section	__ex_table,\"a\"		\n"	\
739 	"	" __UA_ADDR "	1b, 3b				\n"	\
740 	"	.previous					\n"	\
741 	: "=r" (__pu_err)						\
742 	: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)),			\
743 	  "i" (-EFAULT));						\
744 }
745 
746 #define __put_user_unaligned_asm_ll32(ptr)				\
747 {									\
748 	__asm__ __volatile__(						\
749 	"1:	sw	%2, (%3)	# __put_user_unaligned_asm_ll32 \n" \
750 	"2:	sw	%D2, 4(%3)				\n"	\
751 	"3:							\n"	\
752 	"	.insn						\n"	\
753 	"	.section	.fixup,\"ax\"			\n"	\
754 	"4:	li	%0, %4					\n"	\
755 	"	j	3b					\n"	\
756 	"	.previous					\n"	\
757 	"	.section	__ex_table,\"a\"		\n"	\
758 	"	" __UA_ADDR "	1b, 4b				\n"	\
759 	"	" __UA_ADDR "	1b + 4, 4b			\n"	\
760 	"	" __UA_ADDR "	2b, 4b				\n"	\
761 	"	" __UA_ADDR "	2b + 4, 4b			\n"	\
762 	"	.previous"						\
763 	: "=r" (__pu_err)						\
764 	: "0" (0), "r" (__pu_val), "r" (ptr),				\
765 	  "i" (-EFAULT));						\
766 }
767 
768 extern void __put_user_unaligned_unknown(void);
769 #endif
770 
771 /*
772  * We're generating jumps to subroutines which will be outside the range of
773  * the jal jump instruction, so modules load the address and use jalr.
774  */
775 #ifdef MODULE
776 #define __MODULE_JAL(destination)					\
777 	".set\tnoat\n\t"						\
778 	__UA_LA "\t$1, " #destination "\n\t"				\
779 	"jalr\t$1\n\t"							\
780 	".set\tat\n\t"
781 #else
782 #define __MODULE_JAL(destination)					\
783 	"jal\t" #destination "\n\t"
784 #endif
785 
786 #if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) &&	\
787 					      defined(CONFIG_CPU_HAS_PREFETCH))
788 #define DADDI_SCRATCH "$3"
789 #else
790 #define DADDI_SCRATCH "$0"
791 #endif
792 
793 extern size_t __copy_user(void *__to, const void *__from, size_t __n);
794 
795 #ifndef CONFIG_EVA
796 #define __invoke_copy_to_user(to, from, n)				\
797 ({									\
798 	register void __user *__cu_to_r __asm__("$4");			\
799 	register const void *__cu_from_r __asm__("$5");			\
800 	register long __cu_len_r __asm__("$6");				\
801 									\
802 	__cu_to_r = (to);						\
803 	__cu_from_r = (from);						\
804 	__cu_len_r = (n);						\
805 	__asm__ __volatile__(						\
806 	__MODULE_JAL(__copy_user)					\
807 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
808 	:								\
809 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
810 	  DADDI_SCRATCH, "memory");					\
811 	__cu_len_r;							\
812 })
813 
814 #define __invoke_copy_to_kernel(to, from, n)				\
815 	__invoke_copy_to_user(to, from, n)
816 
817 #endif
818 
819 /*
820  * __copy_to_user: - Copy a block of data into user space, with less checking.
821  * @to:	  Destination address, in user space.
822  * @from: Source address, in kernel space.
823  * @n:	  Number of bytes to copy.
824  *
825  * Context: User context only.	This function may sleep.
826  *
827  * Copy data from kernel space to user space.  Caller must check
828  * the specified block with access_ok() before calling this function.
829  *
830  * Returns number of bytes that could not be copied.
831  * On success, this will be zero.
832  */
833 #define __copy_to_user(to, from, n)					\
834 ({									\
835 	void __user *__cu_to;						\
836 	const void *__cu_from;						\
837 	long __cu_len;							\
838 									\
839 	__cu_to = (to);							\
840 	__cu_from = (from);						\
841 	__cu_len = (n);							\
842 	might_fault();							\
843 	if (eva_kernel_access())					\
844 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
845 						   __cu_len);		\
846 	else								\
847 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
848 						 __cu_len);		\
849 	__cu_len;							\
850 })
851 
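/*
 * Usage sketch (hypothetical, not part of this header): __copy_to_user()
 * after an explicit access_ok() check.  The return value is the number of
 * bytes left uncopied, so any nonzero result is treated as a fault.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(info)))
 *		return -EFAULT;
 *	if (__copy_to_user(ubuf, &info, sizeof(info)))
 *		return -EFAULT;
 */
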
852 extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
853 
854 #define __copy_to_user_inatomic(to, from, n)				\
855 ({									\
856 	void __user *__cu_to;						\
857 	const void *__cu_from;						\
858 	long __cu_len;							\
859 									\
860 	__cu_to = (to);							\
861 	__cu_from = (from);						\
862 	__cu_len = (n);							\
863 	if (eva_kernel_access())					\
864 		__cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from,	\
865 						   __cu_len);		\
866 	else								\
867 		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
868 						 __cu_len);		\
869 	__cu_len;							\
870 })
871 
872 #define __copy_from_user_inatomic(to, from, n)				\
873 ({									\
874 	void *__cu_to;							\
875 	const void __user *__cu_from;					\
876 	long __cu_len;							\
877 									\
878 	__cu_to = (to);							\
879 	__cu_from = (from);						\
880 	__cu_len = (n);							\
881 	if (eva_kernel_access())					\
882 		__cu_len = __invoke_copy_from_kernel_inatomic(__cu_to,	\
883 							      __cu_from,\
884 							      __cu_len);\
885 	else								\
886 		__cu_len = __invoke_copy_from_user_inatomic(__cu_to,	\
887 							    __cu_from,	\
888 							    __cu_len);	\
889 	__cu_len;							\
890 })
891 
892 /*
893  * copy_to_user: - Copy a block of data into user space.
894  * @to:	  Destination address, in user space.
895  * @from: Source address, in kernel space.
896  * @n:	  Number of bytes to copy.
897  *
898  * Context: User context only.	This function may sleep.
899  *
900  * Copy data from kernel space to user space.
901  *
902  * Returns number of bytes that could not be copied.
903  * On success, this will be zero.
904  */
905 #define copy_to_user(to, from, n)					\
906 ({									\
907 	void __user *__cu_to;						\
908 	const void *__cu_from;						\
909 	long __cu_len;							\
910 									\
911 	__cu_to = (to);							\
912 	__cu_from = (from);						\
913 	__cu_len = (n);							\
914 	if (eva_kernel_access()) {					\
915 		__cu_len = __invoke_copy_to_kernel(__cu_to,		\
916 						   __cu_from,		\
917 						   __cu_len);		\
918 	} else {							\
919 		if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) {       \
920 			might_fault();                                  \
921 			__cu_len = __invoke_copy_to_user(__cu_to,	\
922 							 __cu_from,	\
923 							 __cu_len);     \
924 		}							\
925 	}								\
926 	__cu_len;							\
927 })
928 
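/*
 * Usage sketch (hypothetical, not part of this header): copy_to_user()
 * performs its own access_ok() check, so the common pattern is simply to
 * convert any short copy into -EFAULT.
 *
 *	if (copy_to_user(ubuf, kbuf, len))
 *		return -EFAULT;
 *	return len;
 */
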
929 #ifndef CONFIG_EVA
930 
931 #define __invoke_copy_from_user(to, from, n)				\
932 ({									\
933 	register void *__cu_to_r __asm__("$4");				\
934 	register const void __user *__cu_from_r __asm__("$5");		\
935 	register long __cu_len_r __asm__("$6");				\
936 									\
937 	__cu_to_r = (to);						\
938 	__cu_from_r = (from);						\
939 	__cu_len_r = (n);						\
940 	__asm__ __volatile__(						\
941 	".set\tnoreorder\n\t"						\
942 	__MODULE_JAL(__copy_user)					\
943 	".set\tnoat\n\t"						\
944 	__UA_ADDU "\t$1, %1, %2\n\t"					\
945 	".set\tat\n\t"							\
946 	".set\treorder"							\
947 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
948 	:								\
949 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
950 	  DADDI_SCRATCH, "memory");					\
951 	__cu_len_r;							\
952 })
953 
954 #define __invoke_copy_from_kernel(to, from, n)				\
955 	__invoke_copy_from_user(to, from, n)
956 
957 /* For userland <-> userland operations */
958 #define ___invoke_copy_in_user(to, from, n)				\
959 	__invoke_copy_from_user(to, from, n)
960 
961 /* For kernel <-> kernel operations */
962 #define ___invoke_copy_in_kernel(to, from, n)				\
963 	__invoke_copy_from_user(to, from, n)
964 
965 #define __invoke_copy_from_user_inatomic(to, from, n)			\
966 ({									\
967 	register void *__cu_to_r __asm__("$4");				\
968 	register const void __user *__cu_from_r __asm__("$5");		\
969 	register long __cu_len_r __asm__("$6");				\
970 									\
971 	__cu_to_r = (to);						\
972 	__cu_from_r = (from);						\
973 	__cu_len_r = (n);						\
974 	__asm__ __volatile__(						\
975 	".set\tnoreorder\n\t"						\
976 	__MODULE_JAL(__copy_user_inatomic)				\
977 	".set\tnoat\n\t"						\
978 	__UA_ADDU "\t$1, %1, %2\n\t"					\
979 	".set\tat\n\t"							\
980 	".set\treorder"							\
981 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
982 	:								\
983 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
984 	  DADDI_SCRATCH, "memory");					\
985 	__cu_len_r;							\
986 })
987 
988 #define __invoke_copy_from_kernel_inatomic(to, from, n)			\
989 	__invoke_copy_from_user_inatomic(to, from, n)
990 
991 #else
992 
993 /* EVA specific functions */
994 
995 extern size_t __copy_user_inatomic_eva(void *__to, const void *__from,
996 				       size_t __n);
997 extern size_t __copy_from_user_eva(void *__to, const void *__from,
998 				   size_t __n);
999 extern size_t __copy_to_user_eva(void *__to, const void *__from,
1000 				 size_t __n);
1001 extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
1002 
1003 #define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr)	\
1004 ({									\
1005 	register void *__cu_to_r __asm__("$4");				\
1006 	register const void __user *__cu_from_r __asm__("$5");		\
1007 	register long __cu_len_r __asm__("$6");				\
1008 									\
1009 	__cu_to_r = (to);						\
1010 	__cu_from_r = (from);						\
1011 	__cu_len_r = (n);						\
1012 	__asm__ __volatile__(						\
1013 	".set\tnoreorder\n\t"						\
1014 	__MODULE_JAL(func_ptr)						\
1015 	".set\tnoat\n\t"						\
1016 	__UA_ADDU "\t$1, %1, %2\n\t"					\
1017 	".set\tat\n\t"							\
1018 	".set\treorder"							\
1019 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1020 	:								\
1021 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1022 	  DADDI_SCRATCH, "memory");					\
1023 	__cu_len_r;							\
1024 })
1025 
1026 #define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr)	\
1027 ({									\
1028 	register void *__cu_to_r __asm__("$4");				\
1029 	register const void __user *__cu_from_r __asm__("$5");		\
1030 	register long __cu_len_r __asm__("$6");				\
1031 									\
1032 	__cu_to_r = (to);						\
1033 	__cu_from_r = (from);						\
1034 	__cu_len_r = (n);						\
1035 	__asm__ __volatile__(						\
1036 	__MODULE_JAL(func_ptr)						\
1037 	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
1038 	:								\
1039 	: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31",	\
1040 	  DADDI_SCRATCH, "memory");					\
1041 	__cu_len_r;							\
1042 })
1043 
1044 /*
1045  * Source or destination address is in userland. We need to go through
1046  * the TLB
1047  */
1048 #define __invoke_copy_from_user(to, from, n)				\
1049 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva)
1050 
1051 #define __invoke_copy_from_user_inatomic(to, from, n)			\
1052 	__invoke_copy_from_user_eva_generic(to, from, n,		\
1053 					    __copy_user_inatomic_eva)
1054 
1055 #define __invoke_copy_to_user(to, from, n)				\
1056 	__invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva)
1057 
1058 #define ___invoke_copy_in_user(to, from, n)				\
1059 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva)
1060 
1061 /*
1062  * Source or destination address in the kernel. We are not going through
1063  * the TLB
1064  */
1065 #define __invoke_copy_from_kernel(to, from, n)				\
1066 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1067 
1068 #define __invoke_copy_from_kernel_inatomic(to, from, n)			\
1069 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic)
1070 
1071 #define __invoke_copy_to_kernel(to, from, n)				\
1072 	__invoke_copy_to_user_eva_generic(to, from, n, __copy_user)
1073 
1074 #define ___invoke_copy_in_kernel(to, from, n)				\
1075 	__invoke_copy_from_user_eva_generic(to, from, n, __copy_user)
1076 
1077 #endif /* CONFIG_EVA */
1078 
1079 /*
1080  * __copy_from_user: - Copy a block of data from user space, with less checking.
1081  * @to:	  Destination address, in kernel space.
1082  * @from: Source address, in user space.
1083  * @n:	  Number of bytes to copy.
1084  *
1085  * Context: User context only.	This function may sleep.
1086  *
1087  * Copy data from user space to kernel space.  Caller must check
1088  * the specified block with access_ok() before calling this function.
1089  *
1090  * Returns number of bytes that could not be copied.
1091  * On success, this will be zero.
1092  *
1093  * If some data could not be copied, this function will pad the copied
1094  * data to the requested size using zero bytes.
1095  */
1096 #define __copy_from_user(to, from, n)					\
1097 ({									\
1098 	void *__cu_to;							\
1099 	const void __user *__cu_from;					\
1100 	long __cu_len;							\
1101 									\
1102 	__cu_to = (to);							\
1103 	__cu_from = (from);						\
1104 	__cu_len = (n);							\
1105 	might_fault();							\
1106 	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
1107 					   __cu_len);			\
1108 	__cu_len;							\
1109 })
1110 
1111 /*
1112  * copy_from_user: - Copy a block of data from user space.
1113  * @to:	  Destination address, in kernel space.
1114  * @from: Source address, in user space.
1115  * @n:	  Number of bytes to copy.
1116  *
1117  * Context: User context only.	This function may sleep.
1118  *
1119  * Copy data from user space to kernel space.
1120  *
1121  * Returns number of bytes that could not be copied.
1122  * On success, this will be zero.
1123  *
1124  * If some data could not be copied, this function will pad the copied
1125  * data to the requested size using zero bytes.
1126  */
1127 #define copy_from_user(to, from, n)					\
1128 ({									\
1129 	void *__cu_to;							\
1130 	const void __user *__cu_from;					\
1131 	long __cu_len;							\
1132 									\
1133 	__cu_to = (to);							\
1134 	__cu_from = (from);						\
1135 	__cu_len = (n);							\
1136 	if (eva_kernel_access()) {					\
1137 		__cu_len = __invoke_copy_from_kernel(__cu_to,		\
1138 						     __cu_from,		\
1139 						     __cu_len);		\
1140 	} else {							\
1141 		if (access_ok(VERIFY_READ, __cu_from, __cu_len)) {	\
1142 			might_fault();                                  \
1143 			__cu_len = __invoke_copy_from_user(__cu_to,	\
1144 							   __cu_from,	\
1145 							   __cu_len);   \
1146 		} else {						\
1147 			memset(__cu_to, 0, __cu_len);			\
1148 		}							\
1149 	}								\
1150 	__cu_len;							\
1151 })
1152 
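/*
 * Usage sketch (hypothetical, not part of this header): on a failed access
 * the destination is zero-filled up to @n, so the return value, not the
 * buffer contents, must be used to detect the fault.  struct foo_req is
 * made up.
 *
 *	struct foo_req req;
 *
 *	if (copy_from_user(&req, ubuf, sizeof(req)))
 *		return -EFAULT;
 */
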
1153 #define __copy_in_user(to, from, n)					\
1154 ({									\
1155 	void __user *__cu_to;						\
1156 	const void __user *__cu_from;					\
1157 	long __cu_len;							\
1158 									\
1159 	__cu_to = (to);							\
1160 	__cu_from = (from);						\
1161 	__cu_len = (n);							\
1162 	if (eva_kernel_access()) {					\
1163 		__cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from,	\
1164 						    __cu_len);		\
1165 	} else {							\
1166 		might_fault();						\
1167 		__cu_len = ___invoke_copy_in_user(__cu_to, __cu_from,	\
1168 						  __cu_len);		\
1169 	}								\
1170 	__cu_len;							\
1171 })
1172 
1173 #define copy_in_user(to, from, n)					\
1174 ({									\
1175 	void __user *__cu_to;						\
1176 	const void __user *__cu_from;					\
1177 	long __cu_len;							\
1178 									\
1179 	__cu_to = (to);							\
1180 	__cu_from = (from);						\
1181 	__cu_len = (n);							\
1182 	if (eva_kernel_access()) {					\
1183 		__cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from,	\
1184 						    __cu_len);		\
1185 	} else {							\
1186 		if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\
1187 			   access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\
1188 			might_fault();					\
1189 			__cu_len = ___invoke_copy_in_user(__cu_to,	\
1190 							  __cu_from,	\
1191 							  __cu_len);	\
1192 		}							\
1193 	}								\
1194 	__cu_len;							\
1195 })
1196 
1197 /*
1198  * __clear_user: - Zero a block of memory in user space, with less checking.
1199  * @to:	  Destination address, in user space.
1200  * @n:	  Number of bytes to zero.
1201  *
1202  * Zero a block of memory in user space.  Caller must check
1203  * the specified block with access_ok() before calling this function.
1204  *
1205  * Returns number of bytes that could not be cleared.
1206  * On success, this will be zero.
1207  */
1208 static inline __kernel_size_t
1209 __clear_user(void __user *addr, __kernel_size_t size)
1210 {
1211 	__kernel_size_t res;
1212 
1213 	might_fault();
1214 	__asm__ __volatile__(
1215 		"move\t$4, %1\n\t"
1216 		"move\t$5, $0\n\t"
1217 		"move\t$6, %2\n\t"
1218 		__MODULE_JAL(__bzero)
1219 		"move\t%0, $6"
1220 		: "=r" (res)
1221 		: "r" (addr), "r" (size)
1222 		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
1223 
1224 	return res;
1225 }
1226 
1227 #define clear_user(addr,n)						\
1228 ({									\
1229 	void __user * __cl_addr = (addr);				\
1230 	unsigned long __cl_size = (n);					\
1231 	if (__cl_size && access_ok(VERIFY_WRITE,			\
1232 					__cl_addr, __cl_size))		\
1233 		__cl_size = __clear_user(__cl_addr, __cl_size);		\
1234 	__cl_size;							\
1235 })
1236 
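/*
 * Usage sketch (hypothetical, not part of this header): zeroing the tail of
 * a user buffer, e.g. after a short read from a device.
 *
 *	if (clear_user(ubuf + copied, len - copied))
 *		return -EFAULT;
 */
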
1237 /*
1238  * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
1239  * @dst:   Destination address, in kernel space.  This buffer must be at
1240  *	   least @count bytes long.
1241  * @src:   Source address, in user space.
1242  * @count: Maximum number of bytes to copy, including the trailing NUL.
1243  *
1244  * Copies a NUL-terminated string from userspace to kernel space.
1245  * Caller must check the specified block with access_ok() before calling
1246  * this function.
1247  *
1248  * On success, returns the length of the string (not including the trailing
1249  * NUL).
1250  *
1251  * If access to userspace fails, returns -EFAULT (some data may have been
1252  * copied).
1253  *
1254  * If @count is smaller than the length of the string, copies @count bytes
1255  * and returns @count.
1256  */
1257 static inline long
1258 __strncpy_from_user(char *__to, const char __user *__from, long __len)
1259 {
1260 	long res;
1261 
1262 	if (eva_kernel_access()) {
1263 		__asm__ __volatile__(
1264 			"move\t$4, %1\n\t"
1265 			"move\t$5, %2\n\t"
1266 			"move\t$6, %3\n\t"
1267 			__MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
1268 			"move\t%0, $2"
1269 			: "=r" (res)
1270 			: "r" (__to), "r" (__from), "r" (__len)
1271 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1272 	} else {
1273 		might_fault();
1274 		__asm__ __volatile__(
1275 			"move\t$4, %1\n\t"
1276 			"move\t$5, %2\n\t"
1277 			"move\t$6, %3\n\t"
1278 			__MODULE_JAL(__strncpy_from_user_nocheck_asm)
1279 			"move\t%0, $2"
1280 			: "=r" (res)
1281 			: "r" (__to), "r" (__from), "r" (__len)
1282 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1283 	}
1284 
1285 	return res;
1286 }
1287 
1288 /*
1289  * strncpy_from_user: - Copy a NUL terminated string from userspace.
1290  * @dst:   Destination address, in kernel space.  This buffer must be at
1291  *	   least @count bytes long.
1292  * @src:   Source address, in user space.
1293  * @count: Maximum number of bytes to copy, including the trailing NUL.
1294  *
1295  * Copies a NUL-terminated string from userspace to kernel space.
1296  *
1297  * On success, returns the length of the string (not including the trailing
1298  * NUL).
1299  *
1300  * If access to userspace fails, returns -EFAULT (some data may have been
1301  * copied).
1302  *
1303  * If @count is smaller than the length of the string, copies @count bytes
1304  * and returns @count.
1305  */
1306 static inline long
1307 strncpy_from_user(char *__to, const char __user *__from, long __len)
1308 {
1309 	long res;
1310 
1311 	if (eva_kernel_access()) {
1312 		__asm__ __volatile__(
1313 			"move\t$4, %1\n\t"
1314 			"move\t$5, %2\n\t"
1315 			"move\t$6, %3\n\t"
1316 			__MODULE_JAL(__strncpy_from_kernel_asm)
1317 			"move\t%0, $2"
1318 			: "=r" (res)
1319 			: "r" (__to), "r" (__from), "r" (__len)
1320 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1321 	} else {
1322 		might_fault();
1323 		__asm__ __volatile__(
1324 			"move\t$4, %1\n\t"
1325 			"move\t$5, %2\n\t"
1326 			"move\t$6, %3\n\t"
1327 			__MODULE_JAL(__strncpy_from_user_asm)
1328 			"move\t%0, $2"
1329 			: "=r" (res)
1330 			: "r" (__to), "r" (__from), "r" (__len)
1331 			: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
1332 	}
1333 
1334 	return res;
1335 }
1336 
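/*
 * Usage sketch (hypothetical, not part of this header): copying a
 * user-supplied name into a fixed kernel buffer and rejecting overlong or
 * faulting strings.  A return value equal to the buffer size means the
 * string was truncated and is not NUL-terminated.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;
 *	if (len == sizeof(name))
 *		return -ENAMETOOLONG;
 */
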
1337 /*
1338  * strlen_user: - Get the size of a string in user space.
1339  * @str: The string to measure.
1340  *
1341  * Context: User context only.	This function may sleep.
1342  *
1343  * Get the size of a NUL-terminated string in user space.
1344  *
1345  * Returns the size of the string INCLUDING the terminating NUL.
1346  * On exception, returns 0.
1347  *
1348  * If there is a limit on the length of a valid string, you may wish to
1349  * consider using strnlen_user() instead.
1350  */
1351 static inline long strlen_user(const char __user *s)
1352 {
1353 	long res;
1354 
1355 	if (eva_kernel_access()) {
1356 		__asm__ __volatile__(
1357 			"move\t$4, %1\n\t"
1358 			__MODULE_JAL(__strlen_kernel_asm)
1359 			"move\t%0, $2"
1360 			: "=r" (res)
1361 			: "r" (s)
1362 			: "$2", "$4", __UA_t0, "$31");
1363 	} else {
1364 		might_fault();
1365 		__asm__ __volatile__(
1366 			"move\t$4, %1\n\t"
1367 			__MODULE_JAL(__strlen_user_asm)
1368 			"move\t%0, $2"
1369 			: "=r" (res)
1370 			: "r" (s)
1371 			: "$2", "$4", __UA_t0, "$31");
1372 	}
1373 
1374 	return res;
1375 }
1376 
1377 /* Returns: 0 if bad, string length+1 (memory size) of string if ok */
1378 static inline long __strnlen_user(const char __user *s, long n)
1379 {
1380 	long res;
1381 
1382 	if (eva_kernel_access()) {
1383 		__asm__ __volatile__(
1384 			"move\t$4, %1\n\t"
1385 			"move\t$5, %2\n\t"
1386 			__MODULE_JAL(__strnlen_kernel_nocheck_asm)
1387 			"move\t%0, $2"
1388 			: "=r" (res)
1389 			: "r" (s), "r" (n)
1390 			: "$2", "$4", "$5", __UA_t0, "$31");
1391 	} else {
1392 		might_fault();
1393 		__asm__ __volatile__(
1394 			"move\t$4, %1\n\t"
1395 			"move\t$5, %2\n\t"
1396 			__MODULE_JAL(__strnlen_user_nocheck_asm)
1397 			"move\t%0, $2"
1398 			: "=r" (res)
1399 			: "r" (s), "r" (n)
1400 			: "$2", "$4", "$5", __UA_t0, "$31");
1401 	}
1402 
1403 	return res;
1404 }
1405 
1406 /*
1407  * strnlen_user: - Get the size of a string in user space.
1408  * @str: The string to measure.
1409  *
1410  * Context: User context only.	This function may sleep.
1411  *
1412  * Get the size of a NUL-terminated string in user space.
1413  *
1414  * Returns the size of the string INCLUDING the terminating NUL.
1415  * On exception, returns 0.
1416  * If the string is too long, returns a value greater than @n.
1417  */
1418 static inline long strnlen_user(const char __user *s, long n)
1419 {
1420 	long res;
1421 
1422 	might_fault();
1423 	if (eva_kernel_access()) {
1424 		__asm__ __volatile__(
1425 			"move\t$4, %1\n\t"
1426 			"move\t$5, %2\n\t"
1427 			__MODULE_JAL(__strnlen_kernel_asm)
1428 			"move\t%0, $2"
1429 			: "=r" (res)
1430 			: "r" (s), "r" (n)
1431 			: "$2", "$4", "$5", __UA_t0, "$31");
1432 	} else {
1433 		__asm__ __volatile__(
1434 			"move\t$4, %1\n\t"
1435 			"move\t$5, %2\n\t"
1436 			__MODULE_JAL(__strnlen_user_asm)
1437 			"move\t%0, $2"
1438 			: "=r" (res)
1439 			: "r" (s), "r" (n)
1440 			: "$2", "$4", "$5", __UA_t0, "$31");
1441 	}
1442 
1443 	return res;
1444 }
1445 
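/*
 * Usage sketch (hypothetical, not part of this header): bounding a user
 * string before allocating space for it.  A return of 0 means a fault, a
 * return greater than the limit means the string did not fit; otherwise the
 * result already includes the trailing NUL.
 *
 *	long len = strnlen_user(ustr, PATH_MAX);
 *
 *	if (!len)
 *		return -EFAULT;
 *	if (len > PATH_MAX)
 *		return -ENAMETOOLONG;
 */
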
1446 struct exception_table_entry
1447 {
1448 	unsigned long insn;
1449 	unsigned long nextinsn;
1450 };
1451 
1452 extern int fixup_exception(struct pt_regs *regs);
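
/*
 * Rough sketch of how the __ex_table entries emitted by the uaccess asm above
 * are consumed (simplified; the real code lives in arch/mips/mm/extable.c and
 * kernel/extable.c): on a fault the trap handler looks up the faulting PC and,
 * if an entry exists, resumes execution at the recorded fixup address instead
 * of killing the task.
 *
 *	int fixup_exception(struct pt_regs *regs)
 *	{
 *		const struct exception_table_entry *fixup;
 *
 *		fixup = search_exception_tables(exception_epc(regs));
 *		if (fixup) {
 *			regs->cp0_epc = fixup->nextinsn;
 *			return 1;
 *		}
 *		return 0;
 *	}
 */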
1453 
1454 #endif /* _ASM_UACCESS_H */
1455