/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_M32R_UACCESS_H
#define _ASM_M32R_UACCESS_H

/*
 *  linux/include/asm-m32r/uaccess.h
 *
 *  M32R version.
 *    Copyright (C) 2004, 2006  Hirokazu Takata <takata at linux-m32r.org>
 */

/*
 * User space memory access functions
 */
#include <asm/page.h>
#include <asm/setup.h>
#include <linux/prefetch.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, and
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#ifdef CONFIG_MMU

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#else /* not CONFIG_MMU */

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0xFFFFFFFF)
#define get_ds()	(KERNEL_DS)

static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

#endif /* not CONFIG_MMU */

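/*
 * Illustrative sketch, not part of the original header: the classic
 * pattern for temporarily lifting the address-limit check so that a
 * routine written against __user pointers can be pointed at kernel
 * memory.  The callee and the kernel buffer are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);			// bypass the USER_DS limit
 *	ret = do_uaccess_op((__force void __user *)kernel_buf);
 *	set_fs(old_fs);				// always restore the old limit
 */
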
#define segment_eq(a, b)	((a).seg == (b).seg)

#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 *
 * This is equivalent to the following test:
 * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
 *
 * This needs 33-bit arithmetic. We have a carry...
 */
#define __range_ok(addr, size) ({					\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm (								\
		"	cmpu	%1, %1    ; clear cbit\n"		\
		"	addx	%1, %3    ; set cbit if overflow\n"	\
		"	subx	%0, %0\n"				\
		"	cmpu	%4, %1\n"				\
		"	subx	%0, %5\n"				\
		: "=&r" (flag), "=r" (roksum)				\
		: "1" (addr), "r" ((int)(size)),			\
		  "r" (current_thread_info()->addr_limit.seg), "r" (0)	\
		: "cbit" );						\
	flag; })

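/*
 * Illustrative sketch, not part of the original header: the same test in
 * plain C, using 64-bit arithmetic so the extra carry bit is kept.  The
 * result is nonzero ("not ok") when the sum overflows or the block runs
 * past the current addr_limit:
 *
 *	unsigned long long sum = (unsigned long long)(unsigned long)(addr) + (size);
 *
 *	flag = (sum > current_thread_info()->addr_limit.seg) ? 1 : 0;
 */
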
/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#ifdef CONFIG_MMU
#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
#else
static inline int access_ok(int type, const void *addr, unsigned long size)
{
	unsigned long val = (unsigned long)addr;

	return ((val >= memory_start) && ((val + size) < memory_end));
}
#endif /* CONFIG_MMU */

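/*
 * Illustrative sketch, not part of the original header: a typical
 * access_ok() gate before touching a user buffer.  'ubuf' and 'len' are
 * hypothetical caller-supplied values.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	// safe to use the __xxx accessors below on ubuf .. ubuf+len-1
 */
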
#include <asm/extable.h>

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space; that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

/* Careful: we have to cast the result to the type of the pointer for sign
   reasons */
/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x, ptr)							\
	__get_user_check((x), (ptr), sizeof(*(ptr)))

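/*
 * Illustrative sketch, not part of the original header: fetching a single
 * value with get_user(); 'uptr' is a hypothetical int __user pointer.
 * The access_ok() check is done by get_user() itself.
 *
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */
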
/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)							\
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

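/*
 * Illustrative sketch, not part of the original header: storing a single
 * value with put_user(); 'uptr' is a hypothetical int __user pointer.
 * The access_ok(VERIFY_WRITE) check is done by put_user() itself.
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */
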
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

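/*
 * Illustrative sketch, not part of the original header: one access_ok()
 * check covering several __get_user() accesses to the same user area, as
 * described above.  'uargs' and the two ints are hypothetical.
 *
 *	if (!access_ok(VERIFY_READ, uargs, 2 * sizeof(int)))
 *		return -EFAULT;
 *	if (__get_user(a, &uargs[0]) || __get_user(b, &uargs[1]))
 *		return -EFAULT;
 */
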
#define __get_user_nocheck(x, ptr, size)				\
({									\
	long __gu_err = 0;						\
	unsigned long __gu_val = 0;					\
	might_fault();							\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);		\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	unsigned long __gu_val = 0;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	might_fault();							\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__gu_err;							\
})

extern long __get_user_bad(void);

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	  case 1: __get_user_asm(x, ptr, retval, "ub"); break;		\
	  case 2: __get_user_asm(x, ptr, retval, "uh"); break;		\
	  case 4: __get_user_asm(x, ptr, retval, ""); break;		\
	  default: (x) = __get_user_bad();				\
	}								\
} while (0)

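/*
 * How the fault handling below works: the __ex_table entry pairs the
 * address of the user-space load at label 1 with the fixup code at label
 * 3.  If the load faults, the exception handler looks up the faulting
 * address in __ex_table and resumes at the fixup, which loads -EFAULT
 * into the error register and jumps back to label 2, just past the
 * access.  The put_user and copy_user helpers further down use the same
 * scheme.
 */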
#define __get_user_asm(x, addr, err, itype)				\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	ld"itype" %1,@%2\n"				\
		"	.fillinsn\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"3:	ldi %0,%3\n"					\
		"	seth r14,#high(2b)\n"				\
		"	or3 r14,r14,#low(2b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,3b\n"					\
		".previous"						\
		: "=&r" (err), "=&r" (x)				\
		: "r" (addr), "i" (-EFAULT), "0" (err)			\
		: "r14", "memory")

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))


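/*
 * Note (added for clarity, not in the original header): unlike the get
 * side, the put side also handles 8-byte values; __put_user_size() below
 * dispatches them to __put_user_u64(), which stores the two 32-bit halves
 * in the byte order selected at build time.  A hedged sketch, with a
 * hypothetical u64 value and __user pointer, after the caller's own
 * access_ok(VERIFY_WRITE) check:
 *
 *	if (__put_user(val64, uptr64))
 *		return -EFAULT;
 */
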
#define __put_user_nocheck(x, ptr, size)				\
({									\
	long __pu_err;							\
	might_fault();							\
	__put_user_size((x), (ptr), (size), __pu_err);			\
	__pu_err;							\
})


#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	might_fault();							\
	if (access_ok(VERIFY_WRITE, __pu_addr, size))			\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;							\
})

#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(x, addr, err)					\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	st %L1,@%2\n"					\
		"	.fillinsn\n"					\
		"2:	st %H1,@(4,%2)\n"				\
		"	.fillinsn\n"					\
		"3:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"4:	ldi %0,%3\n"					\
		"	seth r14,#high(3b)\n"				\
		"	or3 r14,r14,#low(3b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,4b\n"					\
		"	.long 2b,4b\n"					\
		".previous"						\
		: "=&r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)		\
		: "r14", "memory")

#elif defined(__BIG_ENDIAN__)
#define __put_user_u64(x, addr, err)					\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	st %H1,@%2\n"					\
		"	.fillinsn\n"					\
		"2:	st %L1,@(4,%2)\n"				\
		"	.fillinsn\n"					\
		"3:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"4:	ldi %0,%3\n"					\
		"	seth r14,#high(3b)\n"				\
		"	or3 r14,r14,#low(3b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,4b\n"					\
		"	.long 2b,4b\n"					\
		".previous"						\
		: "=&r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)		\
		: "r14", "memory")
#else
#error no endian defined
#endif

extern void __put_user_bad(void);

#define __put_user_size(x, ptr, size, retval)				\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	  case 1: __put_user_asm(x, ptr, retval, "b"); break;		\
	  case 2: __put_user_asm(x, ptr, retval, "h"); break;		\
	  case 4: __put_user_asm(x, ptr, retval, ""); break;		\
	  case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\
	  default: __put_user_bad();					\
	}								\
} while (0)

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype)				\
	__asm__ __volatile__(						\
		"	.fillinsn\n"					\
		"1:	st"itype" %1,@%2\n"				\
		"	.fillinsn\n"					\
		"2:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"3:	ldi %0,%3\n"					\
		"	seth r14,#high(2b)\n"				\
		"	or3 r14,r14,#low(2b)\n"				\
		"	jmp r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 1b,3b\n"					\
		".previous"						\
		: "=&r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT), "0" (err)		\
		: "r14", "memory")

/*
 * Here we special-case 1, 2 and 4-byte copy_*_user invocations.  On a fault
 * we return the initial request size (1, 2 or 4), as copy_*_user should do.
 * If a store crosses a page boundary and gets a fault, the m32r will not write
 * anything, so this is accurate.
 */

/*
 * Copy To/From Userspace
 */

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied.  */
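/*
 * How __copy_user() below works: if source and destination are both
 * 4-byte aligned and at least one full word is to be moved, it copies a
 * word at a time and then finishes any remaining bytes one by one;
 * otherwise it falls back to a plain byte-copy loop.  Nothing is copied
 * when to == from or when size is zero.  On a fault, the fixup code
 * recomputes what was left so that "size" ends up holding the number of
 * bytes that were NOT copied.
 */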
#define __copy_user(to, from, size)					\
do {									\
	unsigned long __dst, __src, __c;				\
	__asm__ __volatile__ (						\
		"	mv	r14, %0\n"				\
		"	or	r14, %1\n"				\
		"	beq	%0, %1, 9f\n"				\
		"	beqz	%2, 9f\n"				\
		"	and3	r14, r14, #3\n"				\
		"	bnez	r14, 2f\n"				\
		"	and3	%2, %2, #3\n"				\
		"	beqz	%3, 2f\n"				\
		"	addi	%0, #-4		; word_copy \n"		\
		"	.fillinsn\n"					\
		"0:	ld	r14, @%1+\n"				\
		"	addi	%3, #-1\n"				\
		"	.fillinsn\n"					\
		"1:	st	r14, @+%0\n"				\
		"	bnez	%3, 0b\n"				\
		"	beqz	%2, 9f\n"				\
		"	addi	%0, #4\n"				\
		"	.fillinsn\n"					\
		"2:	ldb	r14, @%1	; byte_copy \n"		\
		"	.fillinsn\n"					\
		"3:	stb	r14, @%0\n"				\
		"	addi	%1, #1\n"				\
		"	addi	%2, #-1\n"				\
		"	addi	%0, #1\n"				\
		"	bnez	%2, 2b\n"				\
		"	.fillinsn\n"					\
		"9:\n"							\
		".section .fixup,\"ax\"\n"				\
		"	.balign 4\n"					\
		"5:	addi	%3, #1\n"				\
		"	addi	%1, #-4\n"				\
		"	.fillinsn\n"					\
		"6:	slli	%3, #2\n"				\
		"	add	%2, %3\n"				\
		"	addi	%0, #4\n"				\
		"	.fillinsn\n"					\
		"7:	seth	r14, #high(9b)\n"			\
		"	or3	r14, r14, #low(9b)\n"			\
		"	jmp	r14\n"					\
		".previous\n"						\
		".section __ex_table,\"a\"\n"				\
		"	.balign 4\n"					\
		"	.long 0b,6b\n"					\
		"	.long 1b,5b\n"					\
		"	.long 2b,9b\n"					\
		"	.long 3b,9b\n"					\
		".previous\n"						\
		: "=&r" (__dst), "=&r" (__src), "=&r" (size),		\
		  "=&r" (__c)						\
		: "0" (to), "1" (from), "2" (size), "3" (size / 4)	\
		: "r14", "memory");					\
} while (0)

/* We let the __ versions of copy_from/to_user inline, because they're often
 * used in fast paths and have only a small space overhead.
 */
static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	prefetchw(to);
	__copy_user(to, from, n);
	return n;
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	prefetch(from);
	__copy_user(to, from, n);
	return n;
}

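/*
 * Illustrative sketch, not part of the original header: raw_copy_*_user()
 * return the number of bytes that could NOT be copied, so zero means
 * complete success.  They are normally reached through the generic
 * copy_from_user()/copy_to_user() wrappers, which add the access_ok()
 * check; a simplified caller-side pattern, with hypothetical buffers:
 *
 *	if (!access_ok(VERIFY_READ, ubuf, len))
 *		return -EFAULT;
 *	if (raw_copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */
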
long __must_check strncpy_from_user(char *dst, const char __user *src,
				long count);

/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @mem:  Destination address, in user space.
 * @len:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long __clear_user(void __user *mem, unsigned long len);

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:  Destination address, in user space.
 * @len:  Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long clear_user(void __user *mem, unsigned long len);

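/*
 * Illustrative sketch, not part of the original header: zeroing a user
 * buffer with clear_user(); a nonzero return value is the number of
 * bytes that could not be cleared.  'ubuf' and 'len' are hypothetical.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */
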
long strnlen_user(const char __user *str, long n);

#endif /* _ASM_M32R_UACCESS_H */