/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
					 unsigned long size)
{
	return (addr >= MEM_USER_INTRPT &&
		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Note that using this definition ignores is_arch_mappable_range(),
 * so on tilepro, code that uses user_addr_max() is constrained not
 * to reference the tilepro user-interrupt region.
 */
#define user_addr_max() (current_thread_info()->addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
	__chk_user_ptr(addr); \
	likely(__range_ok((unsigned long)(addr), (size)) == 0);	\
})
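
/*
 * Example (illustrative sketch, not part of the original header): a
 * driver ioctl handler might validate a user buffer once with
 * access_ok() and then use the unchecked __copy_from_user() variant.
 * The struct and function names here are hypothetical.
 *
 *	struct foo_args {
 *		int op;
 *		unsigned long arg;
 *	};
 *
 *	static long foo_ioctl(void __user *uptr)
 *	{
 *		struct foo_args args;
 *
 *		if (!access_ok(VERIFY_READ, uptr, sizeof(args)))
 *			return -EFAULT;
 *		if (__copy_from_user(&args, uptr, sizeof(args)))
 *			return -EFAULT;
 *		return args.op;
 *	}
 */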

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
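
/*
 * A quick sketch of how __inttype() resolves (hypothetical examples,
 * assuming 32-bit unsigned long as on tilepro):
 *
 *	__inttype(char) a;	// unsigned long
 *	__inttype(int) b;	// unsigned long
 *	u64 v;
 *	__inttype(v) c;		// unsigned long long
 */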

/*
 * Support macros for __get_user().
 * Note that __get_user() and __put_user() assume proper alignment.
 */

#ifdef __LP64__
#define _ASM_PTR	".quad"
#define _ASM_ALIGN	".align 8"
#else
#define _ASM_PTR	".long"
#define _ASM_ALIGN	".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %1, 0; movei %0, %3 }\n"		\
		     "j 9f\n"						\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret), "=r" (x)				\
		     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)					\
	({								\
		unsigned int __a, __b;					\
		asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"	\
			     "2: { lw %2, %2; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %1, 0; movei %2, 0 }\n"	\
			     "{ movei %0, %4; j 9f }\n"			\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=r" (ret), "=r" (__a), "=&r" (__b)	\
			     : "r" (ptr), "i" (-EFAULT));		\
		(x) = (__force __typeof(x))(__inttype(x))		\
			(((u64)__hi32(__a, __b) << 32) |		\
			 __lo32(__a, __b));				\
	})
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)						\
	({								\
		int __ret;						\
		typeof(x) _x;						\
		__chk_user_ptr(ptr);					\
		switch (sizeof(*(ptr))) {				\
		case 1: __get_user_1(_x, ptr, __ret); break;		\
		case 2: __get_user_2(_x, ptr, __ret); break;		\
		case 4: __get_user_4(_x, ptr, __ret); break;		\
		case 8: __get_user_8(_x, ptr, __ret); break;		\
		default: __ret = __get_user_bad(); break;		\
		}							\
		(x) = (typeof(*(ptr))) _x;				\
		__ret;							\
	})
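
/*
 * Example (illustrative sketch, not part of the original header):
 * fetching one int from an already-validated user pointer.  The
 * function name is hypothetical.
 *
 *	static int foo_peek(const int __user *uptr, int *out)
 *	{
 *		// caller has done access_ok(VERIFY_READ, uptr, sizeof(int))
 *		return __get_user(*out, uptr);	// 0 or -EFAULT
 *	}
 */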

/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %0, %3; j 9f }\n"			\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret)					\
		     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)					\
	({								\
		u64 __x = (__force __inttype(x))(x);			\
		int __lo = (int) __x, __hi = (int) (__x >> 32);		\
		asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"	\
			     "2: { sw %0, %3; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %0, %4; j 9f }\n"		\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=&r" (ret)				\
			     : "r" (ptr), "r" (__lo32(__lo, __hi)),	\
			     "r" (__hi32(__lo, __hi)), "i" (-EFAULT));	\
	})
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
({									\
	int __ret;							\
	typeof(*(ptr)) _x = (x);					\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_1(_x, ptr, __ret); break;			\
	case 2: __put_user_2(_x, ptr, __ret); break;			\
	case 4: __put_user_4(_x, ptr, __ret); break;			\
	case 8: __put_user_8(_x, ptr, __ret); break;			\
	default: __ret = __put_user_bad(); break;			\
	}								\
	__ret;								\
})
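
/*
 * Example (illustrative sketch, not part of the original header):
 * writing one result word back to a validated user pointer.  The
 * function name is hypothetical.
 *
 *	static int foo_poke(int __user *uptr, int val)
 *	{
 *		// caller has done access_ok(VERIFY_WRITE, uptr, sizeof(int))
 *		return __put_user(val, uptr);	// 0 or -EFAULT
 *	}
 */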

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
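
/*
 * Example (illustrative sketch, not part of the original header):
 * the checked forms are what ordinary driver code reaches for, since
 * they fold in the access_ok() test.  Names here are hypothetical.
 *
 *	static long foo_double(int __user *uptr)
 *	{
 *		int v;
 *
 *		if (get_user(v, uptr))
 *			return -EFAULT;
 *		return put_user(v * 2, uptr);
 *	}
 */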

/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
	void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
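
/*
 * Example (illustrative sketch, not part of the original header):
 * returning a kernel structure to user space.  Note the "bytes not
 * copied" return convention.  Names here are hypothetical.
 *
 *	static long foo_get_stats(void __user *ubuf, struct foo_stats *st)
 *	{
 *		if (copy_to_user(ubuf, st, sizeof(*st)))
 *			return -EFAULT;	// some bytes were not copied
 *		return 0;
 *	}
 */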

/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
	void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
	void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34, so this
 * option is not really compatible with -Werror, which is more useful in
 * general.
 */
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif
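
/*
 * Example (illustrative sketch, not part of the original header):
 * pulling a request structure in from user space.  Because the
 * zeroing behavior pads short copies, the kernel buffer never holds
 * stale data on a partial fault.  Names here are hypothetical.
 *
 *	static long foo_set_config(const void __user *ubuf)
 *	{
 *		struct foo_config cfg;
 *
 *		if (copy_from_user(&cfg, ubuf, sizeof(cfg)))
 *			return -EFAULT;
 *		return foo_apply_config(&cfg);
 *	}
 */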

#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
	void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = __copy_in_user(to, from, n);
	return n;
}
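
/*
 * Example (illustrative sketch, not part of the original header;
 * tilegx only): moving bytes between two user buffers without a
 * kernel bounce buffer.  Names here are hypothetical.
 *
 *	static long foo_forward(void __user *dst, const void __user *src,
 *				unsigned long len)
 *	{
 *		return copy_in_user(dst, src, len) ? -EFAULT : 0;
 *	}
 */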
#endif


extern long strnlen_user(const char __user *str, long n);
extern long strlen_user(const char __user *str);
extern long strncpy_from_user(char *dst, const char __user *src, long);
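
/*
 * Example (illustrative sketch, not part of the original header):
 * safely capturing a NUL-terminated string from user space.  The
 * buffer size and names here are hypothetical.
 *
 *	static long foo_set_name(const char __user *uname)
 *	{
 *		char name[32];
 *		long len;
 *
 *		len = strncpy_from_user(name, uname, sizeof(name));
 *		if (len < 0)
 *			return len;		// -EFAULT
 *		if (len == sizeof(name))
 *			return -ENAMETOOLONG;	// no NUL within the buffer
 *		return 0;
 *	}
 */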

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
	void __user *mem, unsigned long len)
{
	might_fault();
	return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __clear_user(mem, len);
	return len;
}
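
/*
 * Example (illustrative sketch, not part of the original header):
 * zeroing the tail of a user buffer after a partial fill, as a
 * read()-style handler might.  Names here are hypothetical.
 *
 *	static long foo_read_tail(char __user *ubuf, unsigned long filled,
 *				  unsigned long total)
 *	{
 *		if (clear_user(ubuf + filled, total - filled))
 *			return -EFAULT;
 *		return total;
 *	}
 */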

/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = flush_user_asm(mem, len);
	mb_incoherent();
	return retval;
}

static inline unsigned long __must_check flush_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __flush_user(mem, len);
	return len;
}
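
/*
 * Example (illustrative sketch, not part of the original header):
 * one plausible use is pushing a user buffer out to memory before a
 * non-coherent device reads it; whether that is required depends on
 * the platform.  Names here are hypothetical.
 *
 *	static long foo_prep_dma(void __user *ubuf, unsigned long len)
 *	{
 *		if (flush_user(ubuf, len))
 *			return -EFAULT;
 *		return foo_start_dma(ubuf, len);
 *	}
 */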

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = finv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check finv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __finv_user(mem, len);
	return len;
}

#endif /* _ASM_TILE_UACCESS_H */