/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)  ((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
					 unsigned long size)
{
	return (addr >= MEM_USER_INTRPT &&
		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif


/*
 * Note that using this definition ignores is_arch_mappable_range(),
 * so on tilepro, code that uses user_addr_max() is constrained not
 * to reference the tilepro user-interrupt region.
 */
#define user_addr_max() (current_thread_info()->addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({ \
	__chk_user_ptr(addr); \
	likely(__range_ok((unsigned long)(addr), (size)) == 0);	\
})
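
/*
 * Illustrative sketch (not part of the original header): a typical
 * caller checks the block once with access_ok() and then uses the
 * cheaper double-underscore accessors.  "ubuf", "kbuf" and "n" are
 * hypothetical names.
 *
 *	if (!access_ok(VERIFY_READ, ubuf, n))
 *		return -EFAULT;
 *	ret = __copy_from_user(kbuf, ubuf, n);
 */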

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
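
/*
 * For example, __get_user_asm() below records such a pair in the
 * __ex_table section: label "1:" marks the faulting load, and label
 * "0:" in .fixup is the address where execution resumes after a fault.
 */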

/*
 * Support macros for __get_user().
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type.  This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 *
 * Note that __get_user() and __put_user() assume proper alignment.
 */
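
/*
 * Illustrative sketch of the "(x)-(x)" typing trick described above:
 * if x has type "char __user *", then (x)-(x) has type ptrdiff_t, so
 * the 64-bit case can assign a loaded value (here a hypothetical u64
 * "val64") without a questionable pointer cast:
 *
 *	(x) = (__typeof(x))(__typeof((x)-(x)))val64;
 */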

#ifdef __LP64__
#define _ASM_PTR	".quad"
#define _ASM_ALIGN	".align 8"
#else
#define _ASM_PTR	".long"
#define _ASM_ALIGN	".align 4"
#endif

#define __get_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %1, 0; movei %0, %3 }\n"		\
		     "j 9f\n"						\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret), "=r" (x)				\
		     : "r" (ptr), "i" (-EFAULT))

#ifdef __tilegx__
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
#else
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
#ifdef __LITTLE_ENDIAN
#define __lo32(a, b) a
#define __hi32(a, b) b
#else
#define __lo32(a, b) b
#define __hi32(a, b) a
#endif
#define __get_user_8(x, ptr, ret)					\
	({								\
		unsigned int __a, __b;					\
		asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n"	\
			     "2: { lw %2, %2; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %1, 0; movei %2, 0 }\n"	\
			     "{ movei %0, %4; j 9f }\n"			\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=r" (ret), "=r" (__a), "=&r" (__b)	\
			     : "r" (ptr), "i" (-EFAULT));		\
		(x) = (__typeof(x))(__typeof((x)-(x)))			\
			(((u64)__hi32(__a, __b) << 32) |		\
			 __lo32(__a, __b));				\
	})
#endif

extern int __get_user_bad(void)
  __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)						\
	({								\
		int __ret;						\
		__chk_user_ptr(ptr);					\
		switch (sizeof(*(ptr))) {				\
		case 1: __get_user_1(x, ptr, __ret); break;		\
		case 2: __get_user_2(x, ptr, __ret); break;		\
		case 4: __get_user_4(x, ptr, __ret); break;		\
		case 8: __get_user_8(x, ptr, __ret); break;		\
		default: __ret = __get_user_bad(); break;		\
		}							\
		__ret;							\
	})
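
/*
 * Illustrative sketch (hypothetical "uptr", already validated with
 * access_ok()): fetch a single int from user space.
 *
 *	int val;
 *	if (__get_user(val, (int __user *)uptr))
 *		return -EFAULT;
 */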

/* Support macros for __put_user(). */

#define __put_user_asm(OP, x, ptr, ret)					\
	asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n"		\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "0: { movei %0, %3; j 9f }\n"			\
		     ".section __ex_table,\"a\"\n"			\
		     _ASM_ALIGN "\n"					\
		     _ASM_PTR " 1b, 0b\n"				\
		     ".popsection\n"					\
		     "9:"						\
		     : "=r" (ret)					\
		     : "r" (ptr), "r" (x), "i" (-EFAULT))

#ifdef __tilegx__
#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
#else
#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
#define __put_user_8(x, ptr, ret)					\
	({								\
		u64 __x = (__typeof((x)-(x)))(x);			\
		int __lo = (int) __x, __hi = (int) (__x >> 32);		\
		asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n"	\
			     "2: { sw %0, %3; movei %0, 0 }\n"		\
			     ".pushsection .fixup,\"ax\"\n"		\
			     "0: { movei %0, %4; j 9f }\n"		\
			     ".section __ex_table,\"a\"\n"		\
			     ".align 4\n"				\
			     ".word 1b, 0b\n"				\
			     ".word 2b, 0b\n"				\
			     ".popsection\n"				\
			     "9:"					\
			     : "=&r" (ret)				\
			     : "r" (ptr), "r" (__lo32(__lo, __hi)),	\
			     "r" (__hi32(__lo, __hi)), "i" (-EFAULT));	\
	})
#endif

extern int __put_user_bad(void)
  __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr)						\
({									\
	int __ret;							\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1: __put_user_1(x, ptr, __ret); break;			\
	case 2: __put_user_2(x, ptr, __ret); break;			\
	case 4: __put_user_4(x, ptr, __ret); break;			\
	case 8: __put_user_8(x, ptr, __ret); break;			\
	default: __ret = __put_user_bad(); break;			\
	}								\
	__ret;								\
})
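
/*
 * Illustrative sketch (hypothetical "uptr", already validated with
 * access_ok()): store a single long to user space.
 *
 *	if (__put_user(42L, (long __user *)uptr))
 *		return -EFAULT;
 */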

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})
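
/*
 * Illustrative sketch (hypothetical "int __user *uaddr"): the checked
 * forms need no prior access_ok() call.
 *
 *	int val;
 *	if (get_user(val, uaddr) || put_user(val + 1, uaddr))
 *		return -EFAULT;
 */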

/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
	void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}
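
/*
 * Illustrative sketch (hypothetical "ubuf" and "struct stats st"): the
 * checked form returns the number of bytes that could not be copied.
 *
 *	if (copy_to_user(ubuf, &st, sizeof(st)))
 *		return -EFAULT;
 */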

/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().  This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
	void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
	void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
/*
 * There are still unprovable places in the generic code as of 2.6.34, so this
 * option is not really compatible with -Werror, which is more useful in
 * general.
 */
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif
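
/*
 * Illustrative sketch (hypothetical "kbuf" array and "ubuf"): as
 * documented above, a partial copy leaves the uncopied tail of the
 * kernel buffer zero-filled.
 *
 *	char kbuf[64];
 *	if (copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */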

#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to user space.  Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
	void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = __copy_in_user(to, from, n);
	return n;
}
#endif
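
/*
 * Illustrative sketch (tilegx only; hypothetical "udst" and "usrc"):
 *
 *	if (copy_in_user(udst, usrc, len))
 *		return -EFAULT;
 */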


extern long strnlen_user(const char __user *str, long n);
extern long strlen_user(const char __user *str);
extern long strncpy_from_user(char *dst, const char __user *src, long n);
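
/*
 * Illustrative sketch (hypothetical "ustr"): strncpy_from_user()
 * returns the length of the copied string on success, or -EFAULT on
 * an invalid address.
 *
 *	char name[64];
 *	long len = strncpy_from_user(name, ustr, sizeof(name));
 *	if (len < 0)
 *		return len;
 */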

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
	void __user *mem, unsigned long len)
{
	might_fault();
	return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __clear_user(mem, len);
	return len;
}
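
/*
 * Illustrative sketch (hypothetical "ubuf"): zero a user buffer and
 * treat any uncleared remainder as a fault.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */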

/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = flush_user_asm(mem, len);
	mb_incoherent();
	return retval;
}

static inline unsigned long __must_check flush_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __flush_user(mem, len);
	return len;
}
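
/*
 * Illustrative sketch (hypothetical "ubuf"): push a user range out to
 * memory, e.g. before a non-coherent device reads it.
 *
 *	if (flush_user(ubuf, len))
 *		return -EFAULT;
 */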

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem:   Destination address, in user space.
 * @len:   Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = finv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check finv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __finv_user(mem, len);
	return len;
}
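
/*
 * Illustrative sketch (hypothetical "ubuf"): flush and invalidate a
 * user range so subsequent reads come from memory.
 *
 *	if (finv_user(ubuf, len))
 *		return -EFAULT;
 */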

#endif /* _ASM_TILE_UACCESS_H */