/* uaccess.h: userspace accessor functions
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/segment.h>
#include <asm/sections.h>

#define HAVE_ARCH_UNMAPPED_AREA	/* we decide where to put mmaps */

#define __ptr(x) ((unsigned long __force *)(x))

#define VERIFY_READ	0
#define VERIFY_WRITE	1

#define __addr_ok(addr) ((unsigned long)(addr) < get_addr_limit())

/*
 * check that a range of addresses falls within the current address limit
 */
static inline int ___range_ok(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_MMU
	int flag = -EFAULT, tmp;

	asm volatile (
		"	addcc	%3,%2,%1,icc0	\n"	/* set C-flag if addr+size>4GB */
		"	subcc.p	%1,%4,gr0,icc1	\n"	/* jump if addr+size>limit */
		"	bc	icc0,#0,0f	\n"
		"	bhi	icc1,#0,0f	\n"
		"	setlos	#0,%0		\n"	/* mark okay */
		"0:				\n"
		: "=r"(flag), "=&r"(tmp)
		: "r"(addr), "r"(size), "r"(get_addr_limit()), "0"(flag)
		);

	return flag;

#else

	if (addr < memory_start ||
	    addr > memory_end ||
	    size > memory_end - memory_start ||
	    addr + size > memory_end)
		return -EFAULT;

	return 0;
#endif
}

#define __range_ok(addr,size) ___range_ok((unsigned long) (addr), (unsigned long) (size))

#define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0)
#define __access_ok(addr,size) (__range_ok((addr), (size)) == 0)
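
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * caller validates the whole user buffer once with access_ok() before
 * using the unchecked accessor variants below.  "ubuf" and "len" are
 * hypothetical names; the type argument is ignored on this architecture.
 *
 *	if (!access_ok(VERIFY_WRITE, ubuf, len))
 *		return -EFAULT;
 */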
69 
70 /*
71  * The exception table consists of pairs of addresses: the first is the
72  * address of an instruction that is allowed to fault, and the second is
73  * the address at which the program should continue.  No registers are
74  * modified, so it is entirely up to the continuation code to figure out
75  * what to do.
76  *
77  * All the routines below use bits of fixup code that are out of line
78  * with the main instruction path.  This means when everything is well,
79  * we don't even have to jump over them.  Further, they do not intrude
80  * on our cache or tlb entries.
81  */
82 struct exception_table_entry
83 {
84 	unsigned long insn, fixup;
85 };
86 
87 /* Returns 0 if exception not found and fixup otherwise.  */
88 extern unsigned long search_exception_table(unsigned long);
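
/*
 * Illustrative sketch (not from this header): on a faulting access, a
 * page-fault handler can look the trapping PC up in the table and, if a
 * fixup address is found, resume execution there instead of killing the
 * task.  "regs->pc" is a hypothetical register-frame field.
 *
 *	unsigned long fixup = search_exception_table(regs->pc);
 *	if (fixup) {
 *		regs->pc = fixup;
 *		return;
 *	}
 */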

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
									\
	typeof(*(ptr)) __pu_val = (x);					\
	__chk_user_ptr(ptr);						\
									\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "b", "r");	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "h", "r");	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "",  "r");	\
		break;							\
	case 8:								\
		__put_user_asm(__pu_err, __pu_val, ptr, "d", "e");	\
		break;							\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})

#define put_user(x, ptr)			\
({						\
	typeof(*(ptr)) __user *_p = (ptr);	\
	int _e;					\
						\
	_e = __range_ok(_p, sizeof(*_p));	\
	if (_e == 0)				\
		_e = __put_user((x), _p);	\
	_e;					\
})
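
/*
 * Usage sketch (illustrative): store a single value through a checked
 * user pointer; put_user() returns 0 on success or -EFAULT on failure.
 * "uptr" is a hypothetical "int __user *".
 *
 *	if (put_user(42, uptr))
 *		return -EFAULT;
 */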

extern int __put_user_bad(void);

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */

#ifdef CONFIG_MMU

#define __put_user_asm(err,x,ptr,dsize,constraint)					\
do {											\
	asm volatile("1:	st"dsize"%I1	%2,%M1	\n"				\
		     "2:				\n"				\
		     ".subsection 2			\n"				\
		     "3:	setlos		%3,%0	\n"				\
		     "		bra		2b	\n"				\
		     ".previous				\n"				\
		     ".section __ex_table,\"a\"		\n"				\
		     "		.balign		8	\n"				\
		     "		.long		1b,3b	\n"				\
		     ".previous"							\
		     : "=r" (err)							\
		     : "m" (*__ptr(ptr)), constraint (x), "i"(-EFAULT), "0"(err)	\
		     : "memory");							\
} while (0)

#else

#define __put_user_asm(err,x,ptr,bwl,con)	\
do {						\
	asm("	st"bwl"%I0	%1,%M0	\n"	\
	    "	membar			\n"	\
	    :					\
	    : "m" (*__ptr(ptr)), con (x)	\
	    : "memory");			\
} while (0)

#endif

/*****************************************************************************/
/*
 * fetch a single value from userspace
 */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
									\
	switch (sizeof(*(ptr))) {					\
	case 1: {							\
		unsigned char __gu_val;					\
		__get_user_asm(__gu_err, __gu_val, ptr, "ub", "=r");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	case 2: {							\
		unsigned short __gu_val;				\
		__get_user_asm(__gu_err, __gu_val, ptr, "uh", "=r");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	case 4: {							\
		unsigned int __gu_val;					\
		__get_user_asm(__gu_err, __gu_val, ptr, "", "=r");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	case 8: {							\
		unsigned long long __gu_val;				\
		__get_user_asm(__gu_err, __gu_val, ptr, "d", "=e");	\
		(x) = *(__force __typeof__(*(ptr)) *) &__gu_val;	\
		break;							\
	}								\
	default:							\
		__gu_err = __get_user_bad();				\
		break;							\
	}								\
	__gu_err;							\
})

#define get_user(x, ptr)			\
({						\
	const typeof(*(ptr)) __user *_p = (ptr);\
	int _e;					\
						\
	_e = __range_ok(_p, sizeof(*_p));	\
	if (likely(_e == 0))			\
		_e = __get_user((x), _p);	\
	else					\
		(x) = (typeof(x)) 0;		\
	_e;					\
})
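
/*
 * Usage sketch (illustrative): fetch a single value; get_user() returns
 * 0 on success or -EFAULT on failure, zeroing the destination in the
 * failure case.  "uptr" is a hypothetical "int __user *".
 *
 *	int val;
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */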

extern int __get_user_bad(void);

#ifdef CONFIG_MMU

#define __get_user_asm(err,x,ptr,dtype,constraint)	\
do {							\
	asm("1:		ld"dtype"%I2	%M2,%1	\n"	\
	    "2:					\n"	\
	    ".subsection 2			\n"	\
	    "3:		setlos		%3,%0	\n"	\
	    "		setlos		#0,%1	\n"	\
	    "		bra		2b	\n"	\
	    ".previous				\n"	\
	    ".section __ex_table,\"a\"		\n"	\
	    "		.balign		8	\n"	\
	    "		.long		1b,3b	\n"	\
	    ".previous"					\
	    : "=r" (err), constraint (x)		\
	    : "m" (*__ptr(ptr)), "i"(-EFAULT), "0"(err)	\
	    );						\
} while (0)

#else

#define __get_user_asm(err,x,ptr,bwl,con)	\
	asm("	ld"bwl"%I1	%M1,%0	\n"	\
	    "	membar			\n"	\
	    : con(x)				\
	    : "m" (*__ptr(ptr)))

#endif

/*****************************************************************************/
/*
 * block copying to and from userspace, and clearing of userspace
 */
#define ____force(x) (__force void *)(void __user *)(x)
#ifdef CONFIG_MMU
extern long __memset_user(void *dst, unsigned long count);
extern long __memcpy_user(void *dst, const void *src, unsigned long count);

#define clear_user(dst,count)			__memset_user(____force(dst), (count))
#define __copy_from_user_inatomic(to, from, n)	__memcpy_user((to), ____force(from), (n))
#define __copy_to_user_inatomic(to, from, n)	__memcpy_user(____force(to), (from), (n))

#else

#define clear_user(dst,count)			(memset(____force(dst), 0, (count)), 0)
#define __copy_from_user_inatomic(to, from, n)	(memcpy((to), ____force(from), (n)), 0)
#define __copy_to_user_inatomic(to, from, n)	(memcpy(____force(to), (from), (n)), 0)

#endif
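
/*
 * Usage sketch (illustrative): clear_user() zeroes a userspace range and
 * returns the number of bytes that could not be cleared (0 on success).
 * "ubuf" and "len" are hypothetical names.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */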

#define __clear_user clear_user

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_sleep();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_sleep();
	return __copy_from_user_inatomic(to, from, n);
}

static inline long copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret = n;

	if (likely(__access_ok(from, n)))
		ret = __copy_from_user(to, from, n);

	if (unlikely(ret != 0))
		memset(to + (n - ret), 0, ret);

	return ret;
}

static inline long copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return likely(__access_ok(to, n)) ? __copy_to_user(to, from, n) : n;
}
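
/*
 * Usage sketch (illustrative): both copy routines return the number of
 * bytes that could NOT be transferred, so zero means success;
 * copy_from_user() additionally zero-fills the untransferred tail of
 * the kernel buffer.  "kbuf", "ubuf" and "len" are hypothetical names.
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */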

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern long strnlen_user(const char __user *src, long count);

#define strlen_user(str) strnlen_user(str, 32767)

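/*
 * Usage sketch (illustrative): strncpy_from_user() returns the length of
 * the copied string (excluding the NUL) on success, or -EFAULT on fault.
 * "name" and "uname" are hypothetical names.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name) - 1);
 *	if (len < 0)
 *		return len;
 *	name[len] = '\0';
 */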

#endif /* _ASM_UACCESS_H */