#ifndef __METAG_UACCESS_H
#define __METAG_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

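/*
 * Illustrative sketch (not part of the original header): the usual
 * save/override/restore pattern for addr_limit when the user accessors
 * need to work on kernel addresses. The helper name is made up.
 */
static inline void uaccess_example_widen_limit(void)
{
	mm_segment_t old_fs = get_fs();	/* remember the caller's limit */

	set_fs(KERNEL_DS);	/* bypass the USER_DS range check */
	/* ... get_user()/put_user()/copy_*_user() on kernel pointers ... */
	set_fs(old_fs);		/* always restore the saved limit */
}
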
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	/*
	 * Allow access to the user mapped memory area, but not the system area
	 * before it. The check extends to the top of the address space when
	 * kernel access is allowed (there's no real reason to user-copy to the
	 * system area in any case).
	 */
	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
		   size <= get_fs().seg - addr))
		return true;
	/*
	 * Explicitly allow NULL pointers here. Parts of the kernel such
	 * as readv/writev use access_ok to validate pointers, but want
	 * to allow NULL pointers for various reasons. NULL pointers are
	 * safe to allow through because the first page is not mappable on
	 * Meta.
	 */
	if (!addr)
		return true;
	/* Allow access to core code memory area... */
	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
	    size <= LINCORE_CODE_LIMIT + 1 - addr)
		return true;
	/* ... but no other areas. */
	return false;
}

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\
						(unsigned long)(size))

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

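/*
 * Illustrative sketch (not in the original header): how a caller typically
 * uses access_ok() to validate a user range before touching it with a raw
 * accessor. The helper name and the VERIFY_READ choice are made up.
 */
static inline int uaccess_example_range_ok(const void __user *uptr,
					   unsigned long n)
{
	if (!access_ok(VERIFY_READ, uptr, n))
		return -EFAULT;	/* outside the current addr_limit window */
	/* ... a raw (unchecked) accessor would be safe to use here ... */
	return 0;
}
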
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

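/*
 * Illustrative, compiled-out sketch (not the real Meta implementation):
 * conceptually, the fault handler looks the faulting PC up in the exception
 * table and, if an entry matches, resumes execution at the paired fixup
 * address. search_exception_tables() is the generic lookup helper;
 * instruction_pointer()/instruction_pointer_set() stand in for the
 * arch-specific way the saved PC is read and rewritten, so treat the body
 * as pseudo-code.
 */
#if 0
int fixup_exception_sketch(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup)
		instruction_pointer_set(regs, fixup->fixup); /* resume at fixup */
	return fixup != NULL;
}
#endif
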
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern void __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({                                                      \
	long __pu_err;                                  \
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;                                       \
})

#define __put_user_check(x, ptr, size)				\
({                                                              \
	long __pu_err = -EFAULT;                                \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;                                               \
})

extern long __put_user_asm_b(unsigned int x, void __user *addr);
extern long __put_user_asm_w(unsigned int x, void __user *addr);
extern long __put_user_asm_d(unsigned int x, void __user *addr);
extern long __put_user_asm_l(unsigned long long x, void __user *addr);

#define __put_user_size(x, ptr, size, retval)			\
do {                                                            \
	retval = 0;                                             \
	switch (size) {                                         \
	case 1:								\
		retval = __put_user_asm_b((unsigned int)x, ptr); break;	\
	case 2:								\
		retval = __put_user_asm_w((unsigned int)x, ptr); break;	\
	case 4:								\
		retval = __put_user_asm_d((unsigned int)x, ptr); break;	\
	case 8:								\
		retval = __put_user_asm_l((unsigned long long)x, ptr); break; \
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

extern long __get_user_bad(void);

#define __get_user_nocheck(x, ptr, size)			\
({                                                              \
	long __gu_err, __gu_val;                                \
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;                     \
	__gu_err;                                               \
})

#define __get_user_check(x, ptr, size)					\
({                                                                      \
	long __gu_err = -EFAULT, __gu_val = 0;                          \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;                             \
	__gu_err;                                                       \
})

extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
extern unsigned int __get_user_asm_d(const void __user *addr, long *err);

#define __get_user_size(x, ptr, size, retval)			\
do {                                                            \
	retval = 0;                                             \
	switch (size) {                                         \
	case 1:							\
		x = __get_user_asm_b(ptr, &retval); break;	\
	case 2:							\
		x = __get_user_asm_w(ptr, &retval); break;	\
	case 4:							\
		x = __get_user_asm_d(ptr, &retval); break;	\
	default:						\
		(x) = __get_user_bad();				\
	}                                                       \
} while (0)

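/*
 * Illustrative sketch (not part of the original header): the usual
 * get_user()/put_user() calling pattern. Both evaluate to 0 on success and
 * -EFAULT on a bad user pointer. The helper name and the doubling logic are
 * made up for demonstration.
 */
static inline int uaccess_example_double_word(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;		/* pointer was invalid or faulted */
	return put_user(val * 2, uptr);	/* write the doubled value back */
}
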
/*
 * Copy a null terminated string from userspace.
 *
 * Must return:
 * -EFAULT		for an exception
 * count		if we hit the buffer limit
 * bytes copied		if we hit a null byte
 * (without the null byte)
 */

extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
					     long count);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}
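
/*
 * Illustrative sketch (not part of the original header): typical handling
 * of the return contract described above. The 32-byte limit and the helper
 * name are made up.
 */
static inline long uaccess_example_copy_name(char *buf,
					     const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, 32);

	if (len < 0)
		return len;		/* -EFAULT: faulted while copying */
	if (len == 32)
		return -ENAMETOOLONG;	/* hit the limit before the NUL */
	return len;			/* bytes copied, excluding the NUL */
}
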
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than count if too long
 */
extern long __must_check strnlen_user(const char __user *src, long count);

#define strlen_user(str) strnlen_user(str, 32767)

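/*
 * Illustrative sketch (not part of the original header): using
 * strnlen_user() to bound a user string before copying it. The 128-byte
 * limit and the helper name are made up.
 */
static inline long uaccess_example_name_len(const char __user *uname)
{
	long len = strnlen_user(uname, 128);	/* length includes the NUL */

	if (len == 0)
		return -EFAULT;		/* faulted while scanning the string */
	if (len > 128)
		return -ENAMETOOLONG;	/* longer than the permitted limit */
	return len;
}
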
extern unsigned long raw_copy_from_user(void *to, const void __user *from,
					unsigned long n);

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user

extern unsigned long __must_check __copy_user(void __user *to,
					      const void *from,
					      unsigned long n);

static inline unsigned long copy_to_user(void __user *to, const void *from,
					 unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to, from, n);
	return n;
}

#define __copy_to_user(to, from, n) __copy_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user

/*
 * Zero Userspace
 */

extern unsigned long __must_check __do_clear_user(void __user *to,
						  unsigned long n);

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __do_clear_user(to, n);
	return n;
}

#define __clear_user(to, n)            __do_clear_user(to, n)

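/*
 * Illustrative sketch (not part of the original header): clear_user()
 * returns the number of bytes it could not zero, so any non-zero result
 * means failure. The helper name is made up.
 */
static inline int uaccess_example_zero_range(void __user *uptr, unsigned long n)
{
	return clear_user(uptr, n) ? -EFAULT : 0;
}
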
#endif /* __METAG_UACCESS_H */