#ifndef __METAG_UACCESS_H
#define __METAG_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)  ((mm_segment_t) { (s) })

#define KERNEL_DS       MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()        (current_thread_info()->addr_limit)
#define set_fs(x)       (current_thread_info()->addr_limit = (x))

#define segment_eq(a, b)	((a).seg == (b).seg)

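/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * the classic save/override/restore pattern for the address limit above,
 * so that kernel buffers pass the access_ok() checks in the routines that
 * follow. The function name is hypothetical.
 */
#if 0
static void example_widen_addr_limit(void)
{
	mm_segment_t old_fs = get_fs();	/* remember the current limit */

	set_fs(KERNEL_DS);		/* allow "user" accesses to kernel addresses */
	/* ... call code that uses get_user()/put_user()/copy_*_user() ... */
	set_fs(old_fs);			/* always restore the previous limit */
}
#endif
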
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	/*
	 * Allow access to the user mapped memory area, but not the system area
	 * before it. The check extends to the top of the address space when
	 * kernel access is allowed (there's no real reason to user copy to the
	 * system area in any case).
	 */
	if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
		   size <= get_fs().seg - addr))
		return true;
	/*
	 * Explicitly allow NULL pointers here. Parts of the kernel such
	 * as readv/writev use access_ok to validate pointers, but want
	 * to allow NULL pointers for various reasons. NULL pointers are
	 * safe to allow through because the first page is not mappable on
	 * Meta.
	 */
	if (!addr)
		return true;
	/* Allow access to core code memory area... */
	if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
	    size <= LINCORE_CODE_LIMIT + 1 - addr)
		return true;
	/* ... but no other areas. */
	return false;
}

#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),	\
						(unsigned long)(size))

static inline int verify_area(int type, const void *addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

extern void __put_user_bad(void);

#define __put_user_nocheck(x, ptr, size)		\
({                                                      \
	long __pu_err;                                  \
	__put_user_size((x), (ptr), (size), __pu_err);	\
	__pu_err;                                       \
})

#define __put_user_check(x, ptr, size)				\
({                                                              \
	long __pu_err = -EFAULT;                                \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);           \
	if (access_ok(VERIFY_WRITE, __pu_addr, size))		\
		__put_user_size((x), __pu_addr, (size), __pu_err);	\
	__pu_err;                                               \
})

extern long __put_user_asm_b(unsigned int x, void __user *addr);
extern long __put_user_asm_w(unsigned int x, void __user *addr);
extern long __put_user_asm_d(unsigned int x, void __user *addr);
extern long __put_user_asm_l(unsigned long long x, void __user *addr);

#define __put_user_size(x, ptr, size, retval)				\
do {                                                                    \
	retval = 0;                                                     \
	switch (size) {                                                 \
	case 1:								\
		retval = __put_user_asm_b((__force unsigned int)x, ptr);\
		break;							\
	case 2:								\
		retval = __put_user_asm_w((__force unsigned int)x, ptr);\
		break;							\
	case 4:								\
		retval = __put_user_asm_d((__force unsigned int)x, ptr);\
		break;							\
	case 8:								\
		retval = __put_user_asm_l((__force unsigned long long)x,\
					  ptr);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

extern long __get_user_bad(void);

#define __get_user_nocheck(x, ptr, size)			\
({                                                              \
	long __gu_err, __gu_val;                                \
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;             \
	__gu_err;                                               \
})

#define __get_user_check(x, ptr, size)					\
({                                                                      \
	long __gu_err = -EFAULT, __gu_val = 0;                          \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	if (access_ok(VERIFY_READ, __gu_addr, size))			\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;                     \
	__gu_err;                                                       \
})

extern unsigned char __get_user_asm_b(const void __user *addr, long *err);
extern unsigned short __get_user_asm_w(const void __user *addr, long *err);
extern unsigned int __get_user_asm_d(const void __user *addr, long *err);

#define __get_user_size(x, ptr, size, retval)			\
do {                                                            \
	retval = 0;                                             \
	switch (size) {                                         \
	case 1:							\
		x = __get_user_asm_b(ptr, &retval); break;	\
	case 2:							\
		x = __get_user_asm_w(ptr, &retval); break;	\
	case 4:							\
		x = __get_user_asm_d(ptr, &retval); break;	\
	default:						\
		(x) = __get_user_bad();				\
	}                                                       \
} while (0)

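/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * the single-value transfer routines above, assuming a hypothetical handler
 * that increments a counter held in user memory.
 */
#if 0
static long example_bump_counter(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* returns 0 on success, -EFAULT on fault */
		return -EFAULT;
	if (put_user(val + 1, uptr))
		return -EFAULT;
	return 0;
}
#endif
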
/*
 * Copy a null terminated string from userspace.
 *
 * Must return:
 * -EFAULT		for an exception
 * count		if we hit the buffer limit
 * bytes copied		if we hit a null byte
 * (without the null byte)
 */

extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
					     long count);

static inline long
strncpy_from_user(char *dst, const char __user *src, long count)
{
	if (!access_ok(VERIFY_READ, src, 1))
		return -EFAULT;
	return __strncpy_from_user(dst, src, count);
}
/*
 * Return the size of a string (including the ending 0)
 *
 * Return 0 on exception, a value greater than N if too long
 */
extern long __must_check strnlen_user(const char __user *src, long count);

#define strlen_user(str) strnlen_user(str, 32767)
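
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * pulling a short string out of user memory with the helpers above. The
 * function name is hypothetical and buf is assumed to hold at least 32 bytes.
 */
#if 0
static long example_copy_name(char *buf, const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, 31);

	if (len < 0)		/* -EFAULT: faulted while copying */
		return len;
	if (len == 31)		/* hit the buffer limit before the NUL */
		return -ENAMETOOLONG;
	buf[len] = '\0';	/* ensure termination; len excludes the NUL */
	return 0;
}
#endif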

extern unsigned long raw_copy_from_user(void *to, const void __user *from,
					unsigned long n);

static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}

#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user

extern unsigned long __must_check __copy_user(void __user *to,
					      const void *from,
					      unsigned long n);

static inline unsigned long copy_to_user(void __user *to, const void *from,
					 unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __copy_user(to, from, n);
	return n;
}

#define __copy_to_user(to, from, n) __copy_user(to, from, n)
#define __copy_to_user_inatomic __copy_to_user
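
/*
 * Illustrative only, not part of the original header: a minimal sketch of a
 * hypothetical ioctl-style handler using copy_from_user()/copy_to_user().
 * Both return the number of bytes that could NOT be copied, so zero means
 * success; copy_from_user() additionally zero-fills the uncopied tail of the
 * kernel buffer. The struct and function names are hypothetical.
 */
#if 0
struct example_args {
	unsigned int in;
	unsigned int out;
};

static long example_handle(void __user *uarg)
{
	struct example_args args;

	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;
	args.out = args.in * 2;			/* some trivial work */
	if (copy_to_user(uarg, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
#endif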

/*
 * Zero Userspace
 */

extern unsigned long __must_check __do_clear_user(void __user *to,
						  unsigned long n);

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		return __do_clear_user(to, n);
	return n;
}

#define __clear_user(to, n)            __do_clear_user(to, n)
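
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * zeroing a user buffer with clear_user(), which, like the copy routines,
 * returns the number of bytes left unwritten. Names are hypothetical.
 */
#if 0
static long example_zero_buffer(void __user *ubuf, unsigned long len)
{
	if (clear_user(ubuf, len))
		return -EFAULT;
	return 0;
}
#endif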

#endif /* __METAG_UACCESS_H */