/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef _ASMNDS32_UACCESS_H
#define _ASMNDS32_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/types.h>

#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means that when everything goes
 * well, we don't even have to jump over them.  Further, they do not
 * intrude on our cache or TLB entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

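/*
 * Illustrative sketch only, not the kernel's actual lookup: resume at
 * the fixup address when the faulting instruction has a table entry.
 * The names below (example_fixup, and regs->ipc as the faulting PC)
 * are assumptions for the example; the real search is done by the
 * generic exception-table code.
 *
 *	static int example_fixup(struct pt_regs *regs,
 *				 const struct exception_table_entry *tbl,
 *				 unsigned long nr)
 *	{
 *		unsigned long i;
 *
 *		for (i = 0; i < nr; i++) {
 *			if (tbl[i].insn == regs->ipc) {
 *				regs->ipc = tbl[i].fixup;
 *				return 1;
 *			}
 *		}
 *		return 0;
 *	}
 */
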
#define KERNEL_DS	((mm_segment_t) { ~0UL })
#define USER_DS		((mm_segment_t) { TASK_SIZE - 1 })

#define get_fs()	(current_thread_info()->addr_limit)
#define user_addr_max	get_fs

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)

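/*
 * __range_ok(addr, size) is true when the whole [addr, addr + size)
 * window lies below the current address limit.  Comparing addr against
 * (limit - size) rather than checking (addr + size) against the limit
 * avoids wrapping when addr + size would overflow.
 */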
#define __range_ok(addr, size)	\
	((size) <= get_fs() && (addr) <= (get_fs() - (size)))

#define access_ok(addr, size)	\
	__range_ok((unsigned long)(addr), (unsigned long)(size))

/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., they don't return a value as such).
 */
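
/*
 * Example usage (illustrative only; the handler and its argument are
 * hypothetical).  get_user() and put_user() perform the access_ok()
 * check themselves, so no separate call is needed:
 *
 *	static long example_handler(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		val++;
 *		return put_user(val, uptr);
 *	}
 */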

#define get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	__get_user_err((x), __p, (__gu_err));				\
	__gu_err;							\
})

#define __get_user_check(x, ptr, err)					\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})

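/*
 * The access size is dispatched at compile time: sizeof(*(ptr)) picks
 * the byte/halfword/word load (lbi/lhi/lwi) or the two-word sequence
 * for 64-bit values, and any other size trips BUILD_BUG().
 */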
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbi", __gu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__get_user_asm("lhi", __gu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__get_user_asm("lwi", __gu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__get_user_asm_dword(__gu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

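/*
 * Label 1: marks the load that may fault.  The __ex_table entry maps a
 * fault at 1: to the fixup at 3:, which sets err to -EFAULT, zeroes the
 * destination register and branches back to 2: on the main path.
 */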
#define __get_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	move	%1, #0\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err), "=&r" (x)					\
		: "r" (addr), "i" (-EFAULT)				\
		: "cc")

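/*
 * 64-bit values live in a register pair; the %L and %H operand
 * modifiers select the low and high word of that pair.  Which half
 * goes with the first memory word depends on endianness, chosen via
 * __NDS32_EB__ (big-endian) here and for the store variant below.
 */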
#ifdef __NDS32_EB__
#define __gu_reg_oper0 "%H1"
#define __gu_reg_oper1 "%L1"
#else
#define __gu_reg_oper0 "%L1"
#define __gu_reg_oper1 "%H1"
#endif

#define __get_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"			\
		"\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err), "=&r"(x)					\
		: "r"(addr), "i"(-EFAULT)				\
		: "cc")

#define put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_check((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	__put_user_err((x), __p, __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__put_user_err((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
})

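/*
 * Store-side counterpart of __get_user_err: the same compile-time size
 * dispatch, using the sbi/shi/swi stores.  Nothing needs zeroing on
 * failure because the destination is user memory.
 */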
#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("sbi", __pu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__put_user_asm("shi", __pu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__put_user_asm("swi", __pu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__put_user_asm_dword(__pu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
} while (0)

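/*
 * As in __get_user_asm, a fault at label 1: is redirected through the
 * __ex_table entry to the fixup at 3:, which only has to set err to
 * -EFAULT before resuming at 2:.
 */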
#define __put_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT)			\
		: "cc")

#ifdef __NDS32_EB__
#define __pu_reg_oper0 "%H2"
#define __pu_reg_oper1 "%L2"
#else
#define __pu_reg_oper0 "%L2"
#define __pu_reg_oper1 "%H2"
#endif

#define __put_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tswi " __pu_reg_oper0 ",[%1]\n"			\
		"\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err)						\
		: "r"(addr), "r"(x), "i"(-EFAULT)			\
		: "cc")

extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
extern unsigned long __arch_copy_from_user(void *to, const void __user *from,
					   unsigned long n);
extern unsigned long __arch_copy_to_user(void __user *to, const void *from,
					 unsigned long n);

#define raw_copy_from_user __arch_copy_from_user
#define raw_copy_to_user __arch_copy_to_user

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
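
/*
 * raw_copy_{from,to}_user() back the generic copy_from_user() and
 * copy_to_user() wrappers in <linux/uaccess.h>.  Illustrative use,
 * where kbuf and ubuf are hypothetical:
 *
 *	struct foo kbuf;
 *
 *	if (copy_from_user(&kbuf, ubuf, sizeof(kbuf)))
 *		return -EFAULT;
 */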

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(to, n);
	return n;
}

static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
	return __arch_clear_user(to, n);
}
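
/*
 * Example (illustrative; ubuf and len are hypothetical): clear_user()
 * returns the number of bytes that could not be cleared, so zero means
 * success:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */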

#endif /* _ASMNDS32_UACCESS_H */