/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2005-2017 Andes Technology Corporation

#ifndef _ASMNDS32_UACCESS_H
#define _ASMNDS32_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/types.h>
#include <linux/mm.h>

#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything goes well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or TLB entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);
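
/*
 * Illustrative sketch only (not compiled here): fixup_exception() can
 * search the sorted table for the faulting instruction address and, on
 * a hit, resume at the paired fixup address.  search_exception_tables()
 * is the generic kernel helper; the ipc (program counter) field is an
 * assumption about this port's pt_regs.
 */
#if 0
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(instruction_pointer(regs));
	if (fixup)
		regs->ipc = fixup->fixup;	/* continue out of line */

	return fixup != NULL;
}
#endif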

#define KERNEL_DS	((mm_segment_t) { ~0UL })
#define USER_DS		((mm_segment_t) { TASK_SIZE - 1 })

#define get_fs()	(current_thread_info()->addr_limit)
#define user_addr_max	get_fs

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;
}
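
/*
 * Illustrative sketch only (not compiled here): the classic pattern for
 * letting these routines act on kernel addresses is to widen addr_limit
 * around the access and always restore the old value.
 * read_kernel_int() is a hypothetical caller.
 */
#if 0
static long read_kernel_int(int *kptr)
{
	mm_segment_t old_fs = get_fs();
	int val;
	long err;

	set_fs(KERNEL_DS);	/* "user" accesses may now hit kernel memory */
	err = __get_user(val, (int __user *)kptr);
	set_fs(old_fs);		/* always restore the previous limit */

	return err ? err : val;
}
#endif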

#define segment_eq(a, b)	((a) == (b))

#define __range_ok(addr, size) (size <= get_fs() && addr <= (get_fs() - size))

#define access_ok(addr, size)	\
	__range_ok((unsigned long)addr, (unsigned long)size)
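
/*
 * Illustrative sketch only (not compiled here): callers of the unchecked
 * __arch_* and __xxx routines must validate the whole range with
 * access_ok() first.  copy_sample() is a hypothetical helper using
 * __arch_copy_from_user(), which is declared further down in this file
 * and returns the number of bytes left uncopied.
 */
#if 0
static unsigned long copy_sample(void *dst, const void __user *src,
				 unsigned long len)
{
	if (!access_ok(src, len))
		return len;	/* nothing copied */

	return __arch_copy_from_user(dst, src, len);
}
#endif
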
/*
 * Single-value transfer routines.  They automatically use the right
 * size if we just have the right pointer type.  Note that the functions
 * which read from user space (*get_*) need to take care not to leak
 * kernel data even if the calling code is buggy and fails to check
 * the return value.  This means zeroing out the destination variable
 * or buffer on error.  Normally this is done out of line by the
 * fixup code, but there are a few places where it intrudes on the
 * main code path.  When we only write to user space, there is no
 * problem.
 *
 * The "__xxx" versions of the user access functions do not verify the
 * address space - it must have been done previously with a separate
 * "access_ok()" call.
 *
 * The "xxx_error" versions set the third argument to -EFAULT if an
 * error occurs, and leave it unchanged on success.  Note that these
 * versions are void (i.e., they don't return a value as such).
 */
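
/*
 * Illustrative sketch only (not compiled here): get_user() returns 0 or
 * -EFAULT and, as described above, zeroes the destination on failure, so
 * 'flag' never leaks stale kernel data.  get_flag() is a hypothetical
 * caller.
 */
#if 0
static int get_flag(const int __user *uptr)
{
	int flag;

	if (get_user(flag, uptr))
		return -EFAULT;	/* flag was zeroed by the error path */

	return flag;
}
#endif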

#define get_user	__get_user

#define __get_user(x, ptr)						\
({									\
	long __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user_check(x, ptr, err)					\
({									\
	const __typeof__(*(ptr)) __user *__p = (ptr);			\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})

#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("lbi", __gu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__get_user_asm("lhi", __gu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__get_user_asm("lwi", __gu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__get_user_asm_dword(__gu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

#define __get_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move %0, %3\n"					\
		"	move %1, #0\n"					\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err), "=&r" (x)					\
		: "r" (addr), "i" (-EFAULT)				\
		: "cc")

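/*
 * %H and %L select the high and low register of the 64-bit operand
 * pair.  The first lwi below reads the word at the lower address, which
 * holds the high half on big-endian (__NDS32_EB__) and the low half on
 * little-endian, hence the flipped definitions.
 */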
#ifdef __NDS32_EB__
#define __gu_reg_oper0 "%H1"
#define __gu_reg_oper1 "%L1"
#else
#define __gu_reg_oper0 "%L1"
#define __gu_reg_oper1 "%H1"
#endif

#define __get_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tlwi " __gu_reg_oper0 ",[%2]\n"			\
		"\n2:\tlwi " __gu_reg_oper1 ",[%2+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err), "=&r"(x)					\
		: "r"(addr), "i"(-EFAULT)				\
		: "cc")

#define put_user	__put_user

#define __put_user(x, ptr)						\
({									\
	long __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__put_user_err((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
})
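
/*
 * Illustrative sketch only (not compiled here): note that in this header
 * put_user expands to __put_user, which does not itself perform
 * access_ok() (unlike get_user), so a careful caller checks the range
 * first.  put_flag() is a hypothetical helper.
 */
#if 0
static int put_flag(int __user *uptr, int flag)
{
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;

	return put_user(flag, uptr);	/* 0 on success, -EFAULT on fault */
}
#endif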

#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("sbi", __pu_val, (ptr), (err));		\
		break;							\
	case 2:								\
		__put_user_asm("shi", __pu_val, (ptr), (err));		\
		break;							\
	case 4:								\
		__put_user_asm("swi", __pu_val, (ptr), (err));		\
		break;							\
	case 8:								\
		__put_user_asm_dword(__pu_val, (ptr), (err));		\
		break;							\
	default:							\
		BUILD_BUG();						\
		break;							\
	}								\
} while (0)

#define __put_user_asm(inst, x, addr, err)				\
	__asm__ __volatile__ (						\
		"1:	"inst"	%1,[%2]\n"				\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"3:	move	%0, %3\n"				\
		"	b	2b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 3b\n"				\
		"	.previous"					\
		: "+r" (err)						\
		: "r" (x), "r" (addr), "i" (-EFAULT)			\
		: "cc")

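/*
 * Same endian-dependent high/low selection as above, here for the
 * 64-bit store operand (%2).
 */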
#ifdef __NDS32_EB__
#define __pu_reg_oper0 "%H2"
#define __pu_reg_oper1 "%L2"
#else
#define __pu_reg_oper0 "%L2"
#define __pu_reg_oper1 "%H2"
#endif

#define __put_user_asm_dword(x, addr, err)				\
	__asm__ __volatile__ (						\
		"\n1:\tswi " __pu_reg_oper0 ",[%1]\n"			\
		"\n2:\tswi " __pu_reg_oper1 ",[%1+4]\n"			\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.align	2\n"					\
		"4:	move	%0, %3\n"				\
		"	b	3b\n"					\
		"	.previous\n"					\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	3\n"					\
		"	.long	1b, 4b\n"				\
		"	.long	2b, 4b\n"				\
		"	.previous"					\
		: "+r"(err)						\
		: "r"(addr), "r"(x), "i"(-EFAULT)			\
		: "cc")

extern unsigned long __arch_clear_user(void __user *addr, unsigned long n);
extern long strncpy_from_user(char *dest, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
extern unsigned long __arch_copy_from_user(void *to, const void __user *from,
					   unsigned long n);
extern unsigned long __arch_copy_to_user(void __user *to, const void *from,
					 unsigned long n);

#define raw_copy_from_user __arch_copy_from_user
#define raw_copy_to_user __arch_copy_to_user

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

static inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(to, n);
	return n;
}
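
/*
 * Illustrative sketch only (not compiled here): clear_user() returns the
 * number of bytes it could not zero.  zero_pad() is a hypothetical helper
 * that clears the unused tail of a user buffer.
 */
#if 0
static int zero_pad(void __user *buf, unsigned long used, unsigned long size)
{
	if (used >= size)
		return 0;

	return clear_user(buf + used, size - used) ? -EFAULT : 0;
}
#endif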

static inline unsigned long __clear_user(void __user *to, unsigned long n)
{
	return __arch_clear_user(to, n);
}

#endif /* _ASMNDS32_UACCESS_H */