/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

#ifdef CONFIG_MMU

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/extable.h>

/* We let the MMU do all checking */
static inline int access_ok(const void __user *ptr,
			    unsigned long size)
{
	unsigned long limit = TASK_SIZE;
	unsigned long addr = (unsigned long)ptr;

	if (IS_ENABLED(CONFIG_CPU_HAS_ADDRESS_SPACES) ||
	    !IS_ENABLED(CONFIG_MMU))
		return 1;

	return (size <= limit) && (addr <= (limit - size));
}
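
/*
 * Usage sketch (editorial addition, not from the original header): a caller
 * typically validates the whole user range once with access_ok() and then
 * uses the unchecked "__" accessors. The helper below is hypothetical.
 *
 *	static int example_read_user_u32(const u32 __user *uptr, u32 *out)
 *	{
 *		if (!access_ok(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		return __get_user(*out, uptr);
 *	}
 */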

/*
 * Not all variants of the 68k family support the notion of address spaces.
 * The traditional 680x0 parts do, and they use the sfc/dfc registers and
 * the "moves" instruction to access user space from kernel space. Other
 * family members like ColdFire don't support this, and only have a single
 * address space, and use the usual "move" instruction for user space access.
 *
 * Outside of this difference the user space access functions are the same.
 * So let's keep the code simple and just define what we need to use.
 */
#ifdef CONFIG_CPU_HAS_ADDRESS_SPACES
#define	MOVES	"moves"
#else
#define	MOVES	"move"
#endif
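
/*
 * Illustrative note (editorial addition): with CONFIG_CPU_HAS_ADDRESS_SPACES
 * an asm fragment such as "1:	"MOVES".l	%2,%1\n" pastes to
 * "1:	moves.l	%2,%1\n"; on ColdFire it pastes to "1:	move.l	%2,%1\n".
 * The accessor macros below are therefore written once in terms of MOVES
 * and work for both families.
 */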

#define __put_user_asm(inst, res, x, ptr, bwl, reg, err) \
asm volatile ("\n"					\
	"1:	"inst"."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))

#define __put_user_asm8(inst, res, x, ptr)			\
do {								\
	const void *__pu_ptr = (const void __force *)(ptr);	\
								\
	asm volatile ("\n"					\
		"1:	"inst".l %2,(%1)+\n"			\
		"2:	"inst".l %R2,(%1)\n"			\
		"3:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	movel %3,%0\n"				\
		"	jra 3b\n"				\
		"	.previous\n"				\
		"\n"						\
		"	.section __ex_table,\"a\"\n"		\
		"	.align 4\n"				\
		"	.long 1b,10b\n"				\
		"	.long 2b,10b\n"				\
		"	.long 3b,10b\n"				\
		"	.previous"				\
		: "+d" (res), "+a" (__pu_ptr)			\
		: "r" (x), "i" (-EFAULT)			\
		: "memory");					\
} while (0)

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, b, d, -EFAULT); \
		break;							\
	case 2:								\
		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, w, r, -EFAULT); \
		break;							\
	case 4:								\
		__put_user_asm(MOVES, __pu_err, __pu_val, ptr, l, r, -EFAULT); \
		break;							\
	case 8:								\
		__put_user_asm8(MOVES, __pu_err, __pu_val, ptr);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__pu_err;							\
})
#define put_user(x, ptr)	__put_user(x, ptr)
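
/*
 * Usage sketch (editorial addition): put_user() returns 0 on success or
 * -EFAULT if the store faulted; the operand size is picked from the pointer
 * type. The helper below is made up for the example.
 *
 *	static int example_report_count(unsigned int count,
 *					unsigned int __user *uptr)
 *	{
 *		return put_user(count, uptr);
 *	}
 */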


#define __get_user_asm(inst, res, x, ptr, type, bwl, reg, err) ({	\
	type __gu_val;							\
	asm volatile ("\n"						\
		"1:	"inst"."#bwl"	%2,%1\n"			\
		"2:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	move.l	%3,%0\n"				\
		"	sub.l	%1,%1\n"				\
		"	jra	2b\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10b\n"				\
		"	.previous"					\
		: "+d" (res), "=&" #reg (__gu_val)			\
		: "m" (*(ptr)), "i" (err));				\
	(x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val;	\
})

#define __get_user_asm8(inst, res, x, ptr)				\
do {									\
	const void *__gu_ptr = (const void __force *)(ptr);		\
	union {								\
		u64 l;							\
		__typeof__(*(ptr)) t;					\
	} __gu_val;							\
									\
	asm volatile ("\n"						\
		"1:	"inst".l (%2)+,%1\n"				\
		"2:	"inst".l (%2),%R1\n"				\
		"3:\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	move.l	%3,%0\n"				\
		"	sub.l	%1,%1\n"				\
		"	sub.l	%R1,%R1\n"				\
		"	jra	3b\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10b\n"				\
		"	.long	2b,10b\n"				\
		"	.previous"					\
		: "+d" (res), "=&r" (__gu_val.l),			\
		  "+a" (__gu_ptr)					\
		: "i" (-EFAULT)						\
		: "memory");						\
	(x) = __gu_val.t;						\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm(MOVES, __gu_err, x, ptr, u8, b, d, -EFAULT); \
		break;							\
	case 2:								\
		__get_user_asm(MOVES, __gu_err, x, ptr, u16, w, r, -EFAULT); \
		break;							\
	case 4:								\
		__get_user_asm(MOVES, __gu_err, x, ptr, u32, l, r, -EFAULT); \
		break;							\
	case 8:								\
		__get_user_asm8(MOVES, __gu_err, x, ptr);		\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	__gu_err;							\
})
#define get_user(x, ptr) __get_user(x, ptr)
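
/*
 * Usage sketch (editorial addition): get_user() reads one value of the
 * pointed-to type and returns 0 or -EFAULT; on a fault the destination is
 * zeroed by the fixup code above. The helper below is made up.
 *
 *	static int example_read_flag(const u8 __user *uptr, u8 *flag)
 *	{
 *		return get_user(*flag, uptr);
 *	}
 */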

unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __suffix0
#define __suffix1 b
#define __suffix2 w
#define __suffix4 l

#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	"MOVES"."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"2:	"MOVES"."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	"MOVES"."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	addq.l #"#n1",%0\n"				\
		"	.ifnc	\""#s2"\",\"\"\n"			\
		"20:	addq.l #"#n2",%0\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	addq.l #"#n3",%0\n"				\
		"	.endif\n"					\
		"	.endif\n"					\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\
	____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)
#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3)	\
	___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3,  \
					__suffix##n1, __suffix##n2, __suffix##n3)

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0);
		break;
	case 2:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 0);
		break;
	case 4:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}
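
/*
 * Illustrative expansion (editorial addition): for a constant n == 7 the
 * call above becomes __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1),
 * which pastes __suffix4/__suffix2/__suffix1 into l/w/b, i.e. one longword,
 * one word and one byte move. If a user access faults, the fixup code adds
 * the size of the faulting chunk plus every following chunk to res, so the
 * caller gets back the number of bytes that were not copied.
 */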

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	"MOVES"."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	"MOVES"."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	"MOVES"."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(MOVES, res, *(u8 *)from, (u8 __user *)to,
				b, d, 1);
		break;
	case 2:
		__put_user_asm(MOVES, res, *(u16 *)from, (u16 __user *)to,
				w, r, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(MOVES, res, *(u32 *)from, (u32 __user *)to,
				l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_from_user(to, from, n);
	return __generic_copy_from_user(to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n))
		return __constant_copy_to_user(to, from, n);
	return __generic_copy_to_user(to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
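
/*
 * Usage sketch (editorial addition): callers normally reach these through the
 * generic copy_from_user()/copy_to_user() wrappers in <linux/uaccess.h>; both
 * raw helpers return the number of bytes that could not be copied (0 on full
 * success). The struct and function below are made up for the example.
 *
 *	struct example_args {
 *		u32 flags;
 *		u32 len;
 *	};
 *
 *	static int example_fetch_args(struct example_args *args,
 *				      const void __user *uptr)
 *	{
 *		if (copy_from_user(args, uptr, sizeof(*args)))
 *			return -EFAULT;
 *		return 0;
 *	}
 */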

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type *__gk_dst = (type *)(dst);					\
	type *__gk_src = (type *)(src);					\
	int __gk_err = 0;						\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src,	\
				u8, b, d, -EFAULT);			\
		break;							\
	case 2:								\
		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src,	\
				u16, w, r, -EFAULT);			\
		break;							\
	case 4:								\
		__get_user_asm("move", __gk_err, *__gk_dst, __gk_src,	\
				u32, l, r, -EFAULT);			\
		break;							\
	case 8:								\
		__get_user_asm8("move", __gk_err, *__gk_dst, __gk_src);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	if (unlikely(__gk_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	type __pk_src = *(type *)(src);					\
	type *__pk_dst = (type *)(dst);					\
	int __pk_err = 0;						\
									\
	switch (sizeof(type)) {						\
	case 1:								\
		__put_user_asm("move", __pk_err, __pk_src, __pk_dst,	\
				b, d, -EFAULT);				\
		break;							\
	case 2:								\
		__put_user_asm("move", __pk_err, __pk_src, __pk_dst,	\
				w, r, -EFAULT);				\
		break;							\
	case 4:								\
		__put_user_asm("move", __pk_err, __pk_src, __pk_dst,	\
				l, r, -EFAULT);				\
		break;							\
	case 8:								\
		__put_user_asm8("move", __pk_err, __pk_src, __pk_dst);	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	if (unlikely(__pk_err))						\
		goto err_label;						\
} while (0)
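
/*
 * Usage sketch (editorial addition): the *_kernel_nofault() macros jump to
 * the caller-supplied label on a fault instead of returning an error code;
 * the core kernel's get_kernel_nofault()/copy_from_kernel_nofault() helpers
 * are the usual callers. The function below is made up for the example.
 *
 *	static int example_peek_kernel_u32(const u32 *addr, u32 *val)
 *	{
 *		__get_kernel_nofault(val, addr, u32, Efault);
 *		return 0;
 *	Efault:
 *		return -EFAULT;
 *	}
 */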

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __clear_user(void __user *to, unsigned long n);

#define clear_user	__clear_user

#else /* !CONFIG_MMU */
#include <asm-generic/uaccess.h>
#endif

#endif /* __M68K_UACCESS_H */