/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define HAVE_GET_KERNEL_NOFAULT

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = TASK_SIZE_MAX - 1;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
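
/*
 * Illustrative sketch only (not part of this header): ignoring the
 * tag-stripping above, the asm sequence computes the same result as the
 * following C, using 128-bit arithmetic to stand in for the 65-bit
 * addition (range_ok_sketch() is a hypothetical name):
 *
 *	static inline unsigned long range_ok_sketch(unsigned long addr,
 *						    unsigned long size)
 *	{
 *		return (unsigned __int128)addr + size <= TASK_SIZE_MAX;
 *	}
 *
 * The hand-written asm keeps the check branch-free and cheap.
 */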

#define access_ok(addr, size)	__range_ok(addr, size)

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_SWAPPER_OFFSET, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL: TCF0 affects EL0 and
 * TCF affects EL1, irrespective of which TTBR is used.
 * The kernel usually accesses TTBR0 with LDTR/STTR instructions when UAO
 * is available, so these act as EL0 accesses and are checked under TCF0.
 * However, the futex.h code uses exclusives, which execute as EL1
 * accesses; these can raise a tag check fault even if the user has
 * disabled TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable().
 *
 * The Tag Check Override (TCO) bit temporarily disables tag checking,
 * preventing the issue.
 */
static inline void __uaccess_disable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void __uaccess_enable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

/*
 * These functions disable tag checking only in MTE async mode, since in
 * sync mode tag check faults are reported synchronously and the nofault
 * or load_unaligned_zeropad fixup handlers can deal with them.
 */
static inline void __uaccess_disable_tco_async(void)
{
	if (system_uses_mte_async_or_asymm_mode())
		__uaccess_disable_tco();
}

static inline void __uaccess_enable_tco_async(void)
{
	if (system_uses_mte_async_or_asymm_mode())
		__uaccess_enable_tco();
}

static inline void uaccess_disable_privileged(void)
{
	__uaccess_disable_tco();

	if (uaccess_ttbr0_disable())
		return;

	__uaccess_enable_hw_pan();
}

static inline void uaccess_enable_privileged(void)
{
	__uaccess_enable_tco();

	if (uaccess_ttbr0_enable())
		return;

	__uaccess_disable_hw_pan();
}
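
/*
 * Typical usage (illustrative sketch, modelled on the arm64 futex code,
 * which brackets its LDXR/STXR sequences with these helpers so that PAN,
 * or the SW TTBR0 switch, and TCO are in the right state;
 * futex_atomic_op() is a hypothetical name):
 *
 *	uaccess_enable_privileged();
 *	ret = futex_atomic_op(op, oparg, uaddr);
 *	uaccess_disable_privileged();
 */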

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
 * user address. In case the pointer is tagged (has the top byte set), untag
 * the pointer before checking.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
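
/*
 * Roughly equivalent C (illustrative sketch only); the asm form avoids a
 * conditional branch so that a mispredicted bounds check cannot be used
 * to speculatively dereference an out-of-range pointer:
 *
 *	if (untagged_addr(ptr) & ~(TASK_SIZE_MAX - 1))
 *		safe_ptr = NULL;
 *	else
 *		safe_ptr = (void __user *)ptr;
 */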

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_mem(ldr, x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
	__typeof__(x) __rgu_val;					\
	__chk_user_ptr(ptr);						\
									\
	uaccess_ttbr0_enable();						\
	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);		\
	uaccess_ttbr0_disable();					\
									\
	(x) = __rgu_val;						\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
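
/*
 * Example usage (illustrative only): get_user() returns 0 on success and
 * -EFAULT on failure, and zeroes `x` when the access faults:
 *
 *	int __user *uaddr;
 *	int val;
 *
 *	if (get_user(val, uaddr))
 *		return -EFAULT;
 */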

/*
 * We must not call into the scheduler between __uaccess_enable_tco_async()
 * and __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __gkn_dst = (dst);				\
	__typeof__(src) __gkn_src = (src);				\
	int __gkn_err = 0;						\
									\
	__uaccess_enable_tco_async();					\
	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
		      (__force type *)(__gkn_src), __gkn_err);		\
	__uaccess_disable_tco_async();					\
									\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)
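
/*
 * __get_kernel_nofault() is normally reached via the generic
 * get_kernel_nofault() wrapper from <linux/uaccess.h> (illustrative
 * sketch only):
 *
 *	unsigned long val;
 *
 *	if (get_kernel_nofault(val, (unsigned long *)kaddr))
 *		return -EFAULT;	// the faulting load was suppressed
 */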

#define __put_mem_asm(store, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" store "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_mem(str, x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

/*
 * We must not call into the scheduler between uaccess_ttbr0_enable() and
 * uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
	__typeof__(*(ptr)) __rpu_val = (x);				\
	__chk_user_ptr(__rpu_ptr);					\
									\
	uaccess_ttbr0_enable();						\
	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);		\
	uaccess_ttbr0_disable();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
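
/*
 * Example usage (illustrative only), mirroring get_user() above:
 *
 *	if (put_user(val, uaddr))
 *		return -EFAULT;
 */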

/*
 * We must not call into the scheduler between __uaccess_enable_tco_async()
 * and __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __pkn_dst = (dst);				\
	__typeof__(src) __pkn_src = (src);				\
	int __pkn_err = 0;						\
									\
	__uaccess_enable_tco_async();					\
	__raw_put_mem("str", *((type *)(__pkn_src)),			\
		      (__force type *)(__pkn_dst), __pkn_err);		\
	__uaccess_disable_tco_async();					\
									\
	if (unlikely(__pkn_err))					\
		goto err_label;						\
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_ttbr0_enable();						\
	__acfu_ret = __arch_copy_from_user((to),			\
				      __uaccess_mask_ptr(from), (n));	\
	uaccess_ttbr0_disable();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_ttbr0_enable();						\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				    (from), (n));			\
	uaccess_ttbr0_disable();					\
	__actu_ret;							\
})
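
/*
 * These raw helpers are the arch backends for the generic
 * copy_{from,to}_user() wrappers in <linux/uaccess.h>, which add the
 * access_ok() check. A caller sketch (illustrative only); both return
 * the number of bytes that could not be copied, so 0 means success:
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */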

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_ttbr0_enable();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_ttbr0_disable();
	}
	return n;
}
#define clear_user	__clear_user
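
/*
 * Example (illustrative only): clear_user() returns the number of bytes
 * that could not be zeroed, so 0 means success:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */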

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#ifdef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/*
 * Return 0 on success, the number of bytes not probed otherwise.
 */
static inline size_t probe_subpage_writeable(const char __user *uaddr,
					     size_t size)
{
	if (!system_supports_mte())
		return 0;
	return mte_probe_user_range(uaddr, size);
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#endif /* __ASM_UACCESS_H */