/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

#ifdef CONFIG_SET_FS
/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}
#else /* CONFIG_SET_FS */
typedef struct {
	/* empty dummy */
} mm_segment_t;

#ifndef TASK_SIZE_MAX
#define TASK_SIZE_MAX			TASK_SIZE
#endif

#define uaccess_kernel()		(false)
#define user_addr_max()			(TASK_SIZE_MAX)

static inline mm_segment_t force_uaccess_begin(void)
{
	return (mm_segment_t) { };
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
}
#endif /* CONFIG_SET_FS */

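/*
 * Illustration only (not part of this header): code running with a
 * lingering set_fs(KERNEL_DS) brackets genuine userspace accesses with the
 * force_uaccess helpers.  A minimal sketch:
 *
 *	mm_segment_t oldfs = force_uaccess_begin();
 *
 *	// ... copy_{to,from}_user() against real user addresses ...
 *
 *	force_uaccess_end(oldfs);
 */
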
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user() the to argument always points to kernel memory,
 * so no faults should happen on store; the interpretation of from is
 * affected by set_fs().  For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, the __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

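/*
 * Illustration only (not part of this header): because only copy_from_user()
 * zero-pads on a short copy, callers of the double-underscore variants must
 * treat any non-zero return as a partial, non-padded destination.  A minimal
 * sketch:
 *
 *	if (!access_ok(uptr, sizeof(buf)))
 *		return -EFAULT;
 *	if (__copy_from_user(buf, uptr, sizeof(buf)))
 *		return -EFAULT;	// buf tail is NOT zeroed on short copy
 */
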
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space pages are pinned,
 * so that we do not take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
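
/*
 * Illustration only (not part of this header): typical driver-side use,
 * where any non-zero return (bytes left uncopied) is reported as -EFAULT.
 * A minimal sketch with a hypothetical ioctl argument struct foo_args:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uarg, sizeof(args)))
 *		return -EFAULT;
 *	args.result = do_foo(&args);	// do_foo() is hypothetical
 *	if (copy_to_user(uarg, &args, sizeof(args)))
 *		return -EFAULT;
 */
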
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif

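/*
 * Illustration only (not part of this header): on architectures that opt in,
 * a non-zero copy_mc_to_kernel() return is the number of bytes left
 * uncopied after a memory error was consumed.  A minimal pmem-style sketch:
 *
 *	rem = copy_mc_to_kernel(buf, pmem_addr, len);
 *	if (rem)
 *		return -EIO;
 */
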
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

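/*
 * Illustration only (not part of this header): a common atomic-context
 * pattern pairs pagefault_disable() with an _inatomic copy, so a fault
 * fails fast instead of sleeping, with a sleeping fallback afterwards:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, uaddr, len);
 *	pagefault_enable();
 *	if (ret)
 *		ret = copy_from_user(dst, uaddr, len);	// may sleep
 */
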
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler: with
 * !CONFIG_PREEMPT_COUNT it is a no-op, so the handler won't actually be
 * disabled, and in_atomic() will report different values depending on
 * CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

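/*
 * Illustration only (not part of this header): get_kernel_nofault() lets
 * e.g. debuggers and tracers probe a possibly-bogus kernel pointer without
 * oopsing.  A minimal sketch with a hypothetical pointer 'addr':
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		pr_info("address %px is not readable\n", addr);
 */
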
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

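/*
 * Illustration only (not part of this header): the unsafe_*() accessors
 * amortize a single user_access_begin() check over several accesses and
 * jump to the error label on fault.  A minimal sketch:
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_put_user(a, &uptr[0], efault);
 *	unsafe_put_user(b, &uptr[1], efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
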
#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */