// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>
#include <linux/page_size_compat.h>

#include <linux/uaccess.h>

#include <kunit/visibility.h>

#include "internal.h"
#include "swap.h"

#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
noinline
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section otherwise
 * fallback to kstrdup.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
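
/*
 * Illustrative sketch (not part of this file): kstrdup_const() pairs with
 * kfree_const(), so names that live in .rodata are never copied.  The
 * "attr" structure and its "name" field below are hypothetical.
 *
 *	attr->name = kstrdup_const(name, GFP_KERNEL);
 *	if (!attr->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(attr->name);
 */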

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup_noprof);
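
/*
 * Illustrative sketch (not part of this file): callers normally use the
 * kmemdup() wrapper rather than kmemdup_noprof() directly, e.g. to take a
 * private copy of a caller-provided template.  "tmpl" and "len" are
 * hypothetical.
 *
 *	struct foo *copy = kmemdup(tmpl, len, GFP_KERNEL);
 *	if (!copy)
 *		return -ENOMEM;
 *	...
 *	kfree(copy);
 */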

/**
 * kmemdup_array - duplicate a given array.
 *
 * @src: array to duplicate.
 * @count: number of elements to duplicate from array.
 * @element_size: size of each element of array.
 * @gfp: GFP mask to use.
 *
 * Return: duplicated array of @src or %NULL in case of error,
 * result is physically contiguous. Use kfree() to free.
 */
void *kmemdup_array(const void *src, size_t count, size_t element_size, gfp_t gfp)
{
	return kmemdup(src, size_mul(element_size, count), gfp);
}
EXPORT_SYMBOL(kmemdup_array);

/**
 * kvmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error,
 * result may be not physically contiguous. Use kvfree() to free.
 */
void *kvmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kvmalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kvmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

static kmem_buckets *user_buckets __ro_after_init;

static int __init init_user_buckets(void)
{
	user_buckets = kmem_buckets_create("memdup_user", 0, 0, INT_MAX, NULL);

	return 0;
}
subsys_initcall(init_user_buckets);

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_alloc_track_caller(user_buckets, len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
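
/*
 * Illustrative sketch (not part of this file): a typical ioctl-style path
 * that duplicates a fixed-size user buffer.  "argp" and "size" are
 * hypothetical.
 *
 *	void *buf = memdup_user(argp, size);
 *
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 *	...
 *	kfree(buf);
 */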

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may be not
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmem_buckets_valloc(user_buckets, len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
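
/*
 * Illustrative sketch (not part of this file): a write() handler that wants
 * a NUL-terminated kernel copy of the user buffer before parsing it.
 * "ubuf", "count" and "val" are hypothetical.
 *
 *	char *kbuf = memdup_user_nul(ubuf, count);
 *	int ret;
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	ret = kstrtoul(kbuf, 0, &val);
 *	kfree(kbuf);
 */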

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= __PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return __PAGE_ALIGN(stack_top) + random_variable;
#else
	return __PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (__offset_in_page(start)) {
		range -= __PAGE_ALIGN(start) - start;
		start = __PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= __PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << __PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long __weak arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	/* On parisc the stack always grows up - so an unlimited stack should
	 * not be an indicator to use the legacy memory layout. */
	if (rlim_stack->rlim_cur == RLIM_INFINITY &&
		!IS_ENABLED(CONFIG_STACK_GROWSUP))
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
#ifdef CONFIG_STACK_GROWSUP
	/*
	 * For an upwards growing stack the calculation is much simpler.
	 * Memory for the maximum stack size is reserved at the top of the
	 * task. mmap_base starts directly below the stack and grows
	 * downwards.
	 */
	return PAGE_ALIGN_DOWN(mmap_upper_limit(rlim_stack) - rnd);
#else
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP && MIN_GAP < MAX_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
#endif
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		clear_bit(MMF_TOPDOWN, &mm->flags);
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		set_bit(MMF_TOPDOWN, &mm->flags);
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	clear_bit(MMF_TOPDOWN, &mm->flags);
}
#endif
#ifdef CONFIG_MMU
EXPORT_SYMBOL_IF_KUNIT(arch_pick_mmap_layout);
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
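
/*
 * Illustrative sketch (not part of this file): a driver charging pages it is
 * about to pin against RLIMIT_MEMLOCK, and undoing the charge on its release
 * path.  "npages" is hypothetical.
 *
 *	ret = account_locked_vm(current->mm, npages, true);
 *	if (ret)
 *		return ret;
 *	...
 *	account_locked_vm(current->mm, npages, false);
 */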

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, 0, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	trace_android_vh_check_mmap_file(file, prot, flag, ret);
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
{
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		flags |= __GFP_NOWARN;

		if (!(flags & __GFP_RETRY_MAYFAIL))
			flags |= __GFP_NORETRY;

		/* nofail semantic is implemented by the vmalloc fallback */
		flags &= ~__GFP_NOFAIL;
	}

	trace_android_vh_adjust_kvmalloc_flags(get_order(size), &flags);

	return flags;
}

/**
 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @b: which set of kmalloc buckets to allocate from.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
{
	void *ret;
	bool use_vmalloc = false;

	trace_android_vh_kvmalloc_node_use_vmalloc(size, &flags, &use_vmalloc);
	if (use_vmalloc)
		goto use_vmalloc_node;
	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	ret = __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, b),
				    kmalloc_gfp_adjust(flags, size),
				    node);
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* non-sleeping allocations are not supported by vmalloc */
	if (!gfpflags_allow_blocking(flags))
		return NULL;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	/*
	 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
	 * since the callers already cannot assume anything
	 * about the resulting pointer, and cannot play
	 * protection games.
	 */
use_vmalloc_node:
	return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
			node, __builtin_return_address(0));
}
EXPORT_SYMBOL(__kvmalloc_node_noprof);

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
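
/*
 * Illustrative sketch (not part of this file): the usual kvmalloc()/kvfree()
 * pairing for a table whose size is controlled by the caller and may exceed
 * what kmalloc() can satisfy.  "nr" and "struct foo" are hypothetical.
 *
 *	struct foo *table = kvcalloc(nr, sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	...
 *	kvfree(table);
 */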

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

/**
 * kvrealloc - reallocate memory; contents remain unchanged
 * @p: object to reallocate memory for
 * @size: the size to reallocate
 * @flags: the flags for the page level allocator
 *
 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
 * and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
 * initial memory allocation, every subsequent call to this API for the same
 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
 * __GFP_ZERO is not fully honored by this API.
 *
 * In any case, the contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.
 *
 * This function must not be called concurrently with itself or kvfree() for the
 * same memory allocation.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
{
	void *n;

	if (is_vmalloc_addr(p))
		return vrealloc_noprof(p, size, flags);

	n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
	if (!n) {
		/* We failed to krealloc(), fall back to kvmalloc(). */
		n = kvmalloc_noprof(size, flags);
		if (!n)
			return NULL;

		if (p) {
			/* We already know that `p` is not a vmalloc address. */
			kasan_disable_current();
			memcpy(n, kasan_reset_tag(p), ksize(p));
			kasan_enable_current();

			kfree(p);
		}
	}

	return n;
}
EXPORT_SYMBOL(kvrealloc_noprof);
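
/*
 * Illustrative sketch (not part of this file): growing a kvmalloc()'ed
 * buffer via the kvrealloc() wrapper; on failure the original buffer is
 * left untouched.  Per the rules above, the same allocation must not be
 * resized or freed concurrently.  "buf" and "new_size" are hypothetical.
 *
 *	tmp = kvrealloc(buf, new_size, GFP_KERNEL);
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 */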

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc_noprof(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array_noprof);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array_noprof);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array_noprof(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc_noprof);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc_noprof(size_t n, size_t size)
{
	return __vmalloc_array_noprof(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc_noprof);

struct anon_vma *folio_anon_vma(struct folio *folio)
{
	unsigned long mapping = (unsigned long)folio->mapping;

	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return (void *)(mapping - PAGE_MAPPING_ANON);
}

/**
 * folio_mapping - Find the mapping where this folio is stored.
 * @folio: The folio.
 *
 * For folios which are in the page cache, return the mapping that this
 * page belongs to.  Folios in the swap cache return the swap mapping
 * this page is stored in (which is different from the mapping for the
 * swap file or swap device where the data is stored).
 *
 * You can call this for folios which aren't in the swap cache or page
 * cache and it will return NULL.
 */
struct address_space *folio_mapping(struct folio *folio)
{
	struct address_space *mapping;

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(folio_test_slab(folio)))
		return NULL;

	if (unlikely(folio_test_swapcache(folio)))
		return swap_address_space(folio->swap);

	mapping = folio->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;

	return mapping;
}
EXPORT_SYMBOL(folio_mapping);

/**
 * folio_copy - Copy the contents of one folio to another.
 * @dst: Folio to copy to.
 * @src: Folio to copy from.
 *
 * The bytes in the folio represented by @src are copied to @dst.
 * Assumes the caller has validated that @dst is at least as large as @src.
 * Can be called in atomic context for order-0 folios, but if the folio is
 * larger, it may sleep.
 */
void folio_copy(struct folio *dst, struct folio *src)
{
	long i = 0;
	long nr = folio_nr_pages(src);

	for (;;) {
		copy_highpage(folio_page(dst, i), folio_page(src, i));
		if (++i == nr)
			break;
		cond_resched();
	}
}
EXPORT_SYMBOL(folio_copy);

int folio_mc_copy(struct folio *dst, struct folio *src)
{
	long nr = folio_nr_pages(src);
	long i = 0;

	for (;;) {
		if (copy_mc_highpage(folio_page(dst, i), folio_page(src, i)))
			return -EHWPOISON;
		if (++i == nr)
			break;
		cond_resched();
	}

	return 0;
}
EXPORT_SYMBOL(folio_mc_copy);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users rarely switch to OVERCOMMIT_NEVER), the switch
	 * is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(const struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for big
 * platform like a 2S/36C/72T Skylake server, in worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/mm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;
	unsigned long bytes_failed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	bytes_failed = pages << PAGE_SHIFT;
	pr_warn_ratelimited("%s: pid: %d, comm: %s, bytes: %lu not enough memory for the allocation\n",
			    __func__, current->pid, current->comm, bytes_failed);
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_local_page(page1);
	addr2 = kmap_local_page(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_local(addr2);
	kunmap_local(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_dump_obj(object))
		return;

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);

#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio)
{
	long i, nr = folio_nr_pages(folio);

	for (i = 0; i < nr; i++)
		flush_dcache_page(folio_page(folio, i));
}
EXPORT_SYMBOL(flush_dcache_folio);
#endif