// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * kfree() is called only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in the .rodata section, otherwise it falls
 * back to kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);

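/*
 * Illustrative sketch (not part of the original file): pairing
 * kstrdup_const() with kfree_const() for a name that is usually a string
 * literal living in .rodata.  The struct and function names are hypothetical.
 */
struct demo_object {
	const char *name;
};

static __maybe_unused int demo_object_set_name(struct demo_object *obj,
					       const char *name)
{
	obj->name = kstrdup_const(name, GFP_KERNEL);
	if (!obj->name)
		return -ENOMEM;
	return 0;
}

static __maybe_unused void demo_object_release(struct demo_object *obj)
{
	/* Must be freed with kfree_const(); never pass to krealloc(). */
	kfree_const(obj->name);
}
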
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);

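/*
 * Illustrative sketch (not part of the original file): as the kstrndup()
 * note above suggests, kmemdup_nul() is preferable when the length of the
 * source data is already known, since it avoids the strnlen() walk that
 * kstrndup() performs.  The helper name is hypothetical.
 */
static __maybe_unused char *demo_copy_token(const char *data, size_t data_len)
{
	/* Exact length known: use kmemdup_nul() rather than kstrndup(). */
	return kmemdup_nul(data, data_len, GFP_KERNEL);
}
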
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);

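/*
 * Illustrative sketch (not part of the original file): typical ERR_PTR()
 * handling for memdup_user() in an ioctl-style handler.  The struct and
 * function names are hypothetical.
 */
struct demo_request {
	u32 flags;
	u32 len;
};

static __maybe_unused int demo_copy_request(const void __user *argp)
{
	struct demo_request *req;

	req = memdup_user(argp, sizeof(*req));
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... use req->flags / req->len ... */

	kfree(req);
	return 0;
}
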
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.  Result may not be
 * physically contiguous.  Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);

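/*
 * Illustrative sketch (not part of the original file): a proc/sysfs-style
 * write handler that needs the user buffer as a NUL-terminated string,
 * e.g. for kstrtoul().  The function name is hypothetical.
 */
static __maybe_unused ssize_t demo_write(const char __user *ubuf, size_t count)
{
	unsigned long val;
	char *kbuf;
	int ret;

	kbuf = memdup_user_nul(ubuf, count);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	ret = kstrtoul(kbuf, 0, &val);
	kfree(kbuf);

	return ret ? ret : count;
}
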
/* Link @vma into @mm's VMA list after @prev (or at the head if @prev is NULL). */
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		next = mm->mmap;
		mm->mmap = vma;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}

/* Unlink @vma from @mm's VMA list, fixing up the neighbouring entries. */
void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
{
	struct vm_area_struct *prev, *next;

	next = vma->vm_next;
	prev = vma->vm_prev;
	if (prev)
		prev->vm_next = next;
	else
		mm->mmap = next;
	if (next)
		next->vm_prev = prev;
}

/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
	struct task_struct * __maybe_unused t = current;

	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Change backing file, only valid to use during initial VMA setup.
 */
void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	/* Changing an anonymous vma with this is illegal */
	get_file(file);
	swap(vma->vm_file, file);
	fput(file);
}
EXPORT_SYMBOL(vma_set_file);

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))     /* 8MB of VA */
#endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP		(SZ_128M)
#define MAX_GAP		(STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 * @task:        task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0       on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm:          mm to account against, may be NULL
 * @pages:       number of pages to account
 * @inc:         %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0       on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);

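/*
 * Illustrative sketch (not part of the original file): a driver that pins
 * user pages typically charges them against RLIMIT_MEMLOCK up front and
 * un-charges them again on release.  The function name is hypothetical.
 */
static __maybe_unused int demo_pin_charge(unsigned long npages)
{
	int ret;

	ret = account_locked_vm(current->mm, npages, true);
	if (ret)
		return ret;	/* -ENOMEM: RLIMIT_MEMLOCK would be exceeded */

	/* ... pin the pages and do the work ... */

	account_locked_vm(current->mm, npages, false);
	return 0;
}
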
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	trace_android_vh_check_mmap_file(file, prot, flag, ret);
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Note: if the gfp flags are not fully compatible with GFP_KERNEL, the
 * allocation does not fall back to vmalloc and behaves like a plain
 * kmalloc_node() call.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;
	bool use_vmalloc = false;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
	if (use_vmalloc)
		goto use_vmalloc_node;
	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contribute to a long term fragmentation less than vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fallback to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

use_vmalloc_node:
	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);

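/*
 * Illustrative sketch (not part of the original file): kvmalloc-family
 * allocation for a possibly-large table that does not need to be physically
 * contiguous, always released with kvfree().  The names are hypothetical.
 */
static __maybe_unused unsigned long *demo_alloc_table(size_t nr_entries)
{
	/* May be served by kmalloc or, for large sizes, by vmalloc. */
	return kvmalloc_array(nr_entries, sizeof(unsigned long),
			      GFP_KERNEL | __GFP_ZERO);
}

static __maybe_unused void demo_free_table(unsigned long *table)
{
	/* kvfree() handles both the kmalloc and the vmalloc case. */
	kvfree(table);
}
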
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);

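/*
 * Illustrative sketch (not part of the original file): releasing key
 * material with kvfree_sensitive() so the buffer is wiped before being
 * returned to the allocator.  The struct and function are hypothetical.
 */
struct demo_key {
	void *data;
	size_t len;
};

static __maybe_unused void demo_key_destroy(struct demo_key *key)
{
	kvfree_sensitive(key->data, key->len);
	key->data = NULL;
	key->len = 0;
}
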
void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);

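/*
 * Illustrative sketch (not part of the original file): allocating a zeroed
 * array with vcalloc() instead of open-coding "n * size", so a multiplication
 * overflow yields NULL rather than an undersized buffer.  The struct and
 * function names are hypothetical.
 */
struct demo_entry {
	u64 key;
	u64 value;
};

static __maybe_unused struct demo_entry *demo_alloc_entries(size_t n)
{
	return vcalloc(n, sizeof(struct demo_entry));
}
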
/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

void copy_huge_page(struct page *dst, struct page *src)
{
	unsigned i, nr = compound_nr(src);

	for (i = 0; i < nr; i++) {
		cond_resched();
		copy_highpage(nth_page(dst, i), nth_page(src, i));
	}
}

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of vm_committed_as can be large with a loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS.  When switching to the
	 * strict OVERCOMMIT_NEVER, the deviation must be reduced to comply
	 * with "NEVER", and to avoid a possible race (even though users
	 * rarely switch to OVERCOMMIT_NEVER), the switch is done in the
	 * following order:
	 *	1. change the batch
	 *	2. sync the percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in its own cacheline and does not share a
 * cacheline with other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for big
 * platform like a 2S/36C/72T Skylake server, in worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:     the task whose cmdline value to copy.
 * @buffer:   the buffer to copy to.
 * @buflen:   the length of the buffer. Larger cmdline values are truncated
 *            to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NULL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;
	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

#ifdef CONFIG_PRINTK
/**
 * mem_dump_obj - Print available provenance information
 * @object: object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate.  The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For example, for a slab-cache object, the slab name is printed, and,
 * if available, the return address and stack trace from the allocation
 * and last free path of that object.
 */
void mem_dump_obj(void *object)
{
	const char *type;

	if (kmem_valid_obj(object)) {
		kmem_dump_obj(object);
		return;
	}

	if (vmalloc_dump_obj(object))
		return;

	if (is_vmalloc_addr(object))
		type = "vmalloc memory";
	else if (virt_addr_valid(object))
		type = "non-slab/vmalloc memory";
	else if (object == NULL)
		type = "NULL pointer";
	else if (object == ZERO_SIZE_PTR)
		type = "zero-size pointer";
	else
		type = "non-paged memory";

	pr_cont(" %s\n", type);
}
EXPORT_SYMBOL_GPL(mem_dump_obj);
#endif

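/*
 * Illustrative sketch (not part of the original file): mem_dump_obj()
 * continues the current log line, so the caller prints its preamble first.
 * The function name is hypothetical.
 */
static __maybe_unused void demo_report_bad_pointer(void *ptr)
{
	pr_info("demo: unexpected pointer %px:", ptr);
	mem_dump_obj(ptr);	/* appends e.g. " vmalloc memory" or slab info */
}
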
/*
 * A driver might set a page logically offline -- PageOffline() -- and
 * turn the page inaccessible in the hypervisor; after that, access to page
 * content can be fatal.
 *
 * Some special PFN walkers -- i.e., /proc/kcore -- read content of random
 * pages after checking PageOffline(); however, these PFN walkers can race
 * with drivers that set PageOffline().
 *
 * page_offline_freeze()/page_offline_thaw() allows for a subsystem to
 * synchronize with such drivers, achieving that a page cannot be set
 * PageOffline() while frozen.
 *
 * page_offline_begin()/page_offline_end() is used by drivers that care about
 * such races when setting a page PageOffline().
 */
static DECLARE_RWSEM(page_offline_rwsem);

void page_offline_freeze(void)
{
	down_read(&page_offline_rwsem);
}

void page_offline_thaw(void)
{
	up_read(&page_offline_rwsem);
}

void page_offline_begin(void)
{
	down_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_begin);

void page_offline_end(void)
{
	up_write(&page_offline_rwsem);
}
EXPORT_SYMBOL(page_offline_end);
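
/*
 * Illustrative sketch (not part of the original file): a PFN walker bracketing
 * its PageOffline() checks with page_offline_freeze()/page_offline_thaw(), in
 * the style described above.  The function name is hypothetical.
 */
static __maybe_unused void demo_walk_pfn_range(unsigned long start_pfn,
					       unsigned long nr_pages)
{
	unsigned long pfn;

	page_offline_freeze();
	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (PageOffline(page))
			continue;	/* do not touch offline page content */

		/* ... safely read the page content here ... */
	}
	page_offline_thaw();
}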