// SPDX-License-Identifier: GPL-2.0-only
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/processor.h>
#include <linux/sizes.h>
#include <linux/compat.h>

#include <linux/uaccess.h>

#include "internal.h"
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif

/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Function calls kfree only if @x is not in .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);

/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
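
/*
 * Usage sketch (illustrative only, not part of this file): duplicate a
 * caller-supplied string for long-term storage and release it with kfree().
 * The identifiers "dev_name" and "label" are hypothetical.
 *
 *	char *label = kstrdup(dev_name, GFP_KERNEL);
 *
 *	if (!label)
 *		return -ENOMEM;
 *	...
 *	kfree(label);
 */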

/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Strings allocated by kstrdup_const should be freed by kfree_const and
 * must not be passed to krealloc().
 *
 * Return: source string if it is in .rodata section, otherwise a copy
 * allocated with kstrdup().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
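
/*
 * Usage sketch (illustrative only): names that frequently point at string
 * literals can be duplicated with kstrdup_const() and must then be released
 * with kfree_const(). The "tmpl" variable and "name" field are hypothetical.
 *
 *	obj->name = kstrdup_const(tmpl->name, GFP_KERNEL);
 *	if (!obj->name)
 *		return -ENOMEM;
 *	...
 *	kfree_const(obj->name);
 */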

/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 *
 * Return: newly allocated copy of @s or %NULL in case of error
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);

/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 *
 * Return: newly allocated copy of @src or %NULL in case of error
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);

/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Return: newly allocated copy of @s with NUL-termination or %NULL in
 * case of error
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
	char *buf;

	if (!s)
		return NULL;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
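
/*
 * Usage sketch (illustrative only): turn a counted, unterminated byte range
 * into a proper C string. "data" and "count" are hypothetical identifiers.
 *
 *	char *str = kmemdup_nul(data, count, GFP_KERNEL);
 *
 *	if (!str)
 *		return -ENOMEM;
 *	...
 *	kfree(str);
 */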

/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
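
/*
 * Usage sketch (illustrative only): callers must check the ERR_PTR() return
 * rather than testing for NULL. The identifiers "uarg" and "argsz" are
 * hypothetical parameters of an ioctl-style handler.
 *
 *	void *kbuf = memdup_user(uarg, argsz);
 *
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	...
 *	kfree(kbuf);
 */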

/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure. Result may not be
 * physically contiguous. Use kvfree() to free.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
	void *p;

	p = kvmalloc(len, GFP_USER);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kvfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(vmemdup_user);

/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Return: newly allocated copy of @s or an ERR_PTR() in case of error
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
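
/*
 * Usage sketch (illustrative only): copy a user-supplied path name with a
 * sane upper bound and release it with kfree(). "upath" is hypothetical.
 *
 *	char *path = strndup_user(upath, PATH_MAX);
 *
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...
 *	kfree(path);
 */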

/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Return: an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
	char *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len + 1, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}
	p[len] = '\0';

	return p;
}
EXPORT_SYMBOL(memdup_user_nul);
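
/*
 * Usage sketch (illustrative only): a procfs/debugfs-style write handler
 * often wants the user data as a NUL-terminated string it can parse.
 * "ubuf" and "count" are hypothetical parameters of such a handler.
 *
 *	char *cmd = memdup_user_nul(ubuf, count);
 *
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	...parse cmd...
 *	kfree(cmd);
 */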
277
__vma_link_list(struct mm_struct * mm,struct vm_area_struct * vma,struct vm_area_struct * prev)278 void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
279 struct vm_area_struct *prev)
280 {
281 struct vm_area_struct *next;
282
283 vma->vm_prev = prev;
284 if (prev) {
285 next = prev->vm_next;
286 prev->vm_next = vma;
287 } else {
288 next = mm->mmap;
289 mm->mmap = vma;
290 }
291 vma->vm_next = next;
292 if (next)
293 next->vm_prev = vma;
294 }
295
__vma_unlink_list(struct mm_struct * mm,struct vm_area_struct * vma)296 void __vma_unlink_list(struct mm_struct *mm, struct vm_area_struct *vma)
297 {
298 struct vm_area_struct *prev, *next;
299
300 next = vma->vm_next;
301 prev = vma->vm_prev;
302 if (prev)
303 prev->vm_next = next;
304 else
305 mm->mmap = next;
306 if (next)
307 next->vm_prev = prev;
308 }
309
310 /* Check if the vma is being used as a stack by this task */
vma_is_stack_for_current(struct vm_area_struct * vma)311 int vma_is_stack_for_current(struct vm_area_struct *vma)
312 {
313 struct task_struct * __maybe_unused t = current;
314
315 return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
316 }
317
318 #ifndef STACK_RND_MASK
319 #define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12)) /* 8MB of VA */
320 #endif

unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start: The smallest acceptable address the caller will take.
 * @range: The size of the area, starting at @start, within which the
 *         random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

#ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	/* Is the current task 32bit ? */
	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
		return randomize_page(mm->brk, SZ_32M);

	return randomize_page(mm->brk, SZ_1G);
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
	if (is_compat_task())
		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
	else
#endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(arch_mmap_rnd);

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

/*
 * Leave enough space between the mmap area and the stack to honour ulimit in
 * the face of randomisation.
 */
#define MIN_GAP (SZ_128M)
#define MAX_GAP (STACK_TOP / 6 * 5)

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
#elif defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif

/**
 * __account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 * @task: task used to check RLIMIT_MEMLOCK
 * @bypass_rlim: %true if checking RLIMIT_MEMLOCK should be skipped
 *
 * Assumes @task and @mm are valid (i.e. at least one reference on each), and
 * that mmap_lock is held as writer.
 *
 * Return:
 * * 0 on success
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
			struct task_struct *task, bool bypass_rlim)
{
	unsigned long locked_vm, limit;
	int ret = 0;

	mmap_assert_write_locked(mm);

	locked_vm = mm->locked_vm;
	if (inc) {
		if (!bypass_rlim) {
			limit = task_rlimit(task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
			if (locked_vm + pages > limit)
				ret = -ENOMEM;
		}
		if (!ret)
			mm->locked_vm = locked_vm + pages;
	} else {
		WARN_ON_ONCE(pages > locked_vm);
		mm->locked_vm = locked_vm - pages;
	}

	pr_debug("%s: [%d] caller %ps %c%lu %lu/%lu%s\n", __func__, task->pid,
		 (void *)_RET_IP_, (inc) ? '+' : '-', pages << PAGE_SHIFT,
		 locked_vm << PAGE_SHIFT, task_rlimit(task, RLIMIT_MEMLOCK),
		 ret ? " - exceeded" : "");

	return ret;
}
EXPORT_SYMBOL_GPL(__account_locked_vm);

/**
 * account_locked_vm - account locked pages to an mm's locked_vm
 * @mm: mm to account against, may be NULL
 * @pages: number of pages to account
 * @inc: %true if @pages should be considered positive, %false if not
 *
 * Assumes a non-NULL @mm is valid (i.e. at least one reference on it).
 *
 * Return:
 * * 0 on success, or if mm is NULL
 * * -ENOMEM if RLIMIT_MEMLOCK would be exceeded.
 */
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc)
{
	int ret;

	if (pages == 0 || !mm)
		return 0;

	mmap_write_lock(mm);
	ret = __account_locked_vm(mm, pages, inc, current,
				  capable(CAP_IPC_LOCK));
	mmap_write_unlock(mm);

	return ret;
}
EXPORT_SYMBOL_GPL(account_locked_vm);
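
/*
 * Usage sketch (illustrative only): a driver that pins user pages would
 * charge them against RLIMIT_MEMLOCK before pinning and uncharge them on
 * release. "mm" and "npages" are hypothetical.
 *
 *	int ret = account_locked_vm(mm, npages, true);
 *
 *	if (ret)
 *		return ret;
 *	...pin the pages, then later on teardown...
 *	account_locked_vm(mm, npages, false);
 */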

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;
	LIST_HEAD(uf);

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		if (mmap_write_lock_killable(mm))
			return -EINTR;
		ret = do_mmap(file, addr, len, prot, flag, pgoff, &populate,
			      &uf);
		mmap_write_unlock(mm);
		userfaultfd_unmap_complete(mm, &uf);
		if (populate)
			mm_populate(ret, populate);
	}
	trace_android_vh_check_mmap_file(file, prot, flag, ret);
	return ret;
}

unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);

/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that if the gfp flags are not compatible with %GFP_KERNEL, the
 * allocation does not fall back to vmalloc.
 *
 * Return: pointer to the allocated memory or %NULL in case of failure
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
	gfp_t kmalloc_flags = flags;
	void *ret;
	bool use_vmalloc = false;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	if ((flags & GFP_KERNEL) != GFP_KERNEL)
		return kmalloc_node(size, flags, node);

	trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
	if (use_vmalloc)
		goto use_vmalloc_node;

	/*
	 * We want to attempt a large physically contiguous block first because
	 * it is less likely to fragment multiple larger blocks and therefore
	 * contributes less to long-term fragmentation than the vmalloc fallback.
	 * However make sure that larger requests are not too disruptive - no
	 * OOM killer and no allocation failure warnings as we have a fallback.
	 */
	if (size > PAGE_SIZE) {
		kmalloc_flags |= __GFP_NOWARN;

		if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
			kmalloc_flags |= __GFP_NORETRY;
	}

	ret = kmalloc_node(size, kmalloc_flags, node);

	/*
	 * It doesn't really make sense to fall back to vmalloc for sub page
	 * requests
	 */
	if (ret || size <= PAGE_SIZE)
		return ret;

	/* Don't even allow crazy sizes */
	if (unlikely(size > INT_MAX)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

use_vmalloc_node:
	return __vmalloc_node(size, 1, flags, node,
			__builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
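
/*
 * Usage sketch (illustrative only): a caller that may need a buffer too big
 * for kmalloc can use the kvmalloc_array()/kvfree() wrappers around
 * kvmalloc_node(). "nr_entries" and "struct foo" are hypothetical.
 *
 *	struct foo *tbl = kvmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	kvfree(tbl);
 */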

/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Either preemptible task context or not-NMI interrupt.
 */
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);

/**
 * kvfree_sensitive - Free a data object containing sensitive information.
 * @addr: address of the data object to be freed.
 * @len: length of the data object.
 *
 * Use the special memzero_explicit() function to clear the content of a
 * kvmalloc'ed object containing sensitive data to make sure that the
 * compiler won't optimize out the data clearing.
 */
void kvfree_sensitive(const void *addr, size_t len)
{
	if (likely(!ZERO_OR_NULL_PTR(addr))) {
		memzero_explicit((void *)addr, len);
		kvfree(addr);
	}
}
EXPORT_SYMBOL(kvfree_sensitive);
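
/*
 * Usage sketch (illustrative only): key material copied from user space is
 * wiped before the buffer goes back to the allocator. "ukey" and "keylen"
 * are hypothetical.
 *
 *	void *key = vmemdup_user(ukey, keylen);
 *
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	...use the key...
 *	kvfree_sensitive(key, keylen);
 */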

void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
{
	void *newp;

	if (oldsize >= newsize)
		return (void *)p;
	newp = kvmalloc(newsize, flags);
	if (!newp)
		return NULL;
	memcpy(newp, p, oldsize);
	kvfree(p);
	return newp;
}
EXPORT_SYMBOL(kvrealloc);
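
/*
 * Usage sketch (illustrative only): grow a kvmalloc'ed buffer. Per the code
 * above, the old buffer is left untouched on failure, so it must not be
 * lost. "buf", "len" and "newlen" are hypothetical.
 *
 *	void *tmp = kvrealloc(buf, len, newlen, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;
 *	buf = tmp;
 *	len = newlen;
 */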

static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}

/**
 * __vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	return __vmalloc(bytes, flags);
}
EXPORT_SYMBOL(__vmalloc_array);

/**
 * vmalloc_array - allocate memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vmalloc_array(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_array);

/**
 * __vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
void *__vcalloc(size_t n, size_t size, gfp_t flags)
{
	return __vmalloc_array(n, size, flags | __GFP_ZERO);
}
EXPORT_SYMBOL(__vcalloc);

/**
 * vcalloc - allocate and zero memory for a virtually contiguous array.
 * @n: number of elements.
 * @size: element size.
 */
void *vcalloc(size_t n, size_t size)
{
	return __vmalloc_array(n, size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vcalloc);
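
/*
 * Usage sketch (illustrative only): allocate a zeroed, possibly large array
 * with overflow-checked sizing and free it with vfree(). "nr_slots" and
 * "struct slot" are hypothetical.
 *
 *	struct slot *slots = vcalloc(nr_slots, sizeof(*slots));
 *
 *	if (!slots)
 *		return -ENOMEM;
 *	...
 *	vfree(slots);
 */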

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}

/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
	int i;

	if (likely(!PageCompound(page)))
		return atomic_read(&page->_mapcount) >= 0;
	page = compound_head(page);
	if (atomic_read(compound_mapcount_ptr(page)) >= 0)
		return true;
	if (PageHuge(page))
		return false;
	for (i = 0; i < compound_nr(page); i++) {
		if (atomic_read(&page[i]._mapcount) >= 0)
			return true;
	}
	return false;
}
EXPORT_SYMBOL(page_mapped);

struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}

struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
		return NULL;

	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
	int ret;

	ret = atomic_read(&page->_mapcount) + 1;
	/*
	 * For file THP page->_mapcount contains total number of mapping
	 * of the page: no need to look into compound_mapcount.
	 */
	if (!PageAnon(page) && !PageHuge(page))
		return ret;
	page = compound_head(page);
	ret += atomic_read(compound_mapcount_ptr(page)) + 1;
	if (PageDoubleMap(page))
		ret--;
	return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);

int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */

int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

static void sync_overcommit_as(struct work_struct *dummy)
{
	percpu_counter_sync(&vm_committed_as);
}

int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	struct ctl_table t;
	int new_policy = -1;
	int ret;

	/*
	 * The deviation of sync_overcommit_as could be big with loose policy
	 * like OVERCOMMIT_ALWAYS/OVERCOMMIT_GUESS. When changing policy to
	 * strict OVERCOMMIT_NEVER, we need to reduce the deviation to comply
	 * with the strict "NEVER", and to avoid a possible race condition
	 * (even though users usually won't switch to OVERCOMMIT_NEVER very
	 * frequently), the switch is done in the following order:
	 *	1. changing the batch
	 *	2. sync percpu count on each CPU
	 *	3. switch the policy
	 */
	if (write) {
		t = *table;
		t.data = &new_policy;
		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
		if (ret || new_policy == -1)
			return ret;

		mm_compute_batch(new_policy);
		if (new_policy == OVERCOMMIT_NEVER)
			schedule_on_each_cpu(sync_overcommit_as);
		sysctl_overcommit_memory = new_policy;
	} else {
		ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	}

	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}

/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages() - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}

/*
 * Make sure vm_committed_as is in one cacheline and not cacheline shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 *
 * The time cost of this is very low for small platforms, and for big
 * platforms like a 2S/36C/72T Skylake server, in the worst case where
 * vm_committed_as's spinlock is under severe contention, the time cost
 * could be about 30~40 microseconds.
 */
unsigned long vm_memory_committed(void)
{
	return percpu_counter_sum_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	long allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		if (pages > totalram_pages() + total_swap_pages)
			goto error;
		return 0;
	}

	allowed = vm_commit_limit();
	/*
	 * Reserve some for root
	 */
	if (!cap_sys_admin)
		allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

	/*
	 * Don't let a single process grow so big a user can't recover
	 */
	if (mm) {
		long reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);

		allowed -= min_t(long, mm->total_vm / 32, reserve);
	}

	if (percpu_counter_read_positive(&vm_committed_as) < allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}

/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task: the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Return: the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);
	unsigned long arg_start, arg_end, env_start, env_end;

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	spin_lock(&mm->arg_lock);
	arg_start = mm->arg_start;
	arg_end = mm->arg_end;
	env_start = mm->env_start;
	env_end = mm->env_end;
	spin_unlock(&mm->arg_lock);

	len = arg_end - arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

	/*
	 * If the nul at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = env_end - env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, env_start,
						 buffer+res, len,
						 FOLL_FORCE);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}
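
/*
 * Usage sketch (illustrative only): the returned length carries no
 * guaranteed terminator, so callers reserve one byte and terminate the
 * buffer themselves. "tsk" is hypothetical.
 *
 *	char cmdline[128];
 *	int len = get_cmdline(tsk, cmdline, sizeof(cmdline) - 1);
 *
 *	cmdline[len] = '\0';
 */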

int __weak memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}