1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/mmap.c
4 *
5 * Written by obz.
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/page_size_compat.h>
21 #include <linux/swap.h>
22 #include <linux/syscalls.h>
23 #include <linux/capability.h>
24 #include <linux/init.h>
25 #include <linux/file.h>
26 #include <linux/fs.h>
27 #include <linux/personality.h>
28 #include <linux/security.h>
29 #include <linux/hugetlb.h>
30 #include <linux/shmem_fs.h>
31 #include <linux/profile.h>
32 #include <linux/export.h>
33 #include <linux/mount.h>
34 #include <linux/mempolicy.h>
35 #include <linux/rmap.h>
36 #include <linux/mmu_notifier.h>
37 #include <linux/mmdebug.h>
38 #include <linux/perf_event.h>
39 #include <linux/audit.h>
40 #include <linux/khugepaged.h>
41 #include <linux/uprobes.h>
42 #include <linux/notifier.h>
43 #include <linux/memory.h>
44 #include <linux/printk.h>
45 #include <linux/userfaultfd_k.h>
46 #include <linux/moduleparam.h>
47 #include <linux/pkeys.h>
48 #include <linux/oom.h>
49 #include <linux/sched/mm.h>
50 #include <linux/ksm.h>
51 #include <linux/memfd.h>
52
53 #include <linux/uaccess.h>
54 #include <asm/cacheflush.h>
55 #include <asm/tlb.h>
56 #include <asm/mmu_context.h>
57
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/mmap.h>
60 #undef CREATE_TRACE_POINTS
61 #include <trace/hooks/mm.h>
62
63 #include "internal.h"
64
65 EXPORT_TRACEPOINT_SYMBOL_GPL(vm_unmapped_area);
66
67 #ifndef arch_mmap_check
68 #define arch_mmap_check(addr, len, flags) (0)
69 #endif
70
71 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
72 int mmap_rnd_bits_min __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MIN;
73 int mmap_rnd_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_BITS_MAX;
74 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
75 #endif
76 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
77 int mmap_rnd_compat_bits_min __ro_after_init = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
78 int mmap_rnd_compat_bits_max __ro_after_init = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
79 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
80 #endif
81
82 static bool ignore_rlimit_data;
83 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
84
85 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
86 void vma_set_page_prot(struct vm_area_struct *vma)
87 {
88 unsigned long vm_flags = vma->vm_flags;
89 pgprot_t vm_page_prot;
90
91 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
92 if (vma_wants_writenotify(vma, vm_page_prot)) {
93 vm_flags &= ~VM_SHARED;
94 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
95 }
96 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
97 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
98 }
99
100 /*
101 * check_brk_limits() - Use platform specific check of range & verify mlock
102 * limits.
103 * @addr: The address to check
104 * @len: The size of increase.
105 *
106 * Return: 0 on success.
107 */
108 static int check_brk_limits(unsigned long addr, unsigned long len)
109 {
110 unsigned long mapped_addr;
111
112 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
113 if (IS_ERR_VALUE(mapped_addr))
114 return mapped_addr;
115
116 return mlock_future_ok(current->mm, current->mm->def_flags, len)
117 ? 0 : -EAGAIN;
118 }
119 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
120 unsigned long addr, unsigned long request, unsigned long flags);
121 SYSCALL_DEFINE1(brk, unsigned long, brk)
122 {
123 unsigned long newbrk, oldbrk, origbrk;
124 struct mm_struct *mm = current->mm;
125 struct vm_area_struct *brkvma, *next = NULL;
126 unsigned long min_brk;
127 bool populate = false;
128 LIST_HEAD(uf);
129 struct vma_iterator vmi;
130
131 if (mmap_write_lock_killable(mm))
132 return -EINTR;
133
134 origbrk = mm->brk;
135
136 #ifdef CONFIG_COMPAT_BRK
137 /*
138 * CONFIG_COMPAT_BRK can still be overridden by setting
139 * randomize_va_space to 2, which will still cause mm->start_brk
140 * to be arbitrarily shifted
141 */
142 if (current->brk_randomized)
143 min_brk = mm->start_brk;
144 else
145 min_brk = mm->end_data;
146 #else
147 min_brk = mm->start_brk;
148 #endif
149 if (brk < min_brk)
150 goto out;
151
152 /*
153 * Check against rlimit here. If this check is done later after the test
154 * of oldbrk with newbrk then it can escape the test and let the data
155 * segment grow beyond its set limit in the case where the limit is
156 * not page aligned -Ram Gupta
157 */
158 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
159 mm->end_data, mm->start_data))
160 goto out;
161
162 newbrk = __PAGE_ALIGN(brk);
163 oldbrk = __PAGE_ALIGN(mm->brk);
164 if (oldbrk == newbrk) {
165 mm->brk = brk;
166 goto success;
167 }
168
169 /* Always allow shrinking brk. */
170 if (brk <= mm->brk) {
171 /* Search one past newbrk */
172 vma_iter_init(&vmi, mm, newbrk);
173 brkvma = vma_find(&vmi, oldbrk);
174 if (!brkvma || brkvma->vm_start >= oldbrk)
175 goto out; /* mapping intersects with an existing non-brk vma. */
176 /*
177 * mm->brk must be protected by write mmap_lock.
178 * do_vmi_align_munmap() will drop the lock on success, so
179 * update it before calling do_vma_munmap().
180 */
181 mm->brk = brk;
182 if (do_vmi_align_munmap(&vmi, brkvma, mm, newbrk, oldbrk, &uf,
183 /* unlock = */ true))
184 goto out;
185
186 goto success_unlocked;
187 }
188
189 if (check_brk_limits(oldbrk, newbrk - oldbrk))
190 goto out;
191
192 /*
193 * Only check if the next VMA is within the stack_guard_gap of the
194 * expansion area
195 */
196 vma_iter_init(&vmi, mm, oldbrk);
197 next = vma_find(&vmi, newbrk + __PAGE_SIZE + stack_guard_gap);
198 if (next && newbrk + __PAGE_SIZE > vm_start_gap(next))
199 goto out;
200
201 brkvma = vma_prev_limit(&vmi, mm->start_brk);
202 /* Ok, looks good - let it rip. */
203 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
204 goto out;
205
206 mm->brk = brk;
207 if (mm->def_flags & VM_LOCKED)
208 populate = true;
209
210 success:
211 mmap_write_unlock(mm);
212 success_unlocked:
213 userfaultfd_unmap_complete(mm, &uf);
214 if (populate)
215 mm_populate(oldbrk, newbrk - oldbrk);
216 return brk;
217
218 out:
219 mm->brk = origbrk;
220 mmap_write_unlock(mm);
221 return origbrk;
222 }
223
224 /*
225 * If a hint addr is less than mmap_min_addr change hint to be as
226 * low as possible but still greater than mmap_min_addr
227 */
228 static inline unsigned long round_hint_to_min(unsigned long hint)
229 {
230 hint &= __PAGE_MASK;
231 if (((void *)hint != NULL) &&
232 (hint < mmap_min_addr))
233 return __PAGE_ALIGN(mmap_min_addr);
234 return hint;
235 }
236
237 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
238 unsigned long bytes)
239 {
240 unsigned long locked_pages, limit_pages;
241
242 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
243 return true;
244
245 locked_pages = bytes >> PAGE_SHIFT;
246 locked_pages += mm->locked_vm;
247
248 limit_pages = rlimit(RLIMIT_MEMLOCK);
249 limit_pages >>= PAGE_SHIFT;
250
251 return locked_pages <= limit_pages;
252 }
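
/*
 * Worked example (illustrative only; assumes 4KiB pages, PAGE_SHIFT == 12):
 * locking 1MiB adds 1MiB >> 12 = 256 pages on top of mm->locked_vm, and an
 * RLIMIT_MEMLOCK of 8MiB allows 8MiB >> 12 = 2048 pages in total, so the
 * request succeeds only while the sum stays at or below 2048 pages, unless
 * the caller has CAP_IPC_LOCK, in which case the limit is bypassed entirely.
 */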
253
254 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
255 {
256 if (S_ISREG(inode->i_mode))
257 return MAX_LFS_FILESIZE;
258
259 if (S_ISBLK(inode->i_mode))
260 return MAX_LFS_FILESIZE;
261
262 if (S_ISSOCK(inode->i_mode))
263 return MAX_LFS_FILESIZE;
264
265 /* Special "we do even unsigned file positions" case */
266 if (file->f_op->fop_flags & FOP_UNSIGNED_OFFSET)
267 return 0;
268
269 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
270 return ULONG_MAX;
271 }
272
273 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
274 unsigned long pgoff, unsigned long len)
275 {
276 u64 maxsize = file_mmap_size_max(file, inode);
277
278 if (maxsize && len > maxsize)
279 return false;
280 maxsize -= len;
281 if (pgoff > maxsize >> PAGE_SHIFT)
282 return false;
283 return true;
284 }
285
286 /*
287 * The caller must write-lock current->mm->mmap_lock.
288 */
289 unsigned long do_mmap(struct file *file, unsigned long addr,
290 unsigned long len, unsigned long prot,
291 unsigned long flags, vm_flags_t vm_flags,
292 unsigned long pgoff, unsigned long *populate,
293 struct list_head *uf)
294 {
295 unsigned long file_backed_len = 0;
296 struct mm_struct *mm = current->mm;
297 int pkey = 0;
298
299 *populate = 0;
300
301 if (!len)
302 return -EINVAL;
303
304 /*
305 * Does the application expect PROT_READ to imply PROT_EXEC?
306 *
307 * (the exception is when the underlying filesystem is noexec
308 * mounted, in which case we don't add PROT_EXEC.)
309 */
310 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
311 if (!(file && path_noexec(&file->f_path)))
312 prot |= PROT_EXEC;
313
314 /* force arch specific MAP_FIXED handling in get_unmapped_area */
315 if (flags & MAP_FIXED_NOREPLACE)
316 flags |= MAP_FIXED;
317
318 if (!(flags & MAP_FIXED))
319 addr = round_hint_to_min(addr);
320
321 /* Careful about overflows.. */
322 len = __COMPAT_PAGE_ALIGN(len, flags);
323 if (!len)
324 return -ENOMEM;
325
326 /* offset overflow? */
327 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
328 return -EOVERFLOW;
329
330 /* Too many mappings? */
331 if (mm->map_count > sysctl_max_map_count)
332 return -ENOMEM;
333
334 /*
335 * addr is returned from get_unmapped_area,
336 * There are two cases:
337 * 1> MAP_FIXED == false
338 * unallocated memory, no need to check sealing.
339 * 2> MAP_FIXED == true
340 * sealing is checked inside mmap_region when
341 * do_vmi_munmap is called.
342 */
343
344 if (prot == PROT_EXEC) {
345 pkey = execute_only_pkey(mm);
346 if (pkey < 0)
347 pkey = 0;
348 }
349
350 /* Do simple checking here so the lower-level routines won't have
351 * to. We assume access permissions have been handled by the open
352 * of the memory object, so we don't do any here.
353 */
354 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(file, flags) |
355 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
356
357 /* Obtain the address to map to. We verify (or select) it and ensure
358 * that it represents a valid section of the address space.
359 */
360 addr = __get_unmapped_area(file, addr, len, pgoff, flags, vm_flags);
361 if (IS_ERR_VALUE(addr))
362 return addr;
363
364 if (flags & MAP_FIXED_NOREPLACE) {
365 if (find_vma_intersection(mm, addr, addr + len))
366 return -EEXIST;
367 }
368
369 if (flags & MAP_LOCKED)
370 if (!can_do_mlock())
371 return -EPERM;
372
373 if (!mlock_future_ok(mm, vm_flags, len))
374 return -EAGAIN;
375
376 if (file) {
377 struct inode *inode = file_inode(file);
378 unsigned int seals = memfd_file_seals(file);
379 unsigned long flags_mask;
380
381 if (!file_mmap_ok(file, inode, pgoff, len))
382 return -EOVERFLOW;
383
384 file_backed_len = __filemap_len(inode, pgoff, len, flags);
385
386 flags_mask = LEGACY_MAP_MASK;
387 if (file->f_op->fop_flags & FOP_MMAP_SYNC)
388 flags_mask |= MAP_SYNC;
389
390 switch (flags & MAP_TYPE) {
391 case MAP_SHARED:
392 /*
393 * Force use of MAP_SHARED_VALIDATE with non-legacy
394 * flags. E.g. MAP_SYNC is dangerous to use with
395 * MAP_SHARED as you don't know which consistency model
396 * you will get. We silently ignore unsupported flags
397 * with MAP_SHARED to preserve backward compatibility.
398 */
399 flags &= LEGACY_MAP_MASK;
400 fallthrough;
401 case MAP_SHARED_VALIDATE:
402 if (flags & ~flags_mask)
403 return -EOPNOTSUPP;
404 if (prot & PROT_WRITE) {
405 if (!(file->f_mode & FMODE_WRITE))
406 return -EACCES;
407 if (IS_SWAPFILE(file->f_mapping->host))
408 return -ETXTBSY;
409 }
410
411 /*
412 * Make sure we don't allow writing to an append-only
413 * file..
414 */
415 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
416 return -EACCES;
417
418 vm_flags |= VM_SHARED | VM_MAYSHARE;
419 if (!(file->f_mode & FMODE_WRITE))
420 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
421 else if (is_readonly_sealed(seals, vm_flags))
422 vm_flags &= ~VM_MAYWRITE;
423 fallthrough;
424 case MAP_PRIVATE:
425 if (!(file->f_mode & FMODE_READ))
426 return -EACCES;
427 if (path_noexec(&file->f_path)) {
428 if (vm_flags & VM_EXEC)
429 return -EPERM;
430 vm_flags &= ~VM_MAYEXEC;
431 }
432
433 if (!file->f_op->mmap)
434 return -ENODEV;
435 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
436 return -EINVAL;
437 break;
438
439 default:
440 return -EINVAL;
441 }
442 } else {
443 switch (flags & MAP_TYPE) {
444 case MAP_SHARED:
445 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
446 return -EINVAL;
447 /*
448 * Ignore pgoff.
449 */
450 pgoff = 0;
451 vm_flags |= VM_SHARED | VM_MAYSHARE;
452 break;
453 case MAP_DROPPABLE:
454 if (VM_DROPPABLE == VM_NONE)
455 return -ENOTSUPP;
456 /*
457 * It makes no sense for a locked or stack area to be droppable.
458 *
459 * Also, since droppable pages can just go away at any time,
460 * it makes no sense to copy them on fork or dump them.
461 *
462 * And don't attempt to combine with hugetlb for now.
463 */
464 if (flags & (MAP_LOCKED | MAP_HUGETLB))
465 return -EINVAL;
466 if (vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
467 return -EINVAL;
468
469 vm_flags |= VM_DROPPABLE;
470
471 /*
472 * If the pages can be dropped, then it doesn't make
473 * sense to reserve them.
474 */
475 vm_flags |= VM_NORESERVE;
476
477 /*
478 * Likewise, they're volatile enough that they
479 * shouldn't survive forks or coredumps.
480 */
481 vm_flags |= VM_WIPEONFORK | VM_DONTDUMP;
482 fallthrough;
483 case MAP_PRIVATE:
484 /*
485 * Set pgoff according to addr for anon_vma.
486 */
487 pgoff = addr >> PAGE_SHIFT;
488 break;
489 default:
490 return -EINVAL;
491 }
492 }
493
494 /*
495 * Set 'VM_NORESERVE' if we should not account for the
496 * memory use of this mapping.
497 */
498 if (flags & MAP_NORESERVE) {
499 /* We honor MAP_NORESERVE if allowed to overcommit */
500 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
501 vm_flags |= VM_NORESERVE;
502
503 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
504 if (file && is_file_hugepages(file))
505 vm_flags |= VM_NORESERVE;
506 }
507
508 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
509 if (!IS_ERR_VALUE(addr) &&
510 ((vm_flags & VM_LOCKED) ||
511 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
512 *populate = len;
513
514 __filemap_fixup(addr, prot, file_backed_len, len);
515
516 return addr;
517 }
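
/*
 * A minimal sketch of how do_mmap() above is typically driven (loosely
 * modelled on vm_mmap_pgoff(); error handling and security hooks omitted):
 *
 *	LIST_HEAD(uf);
 *	unsigned long populate = 0, addr;
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	addr = do_mmap(file, hint, len, prot, flags, 0, pgoff, &populate, &uf);
 *	mmap_write_unlock(mm);
 *	userfaultfd_unmap_complete(mm, &uf);
 *	if (populate && !IS_ERR_VALUE(addr))
 *		mm_populate(addr, populate);
 */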
518
519 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
520 unsigned long prot, unsigned long flags,
521 unsigned long fd, unsigned long pgoff)
522 {
523 struct file *file = NULL;
524 unsigned long retval;
525
526 if (!(flags & MAP_ANONYMOUS)) {
527 audit_mmap_fd(fd, flags);
528 file = fget(fd);
529 if (!file)
530 return -EBADF;
531 if (is_file_hugepages(file)) {
532 len = ALIGN(len, huge_page_size(hstate_file(file)));
533 } else if (unlikely(flags & MAP_HUGETLB)) {
534 retval = -EINVAL;
535 goto out_fput;
536 }
537 } else if (flags & MAP_HUGETLB) {
538 struct hstate *hs;
539
540 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
541 if (!hs)
542 return -EINVAL;
543
544 len = ALIGN(len, huge_page_size(hs));
545 /*
546 * VM_NORESERVE is used because the reservations will be
547 * taken when vm_ops->mmap() is called
548 */
549 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
550 VM_NORESERVE,
551 HUGETLB_ANONHUGE_INODE,
552 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
553 if (IS_ERR(file))
554 return PTR_ERR(file);
555 }
556
557 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
558 out_fput:
559 if (file)
560 fput(file);
561 return retval;
562 }
563
564 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
565 unsigned long, prot, unsigned long, flags,
566 unsigned long, fd, unsigned long, pgoff)
567 {
568 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
569 }
570
571 #ifdef __ARCH_WANT_SYS_OLD_MMAP
572 struct mmap_arg_struct {
573 unsigned long addr;
574 unsigned long len;
575 unsigned long prot;
576 unsigned long flags;
577 unsigned long fd;
578 unsigned long offset;
579 };
580
581 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
582 {
583 struct mmap_arg_struct a;
584
585 if (copy_from_user(&a, arg, sizeof(a)))
586 return -EFAULT;
587 if (offset_in_page(a.offset))
588 return -EINVAL;
589
590 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
591 a.offset >> PAGE_SHIFT);
592 }
593 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
594
595 /*
596 * We account for memory if it's a private writable mapping,
597 * it is not backed by hugepages, and VM_NORESERVE wasn't set.
598 */
599 static inline bool accountable_mapping(struct file *file, vm_flags_t vm_flags)
600 {
601 /*
602 * hugetlb has its own accounting separate from the core VM
603 * VM_HUGETLB may not be set yet so we cannot check for that flag.
604 */
605 if (file && is_file_hugepages(file))
606 return false;
607
608 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
609 }
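
/*
 * For example (illustrative): a MAP_PRIVATE mapping created with PROT_WRITE
 * and without MAP_NORESERVE ends up with VM_WRITE set but VM_SHARED and
 * VM_NORESERVE clear, so accountable_mapping() returns true and the pages are
 * charged against the overcommit limits; shared or read-only mappings are not.
 */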
610
611 /**
612 * unmapped_area() - Find an area between the low_limit and the high_limit with
613 * the correct alignment and offset, all from @info. Note: current->mm is used
614 * for the search.
615 *
616 * @info: The unmapped area information including the range [low_limit,
617 * high_limit), the alignment offset and mask.
618 *
619 * Return: A memory address or -ENOMEM.
620 */
621 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
622 {
623 unsigned long length, gap;
624 unsigned long low_limit, high_limit;
625 struct vm_area_struct *tmp;
626 VMA_ITERATOR(vmi, current->mm, 0);
627
628 /* Adjust search length to account for worst case alignment overhead */
629 length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask + info->start_gap);
630 if (length < info->length)
631 return -ENOMEM;
632
633 low_limit = info->low_limit;
634 if (low_limit < mmap_min_addr)
635 low_limit = mmap_min_addr;
636 high_limit = info->high_limit;
637 retry:
638 if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length))
639 return -ENOMEM;
640
641 /*
642 * Adjust for the gap first so it doesn't interfere with the
643 * later alignment. The first step is the minimum needed to
644 * fulfill the start gap; the next step is the minimum needed to align
645 * that. Together this is the minimum needed to fulfill both.
646 */
647 gap = vma_iter_addr(&vmi) + info->start_gap;
648 gap += (info->align_offset - gap) & info->align_mask;
649 tmp = vma_next(&vmi);
650 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
651 if (vm_start_gap(tmp) < gap + length - 1) {
652 low_limit = tmp->vm_end;
653 vma_iter_reset(&vmi);
654 goto retry;
655 }
656 } else {
657 tmp = vma_prev(&vmi);
658 if (tmp && vm_end_gap(tmp) > gap) {
659 low_limit = vm_end_gap(tmp);
660 vma_iter_reset(&vmi);
661 goto retry;
662 }
663 }
664
665 return __PAGE_ALIGN(gap);
666 }
667
668 /**
669 * unmapped_area_topdown() - Find an area between the low_limit and the
670 * high_limit with the correct alignment and offset at the highest available
671 * address, all from @info. Note: current->mm is used for the search.
672 *
673 * @info: The unmapped area information including the range [low_limit,
674 * high_limit), the alignment offset and mask.
675 *
676 * Return: A memory address or -ENOMEM.
677 */
678 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
679 {
680 unsigned long length, gap, gap_end;
681 unsigned long low_limit, high_limit;
682 struct vm_area_struct *tmp;
683 VMA_ITERATOR(vmi, current->mm, 0);
684
685 /* Adjust search length to account for worst case alignment overhead */
686 length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask + info->start_gap);
687 if (length < info->length)
688 return -ENOMEM;
689
690 low_limit = info->low_limit;
691 if (low_limit < mmap_min_addr)
692 low_limit = mmap_min_addr;
693 high_limit = info->high_limit;
694 retry:
695 if (vma_iter_area_highest(&vmi, low_limit, high_limit, length))
696 return -ENOMEM;
697
698 gap = vma_iter_end(&vmi) - info->length;
699 gap -= (gap - info->align_offset) & info->align_mask;
700 gap_end = vma_iter_end(&vmi);
701 tmp = vma_next(&vmi);
702 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
703 if (vm_start_gap(tmp) < gap_end) {
704 high_limit = vm_start_gap(tmp);
705 vma_iter_reset(&vmi);
706 goto retry;
707 }
708 } else {
709 tmp = vma_prev(&vmi);
710 if (tmp && vm_end_gap(tmp) > gap) {
711 high_limit = tmp->vm_start;
712 vma_iter_reset(&vmi);
713 goto retry;
714 }
715 }
716
717 return __PAGE_ALIGN(gap);
718 }
719
720 /*
721 * Determine if the allocation needs to ensure that there is no
722 * existing mapping within its guard gaps, for use as start_gap.
723 */
724 static inline unsigned long stack_guard_placement(vm_flags_t vm_flags)
725 {
726 if (vm_flags & VM_SHADOW_STACK)
727 return PAGE_SIZE;
728
729 return 0;
730 }
731
732 /*
733 * Search for an unmapped address range.
734 *
735 * We are looking for a range that:
736 * - does not intersect with any VMA;
737 * - is contained within the [low_limit, high_limit) interval;
738 * - is at least the desired size.
739 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
740 */
741 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
742 {
743 unsigned long addr;
744
745 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
746 addr = unmapped_area_topdown(info);
747 else
748 addr = unmapped_area(info);
749
750 trace_vm_unmapped_area(addr, info);
751 return addr;
752 }
753 EXPORT_SYMBOL_GPL(vm_unmapped_area);
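
/*
 * Example use of vm_unmapped_area() (a sketch; compare
 * generic_get_unmapped_area() below):
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = current->mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	addr = vm_unmapped_area(&info);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 *
 * Fields left zero by the designated initializer (flags, align_mask,
 * align_offset, start_gap) give a bottom-up, unaligned search.
 */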
754
755 /* Get an address range which is currently unmapped.
756 * For shmat() with addr=0.
757 *
758 * Ugly calling convention alert:
759 * Return value with the low bits set means error value,
760 * i.e.
761 * if (ret & ~PAGE_MASK)
762 * error = ret;
763 *
764 * This function "knows" that -ENOMEM has the bits set.
765 */
766 unsigned long
767 generic_get_unmapped_area(struct file *filp, unsigned long addr,
768 unsigned long len, unsigned long pgoff,
769 unsigned long flags, vm_flags_t vm_flags)
770 {
771 struct mm_struct *mm = current->mm;
772 struct vm_area_struct *vma, *prev;
773 struct vm_unmapped_area_info info = {};
774 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
775
776 if (len > mmap_end - mmap_min_addr)
777 return -ENOMEM;
778
779 if (flags & MAP_FIXED)
780 return addr;
781
782 if (addr) {
783 addr = PAGE_ALIGN(addr);
784 vma = find_vma_prev(mm, addr, &prev);
785 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
786 (!vma || addr + len <= vm_start_gap(vma)) &&
787 (!prev || addr >= vm_end_gap(prev)))
788 return addr;
789 }
790
791 info.length = len;
792 info.low_limit = mm->mmap_base;
793 info.high_limit = mmap_end;
794 info.start_gap = stack_guard_placement(vm_flags);
795 return vm_unmapped_area(&info);
796 }
797
798 #ifndef HAVE_ARCH_UNMAPPED_AREA
799 unsigned long
800 arch_get_unmapped_area(struct file *filp, unsigned long addr,
801 unsigned long len, unsigned long pgoff,
802 unsigned long flags, vm_flags_t vm_flags)
803 {
804 return generic_get_unmapped_area(filp, addr, len, pgoff, flags,
805 vm_flags);
806 }
807 #endif
808
809 /*
810 * This mmap-allocator allocates new areas top-down from below the
811 * stack's low limit (the base):
812 */
813 unsigned long
814 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
815 unsigned long len, unsigned long pgoff,
816 unsigned long flags, vm_flags_t vm_flags)
817 {
818 struct vm_area_struct *vma, *prev;
819 struct mm_struct *mm = current->mm;
820 struct vm_unmapped_area_info info = {};
821 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
822
823 /* requested length too big for entire address space */
824 if (len > mmap_end - mmap_min_addr)
825 return -ENOMEM;
826
827 if (flags & MAP_FIXED)
828 return addr;
829
830 /* requesting a specific address */
831 if (addr) {
832 addr = PAGE_ALIGN(addr);
833 vma = find_vma_prev(mm, addr, &prev);
834 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
835 (!vma || addr + len <= vm_start_gap(vma)) &&
836 (!prev || addr >= vm_end_gap(prev)))
837 return addr;
838 }
839
840 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
841 info.length = len;
842 info.low_limit = PAGE_SIZE;
843 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
844 info.start_gap = stack_guard_placement(vm_flags);
845 addr = vm_unmapped_area(&info);
846
847 /*
848 * A failed mmap() very likely causes application failure,
849 * so fall back to the bottom-up function here. This scenario
850 * can happen with large stack limits and large mmap()
851 * allocations.
852 */
853 if (offset_in_page(addr)) {
854 VM_BUG_ON(addr != -ENOMEM);
855 info.flags = 0;
856 info.low_limit = TASK_UNMAPPED_BASE;
857 info.high_limit = mmap_end;
858 addr = vm_unmapped_area(&info);
859 }
860
861 return addr;
862 }
863
864 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
865 unsigned long
866 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
867 unsigned long len, unsigned long pgoff,
868 unsigned long flags, vm_flags_t vm_flags)
869 {
870 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags,
871 vm_flags);
872 }
873 #endif
874
875 unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp,
876 unsigned long addr, unsigned long len,
877 unsigned long pgoff, unsigned long flags,
878 vm_flags_t vm_flags)
879 {
880 if (test_bit(MMF_TOPDOWN, &mm->flags))
881 return arch_get_unmapped_area_topdown(filp, addr, len, pgoff,
882 flags, vm_flags);
883 return arch_get_unmapped_area(filp, addr, len, pgoff, flags, vm_flags);
884 }
885
886 unsigned long
887 __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
888 unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags)
889 {
890 unsigned long (*get_area)(struct file *, unsigned long,
891 unsigned long, unsigned long, unsigned long)
892 = NULL;
893
894 unsigned long error = arch_mmap_check(addr, len, flags);
895 if (error)
896 return error;
897
898 /* Careful about overflows.. */
899 if (len > TASK_SIZE)
900 return -ENOMEM;
901
902 if (file) {
903 if (file->f_op->get_unmapped_area)
904 get_area = file->f_op->get_unmapped_area;
905 } else if (flags & MAP_SHARED) {
906 /*
907 * mmap_region() will call shmem_zero_setup() to create a file,
908 * so use shmem's get_unmapped_area in case it can be huge.
909 */
910 get_area = shmem_get_unmapped_area;
911 }
912
913 /* Always treat pgoff as zero for anonymous memory. */
914 if (!file)
915 pgoff = 0;
916
917 if (get_area) {
918 addr = get_area(file, addr, len, pgoff, flags);
919 } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)
920 && !addr /* no hint */
921 && IS_ALIGNED(len, PMD_SIZE)) {
922 /* Ensures that larger anonymous mappings are THP aligned. */
923 addr = thp_get_unmapped_area_vmflags(file, addr, len,
924 pgoff, flags, vm_flags);
925 } else {
926 addr = mm_get_unmapped_area_vmflags(current->mm, file, addr, len,
927 pgoff, flags, vm_flags);
928 }
929 if (IS_ERR_VALUE(addr))
930 return addr;
931
932 if (addr > TASK_SIZE - len)
933 return -ENOMEM;
934 if (offset_in_page(addr))
935 return -EINVAL;
936
937 error = security_mmap_addr(addr);
938 return error ? error : addr;
939 }
940 EXPORT_SYMBOL(__get_unmapped_area);
941
942 unsigned long
943 mm_get_unmapped_area(struct mm_struct *mm, struct file *file,
944 unsigned long addr, unsigned long len,
945 unsigned long pgoff, unsigned long flags)
946 {
947 if (test_bit(MMF_TOPDOWN, &mm->flags))
948 return arch_get_unmapped_area_topdown(file, addr, len, pgoff, flags, 0);
949 return arch_get_unmapped_area(file, addr, len, pgoff, flags, 0);
950 }
951 EXPORT_SYMBOL(mm_get_unmapped_area);
952
953 /**
954 * find_vma_intersection() - Look up the first VMA which intersects the interval
955 * @mm: The process address space.
956 * @start_addr: The inclusive start user address.
957 * @end_addr: The exclusive end user address.
958 *
959 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
960 * start_addr < end_addr.
961 */
962 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
963 unsigned long start_addr,
964 unsigned long end_addr)
965 {
966 unsigned long index = start_addr;
967
968 mmap_assert_locked(mm);
969 return mt_find(&mm->mm_mt, &index, end_addr - 1);
970 }
971 EXPORT_SYMBOL(find_vma_intersection);
972
973 /**
974 * find_vma() - Find the VMA for a given address, or the next VMA.
975 * @mm: The mm_struct to check
976 * @addr: The address
977 *
978 * Returns: The VMA associated with addr, or the next VMA.
979 * May return %NULL in the case of no VMA at addr or above.
980 */
981 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
982 {
983 unsigned long index = addr;
984
985 mmap_assert_locked(mm);
986 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
987 }
988 EXPORT_SYMBOL(find_vma);
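
/*
 * Example (a sketch, with mmap_lock held): because find_vma() may return the
 * next VMA above @addr, callers that want "the VMA containing addr" must
 * recheck the start address:
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || addr < vma->vm_start)
 *		return NULL;
 *	return vma;
 *
 * This is the same pattern used by find_extend_vma_locked() further down.
 */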
989
990 /**
991 * find_vma_prev() - Find the VMA for a given address, or the next vma and
992 * set %pprev to the previous VMA, if any.
993 * @mm: The mm_struct to check
994 * @addr: The address
995 * @pprev: The pointer to set to the previous VMA
996 *
997 * Note that the RCU lock is not taken here since the externally held
998 * mmap_lock is used instead.
999 *
1000 * Returns: The VMA associated with @addr, or the next vma.
1001 * May return %NULL in the case of no vma at addr or above.
1002 */
1003 struct vm_area_struct *
1004 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1005 struct vm_area_struct **pprev)
1006 {
1007 struct vm_area_struct *vma;
1008 VMA_ITERATOR(vmi, mm, addr);
1009
1010 vma = vma_iter_load(&vmi);
1011 *pprev = vma_prev(&vmi);
1012 if (!vma)
1013 vma = vma_next(&vmi);
1014 return vma;
1015 }
1016
1017 /*
1018 * Verify that the stack growth is acceptable and
1019 * update accounting. This is shared with both the
1020 * grow-up and grow-down cases.
1021 */
1022 static int acct_stack_growth(struct vm_area_struct *vma,
1023 unsigned long size, unsigned long grow)
1024 {
1025 struct mm_struct *mm = vma->vm_mm;
1026 unsigned long new_start;
1027
1028 /* address space limit tests */
1029 if (!may_expand_vm(mm, vma->vm_flags, grow))
1030 return -ENOMEM;
1031
1032 /* Stack limit test */
1033 if (size > rlimit(RLIMIT_STACK))
1034 return -ENOMEM;
1035
1036 /* mlock limit tests */
1037 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1038 return -ENOMEM;
1039
1040 /* Check to ensure the stack will not grow into a hugetlb-only region */
1041 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1042 vma->vm_end - size;
1043 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1044 return -EFAULT;
1045
1046 /*
1047 * Overcommit.. This must be the final test, as it will
1048 * update security statistics.
1049 */
1050 if (security_vm_enough_memory_mm(mm, grow))
1051 return -ENOMEM;
1052
1053 return 0;
1054 }
1055
1056 #if defined(CONFIG_STACK_GROWSUP)
1057 /*
1058 * PA-RISC uses this for its stack.
1059 * vma is the last one with address > vma->vm_end. Have to extend vma.
1060 */
1061 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1062 {
1063 struct mm_struct *mm = vma->vm_mm;
1064 struct vm_area_struct *next;
1065 unsigned long gap_addr;
1066 int error = 0;
1067 VMA_ITERATOR(vmi, mm, vma->vm_start);
1068
1069 if (!(vma->vm_flags & VM_GROWSUP))
1070 return -EFAULT;
1071
1072 /* Guard against exceeding limits of the address space. */
1073 address &= PAGE_MASK;
1074 if (address >= (TASK_SIZE & PAGE_MASK))
1075 return -ENOMEM;
1076 address += PAGE_SIZE;
1077
1078 /* Enforce stack_guard_gap */
1079 gap_addr = address + stack_guard_gap;
1080
1081 /* Guard against overflow */
1082 if (gap_addr < address || gap_addr > TASK_SIZE)
1083 gap_addr = TASK_SIZE;
1084
1085 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
1086 if (next && vma_is_accessible(next)) {
1087 if (!(next->vm_flags & VM_GROWSUP))
1088 return -ENOMEM;
1089 /* Check that both stack segments have the same anon_vma? */
1090 }
1091
1092 if (next)
1093 vma_iter_prev_range_limit(&vmi, address);
1094
1095 vma_iter_config(&vmi, vma->vm_start, address);
1096 if (vma_iter_prealloc(&vmi, vma))
1097 return -ENOMEM;
1098
1099 /* We must make sure the anon_vma is allocated. */
1100 if (unlikely(anon_vma_prepare(vma))) {
1101 vma_iter_free(&vmi);
1102 return -ENOMEM;
1103 }
1104
1105 /* Lock the VMA before expanding to prevent concurrent page faults */
1106 vma_start_write(vma);
1107 /*
1108 * vma->vm_start/vm_end cannot change under us because the caller
1109 * is required to hold the mmap_lock in read mode. We need the
1110 * anon_vma lock to serialize against concurrent expand_stacks.
1111 */
1112 anon_vma_lock_write(vma->anon_vma);
1113
1114 /* Somebody else might have raced and expanded it already */
1115 if (address > vma->vm_end) {
1116 unsigned long size, grow;
1117
1118 size = address - vma->vm_start;
1119 grow = (address - vma->vm_end) >> PAGE_SHIFT;
1120
1121 error = -ENOMEM;
1122 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
1123 error = acct_stack_growth(vma, size, grow);
1124 if (!error) {
1125 /*
1126 * We only hold a shared mmap_lock here, so
1127 * we need to protect against concurrent vma
1128 * expansions. anon_vma_lock_write() doesn't
1129 * help here, as we don't guarantee that all
1130 * growable vmas in a mm share the same root
1131 * anon vma. So, we reuse mm->page_table_lock
1132 * to guard against concurrent vma expansions.
1133 */
1134 spin_lock(&mm->page_table_lock);
1135 if (vma->vm_flags & VM_LOCKED)
1136 mm->locked_vm += grow;
1137 vm_stat_account(mm, vma->vm_flags, grow);
1138 anon_vma_interval_tree_pre_update_vma(vma);
1139 vma->vm_end = address;
1140 /* Overwrite old entry in mtree. */
1141 vma_iter_store_overwrite(&vmi, vma);
1142 anon_vma_interval_tree_post_update_vma(vma);
1143 spin_unlock(&mm->page_table_lock);
1144
1145 perf_event_mmap(vma);
1146 }
1147 }
1148 }
1149 anon_vma_unlock_write(vma->anon_vma);
1150 vma_iter_free(&vmi);
1151 validate_mm(mm);
1152 return error;
1153 }
1154 #endif /* CONFIG_STACK_GROWSUP */
1155
1156 /*
1157 * vma is the first one with address < vma->vm_start. Have to extend vma.
1158 * mmap_lock held for writing.
1159 */
1160 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
1161 {
1162 struct mm_struct *mm = vma->vm_mm;
1163 struct vm_area_struct *prev;
1164 int error = 0;
1165 VMA_ITERATOR(vmi, mm, vma->vm_start);
1166
1167 if (!(vma->vm_flags & VM_GROWSDOWN))
1168 return -EFAULT;
1169
1170 address &= __PAGE_MASK;
1171 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
1172 return -EPERM;
1173
1174 /* Enforce stack_guard_gap */
1175 prev = vma_prev(&vmi);
1176 /* Check that both stack segments have the same anon_vma? */
1177 if (prev) {
1178 if (!(prev->vm_flags & VM_GROWSDOWN) &&
1179 vma_is_accessible(prev) &&
1180 (address - prev->vm_end < stack_guard_gap))
1181 return -ENOMEM;
1182 }
1183
1184 if (prev)
1185 vma_iter_next_range_limit(&vmi, vma->vm_start);
1186
1187 vma_iter_config(&vmi, address, vma->vm_end);
1188 if (vma_iter_prealloc(&vmi, vma))
1189 return -ENOMEM;
1190
1191 /* We must make sure the anon_vma is allocated. */
1192 if (unlikely(anon_vma_prepare(vma))) {
1193 vma_iter_free(&vmi);
1194 return -ENOMEM;
1195 }
1196
1197 /* Lock the VMA before expanding to prevent concurrent page faults */
1198 vma_start_write(vma);
1199 /*
1200 * vma->vm_start/vm_end cannot change under us because the caller
1201 * is required to hold the mmap_lock in read mode. We need the
1202 * anon_vma lock to serialize against concurrent expand_stacks.
1203 */
1204 anon_vma_lock_write(vma->anon_vma);
1205
1206 /* Somebody else might have raced and expanded it already */
1207 if (address < vma->vm_start) {
1208 unsigned long size, grow;
1209
1210 size = vma->vm_end - address;
1211 grow = (vma->vm_start - address) >> PAGE_SHIFT;
1212
1213 error = -ENOMEM;
1214 if (grow <= vma->vm_pgoff) {
1215 error = acct_stack_growth(vma, size, grow);
1216 if (!error) {
1217 /*
1218 * We only hold a shared mmap_lock here, so
1219 * we need to protect against concurrent vma
1220 * expansions. anon_vma_lock_write() doesn't
1221 * help here, as we don't guarantee that all
1222 * growable vmas in a mm share the same root
1223 * anon vma. So, we reuse mm->page_table_lock
1224 * to guard against concurrent vma expansions.
1225 */
1226 spin_lock(&mm->page_table_lock);
1227 if (vma->vm_flags & VM_LOCKED)
1228 mm->locked_vm += grow;
1229 vm_stat_account(mm, vma->vm_flags, grow);
1230 anon_vma_interval_tree_pre_update_vma(vma);
1231 vma->vm_start = address;
1232 vma->vm_pgoff -= grow;
1233 /* Overwrite old entry in mtree. */
1234 vma_iter_store_overwrite(&vmi, vma);
1235 anon_vma_interval_tree_post_update_vma(vma);
1236 spin_unlock(&mm->page_table_lock);
1237
1238 perf_event_mmap(vma);
1239 }
1240 }
1241 }
1242 anon_vma_unlock_write(vma->anon_vma);
1243 vma_iter_free(&vmi);
1244 validate_mm(mm);
1245 return error;
1246 }
1247
1248 /* enforced gap between the expanding stack and other mappings. */
1249 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
1250
1251 static int __init cmdline_parse_stack_guard_gap(char *p)
1252 {
1253 unsigned long val;
1254 char *endptr;
1255
1256 val = simple_strtoul(p, &endptr, 10);
1257 if (!*endptr)
1258 stack_guard_gap = val << PAGE_SHIFT;
1259
1260 return 1;
1261 }
1262 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
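
/*
 * The parameter is given in pages: for example, booting with
 * "stack_guard_gap=512" sets the gap to 512 << PAGE_SHIFT, i.e. 2MiB with
 * 4KiB pages, while the default of 256 pages corresponds to 1MiB.
 */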
1263
1264 #ifdef CONFIG_STACK_GROWSUP
1265 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1266 {
1267 return expand_upwards(vma, address);
1268 }
1269
1270 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1271 {
1272 struct vm_area_struct *vma, *prev;
1273
1274 addr &= PAGE_MASK;
1275 vma = find_vma_prev(mm, addr, &prev);
1276 if (vma && (vma->vm_start <= addr))
1277 return vma;
1278 if (!prev)
1279 return NULL;
1280 if (expand_stack_locked(prev, addr))
1281 return NULL;
1282 if (prev->vm_flags & VM_LOCKED)
1283 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
1284 return prev;
1285 }
1286 #else
1287 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
1288 {
1289 return expand_downwards(vma, address);
1290 }
1291
1292 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
1293 {
1294 struct vm_area_struct *vma;
1295 unsigned long start;
1296
1297 addr &= PAGE_MASK;
1298 vma = find_vma(mm, addr);
1299 if (!vma)
1300 return NULL;
1301 if (vma->vm_start <= addr)
1302 return vma;
1303 start = vma->vm_start;
1304 if (expand_stack_locked(vma, addr))
1305 return NULL;
1306 if (vma->vm_flags & VM_LOCKED)
1307 populate_vma_page_range(vma, addr, start, NULL);
1308 return vma;
1309 }
1310 #endif
1311
1312 #if defined(CONFIG_STACK_GROWSUP)
1313
1314 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
1315 #define vma_expand_down(vma, addr) (-EFAULT)
1316
1317 #else
1318
1319 #define vma_expand_up(vma,addr) (-EFAULT)
1320 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
1321
1322 #endif
1323
1324 /*
1325 * expand_stack(): legacy interface for page faulting. Don't use unless
1326 * you have to.
1327 *
1328 * This is called with the mm locked for reading, drops the lock, takes
1329 * the lock for writing, tries to look up a vma again, expands it if
1330 * necessary, and downgrades the lock to reading again.
1331 *
1332 * If no vma is found or it can't be expanded, it returns NULL and has
1333 * dropped the lock.
1334 */
1335 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
1336 {
1337 struct vm_area_struct *vma, *prev;
1338
1339 mmap_read_unlock(mm);
1340 if (mmap_write_lock_killable(mm))
1341 return NULL;
1342
1343 vma = find_vma_prev(mm, addr, &prev);
1344 if (vma && vma->vm_start <= addr)
1345 goto success;
1346
1347 if (prev && !vma_expand_up(prev, addr)) {
1348 vma = prev;
1349 goto success;
1350 }
1351
1352 if (vma && !vma_expand_down(vma, addr))
1353 goto success;
1354
1355 mmap_write_unlock(mm);
1356 return NULL;
1357
1358 success:
1359 mmap_write_downgrade(mm);
1360 return vma;
1361 }
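
/*
 * Sketch of how an architecture fault handler typically uses expand_stack();
 * it is entered with mmap_lock held for reading, and bad_area_nosemaphore is
 * a hypothetical stand-in for the arch-specific error path:
 *
 *	vma = expand_stack(mm, address);
 *	if (!vma)
 *		return bad_area_nosemaphore(regs, address);
 *
 * On failure the lock has already been dropped; on success it is held for
 * reading again and normal fault handling can continue.
 */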
1362
1363 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
1364 * @mm: The mm_struct
1365 * @start: The start address to munmap
1366 * @len: The length to be munmapped.
1367 * @uf: The userfaultfd list_head
1368 *
1369 * Return: 0 on success, error otherwise.
1370 */
1371 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
1372 struct list_head *uf)
1373 {
1374 VMA_ITERATOR(vmi, mm, start);
1375
1376 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
1377 }
1378
1379 static unsigned long __mmap_region(struct file *file, unsigned long addr,
1380 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1381 struct list_head *uf)
1382 {
1383 struct mm_struct *mm = current->mm;
1384 struct vm_area_struct *vma = NULL;
1385 pgoff_t pglen = PHYS_PFN(len);
1386 unsigned long charged = 0;
1387 struct vma_munmap_struct vms;
1388 struct ma_state mas_detach;
1389 struct maple_tree mt_detach;
1390 unsigned long end = addr + len;
1391 int error;
1392 VMA_ITERATOR(vmi, mm, addr);
1393 VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
1394
1395 vmg.file = file;
1396 /* Find the first overlapping VMA */
1397 vma = vma_find(&vmi, end);
1398 init_vma_munmap(&vms, &vmi, vma, addr, end, uf, /* unlock = */ false);
1399 if (vma) {
1400 mt_init_flags(&mt_detach, vmi.mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
1401 mt_on_stack(mt_detach);
1402 mas_init(&mas_detach, &mt_detach, /* addr = */ 0);
1403 /* Prepare to unmap any existing mapping in the area */
1404 error = vms_gather_munmap_vmas(&vms, &mas_detach);
1405 if (error)
1406 goto gather_failed;
1407
1408 vmg.next = vms.next;
1409 vmg.prev = vms.prev;
1410 vma = NULL;
1411 } else {
1412 vmg.next = vma_iter_next_rewind(&vmi, &vmg.prev);
1413 }
1414
1415 /* Check against address space limit. */
1416 if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages)) {
1417 error = -ENOMEM;
1418 goto abort_munmap;
1419 }
1420
1421 /*
1422 * Private writable mapping: check memory availability
1423 */
1424 if (accountable_mapping(file, vm_flags)) {
1425 charged = pglen;
1426 charged -= vms.nr_accounted;
1427 if (charged) {
1428 error = security_vm_enough_memory_mm(mm, charged);
1429 if (error)
1430 goto abort_munmap;
1431 }
1432
1433 vms.nr_accounted = 0;
1434 vm_flags |= VM_ACCOUNT;
1435 vmg.flags = vm_flags;
1436 }
1437
1438 /*
1439 * clear PTEs while the vma is still in the tree so that rmap
1440 * cannot race with the freeing later in the truncate scenario.
1441 * This is also needed for mmap_file(), which is why vm_ops
1442 * close function is called.
1443 */
1444 vms_clean_up_area(&vms, &mas_detach);
1445 vma = vma_merge_new_range(&vmg);
1446 if (vma)
1447 goto expanded;
1448 /*
1449 * Determine the object being mapped and call the appropriate
1450 * specific mapper. The address has already been validated, but
1451 * not unmapped; the old maps, however, have been removed from the list.
1452 */
1453 vma = vm_area_alloc(mm);
1454 if (!vma) {
1455 error = -ENOMEM;
1456 goto unacct_error;
1457 }
1458
1459 vma_iter_config(&vmi, addr, end);
1460 vma_set_range(vma, addr, end, pgoff);
1461 vm_flags_init(vma, vm_flags);
1462 vma->vm_page_prot = vm_get_page_prot(vm_flags);
1463
1464 if (vma_iter_prealloc(&vmi, vma)) {
1465 error = -ENOMEM;
1466 goto free_vma;
1467 }
1468
1469 if (file) {
1470 vma->vm_file = get_file(file);
1471 error = mmap_file(file, vma);
1472 if (error)
1473 goto unmap_and_free_file_vma;
1474
1475 /* Drivers cannot alter the address of the VMA. */
1476 WARN_ON_ONCE(addr != vma->vm_start);
1477 /*
1478 * Drivers should not permit writability when previously it was
1479 * disallowed.
1480 */
1481 VM_WARN_ON_ONCE(vm_flags != vma->vm_flags &&
1482 !(vm_flags & VM_MAYWRITE) &&
1483 (vma->vm_flags & VM_MAYWRITE));
1484
1485 vma_iter_config(&vmi, addr, end);
1486 /*
1487 * If vm_flags changed after mmap_file(), we should try merge
1488 * vma again as we may succeed this time.
1489 */
1490 if (unlikely(vm_flags != vma->vm_flags && vmg.prev)) {
1491 struct vm_area_struct *merge;
1492
1493 vmg.flags = vma->vm_flags;
1494 /* If this fails, state is reset ready for a reattempt. */
1495 merge = vma_merge_new_range(&vmg);
1496
1497 if (merge) {
1498 /*
1499 * ->mmap() can change vma->vm_file and fput
1500 * the original file. So fput the vma->vm_file
1501 * here or we would add an extra fput for file
1502 * and cause general protection fault
1503 * ultimately.
1504 */
1505 fput(vma->vm_file);
1506 vm_area_free(vma);
1507 vma = merge;
1508 /* Update vm_flags to pick up the change. */
1509 vm_flags = vma->vm_flags;
1510 goto file_expanded;
1511 }
1512
1513 /*
1514 * In the unlikely event that more memory was needed, but
1515 * not available for the vma merge, the vma iterator
1516 * will have no memory reserved for the write we told
1517 * the driver was happening. To keep up the ruse,
1518 * ensure the allocation for the store succeeds.
1519 */
1520 if (vmg_nomem(&vmg)) {
1521 mas_preallocate(&vmi.mas, vma,
1522 GFP_KERNEL|__GFP_NOFAIL);
1523 }
1524 }
1525
1526 vm_flags = vma->vm_flags;
1527 } else if (vm_flags & VM_SHARED) {
1528 error = shmem_zero_setup(vma);
1529 if (error)
1530 goto free_iter_vma;
1531 } else {
1532 vma_set_anonymous(vma);
1533 }
1534
1535 #ifdef CONFIG_SPARC64
1536 /* TODO: Fix SPARC ADI! */
1537 WARN_ON_ONCE(!arch_validate_flags(vm_flags));
1538 #endif
1539
1540 /* Lock the VMA since it is modified after insertion into VMA tree */
1541 vma_start_write(vma);
1542 vma_iter_store_new(&vmi, vma);
1543 mm->map_count++;
1544 vma_link_file(vma);
1545
1546 /*
1547 * vma_merge_new_range() calls khugepaged_enter_vma() too, the below
1548 * call covers the non-merge case.
1549 */
1550 khugepaged_enter_vma(vma, vma->vm_flags);
1551
1552 file_expanded:
1553 file = vma->vm_file;
1554 ksm_add_vma(vma);
1555 expanded:
1556 perf_event_mmap(vma);
1557
1558 /* Unmap any existing mapping in the area */
1559 vms_complete_munmap_vmas(&vms, &mas_detach);
1560
1561 vm_stat_account(mm, vm_flags, pglen);
1562 if (vm_flags & VM_LOCKED) {
1563 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
1564 is_vm_hugetlb_page(vma) ||
1565 vma == get_gate_vma(current->mm))
1566 vm_flags_clear(vma, VM_LOCKED_MASK);
1567 else
1568 mm->locked_vm += pglen;
1569 }
1570
1571 if (file)
1572 uprobe_mmap(vma);
1573
1574 /*
1575 * A new (or expanded) vma always gets soft-dirty status.
1576 * Otherwise the user-space soft-dirty page tracker won't
1577 * be able to distinguish the situation where a vma area was unmapped
1578 * and then a new one mapped in place (which must be treated as
1579 * a completely new data area).
1580 */
1581 vm_flags_set(vma, VM_SOFTDIRTY);
1582
1583 vma_set_page_prot(vma);
1584
1585 trace_android_vh_mmap_region(vma, addr);
1586
1587 return addr;
1588
1589 unmap_and_free_file_vma:
1590 fput(vma->vm_file);
1591 vma->vm_file = NULL;
1592
1593 vma_iter_set(&vmi, vma->vm_end);
1594 /* Undo any partial mapping done by a device driver. */
1595 unmap_region(&vmi.mas, vma, vmg.prev, vmg.next);
1596 free_iter_vma:
1597 vma_iter_free(&vmi);
1598 free_vma:
1599 vm_area_free(vma);
1600 unacct_error:
1601 if (charged)
1602 vm_unacct_memory(charged);
1603
1604 abort_munmap:
1605 vms_abort_munmap_vmas(&vms, &mas_detach);
1606 gather_failed:
1607 return error;
1608 }
1609
1610 unsigned long mmap_region(struct file *file, unsigned long addr,
1611 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1612 struct list_head *uf)
1613 {
1614 unsigned long ret;
1615 bool writable_file_mapping = false;
1616
1617 /* Check to see if MDWE is applicable. */
1618 if (map_deny_write_exec(vm_flags, vm_flags))
1619 return -EACCES;
1620
1621 /* Allow architectures to sanity-check the vm_flags. */
1622 if (!arch_validate_flags(vm_flags))
1623 return -EINVAL;
1624
1625 /* Map writable and ensure this isn't a sealed memfd. */
1626 if (file && is_shared_maywrite(vm_flags)) {
1627 int error = mapping_map_writable(file->f_mapping);
1628
1629 if (error)
1630 return error;
1631 writable_file_mapping = true;
1632 }
1633
1634 ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf);
1635
1636 /* Clear our write mapping regardless of error. */
1637 if (writable_file_mapping)
1638 mapping_unmap_writable(file->f_mapping);
1639
1640 validate_mm(current->mm);
1641 return ret;
1642 }
1643
1644 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
1645 {
1646 int ret;
1647 struct mm_struct *mm = current->mm;
1648 LIST_HEAD(uf);
1649 VMA_ITERATOR(vmi, mm, start);
1650
1651 if (mmap_write_lock_killable(mm))
1652 return -EINTR;
1653
1654 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
1655 if (ret || !unlock)
1656 mmap_write_unlock(mm);
1657
1658 userfaultfd_unmap_complete(mm, &uf);
1659 return ret;
1660 }
1661
1662 int vm_munmap(unsigned long start, size_t len)
1663 {
1664 return __vm_munmap(start, len, false);
1665 }
1666 EXPORT_SYMBOL(vm_munmap);
1667
1668 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
1669 {
1670 addr = untagged_addr(addr);
1671
1672 if (!__PAGE_ALIGNED(addr))
1673 return -EINVAL;
1674
1675 len = __PAGE_ALIGN(len);
1676
1677 profile_munmap(addr);
1678 return __vm_munmap(addr, len, true);
1679 }
1680
1681
1682 /*
1683 * Emulation of deprecated remap_file_pages() syscall.
1684 */
1685 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
1686 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
1687 {
1688
1689 struct mm_struct *mm = current->mm;
1690 struct vm_area_struct *vma;
1691 unsigned long populate = 0;
1692 unsigned long ret = -EINVAL;
1693 struct file *file;
1694 vm_flags_t vm_flags;
1695
1696 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
1697 current->comm, current->pid);
1698
1699 if (prot)
1700 return ret;
1701 start = start & PAGE_MASK;
1702 size = size & PAGE_MASK;
1703
1704 if (start + size <= start)
1705 return ret;
1706
1707 /* Does pgoff wrap? */
1708 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
1709 return ret;
1710
1711 if (mmap_read_lock_killable(mm))
1712 return -EINTR;
1713
1714 /*
1715 * Look up VMA under read lock first so we can perform the security
1716 * without holding locks (which can be problematic). We reacquire a
1717 * write lock later and check nothing changed underneath us.
1718 */
1719 vma = vma_lookup(mm, start);
1720
1721 if (!vma || !(vma->vm_flags & VM_SHARED)) {
1722 mmap_read_unlock(mm);
1723 return -EINVAL;
1724 }
1725
1726 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
1727 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
1728 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
1729
1730 flags &= MAP_NONBLOCK;
1731 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
1732 if (vma->vm_flags & VM_LOCKED)
1733 flags |= MAP_LOCKED;
1734
1735 /* Save vm_flags used to calculate prot and flags, and recheck later. */
1736 vm_flags = vma->vm_flags;
1737 file = get_file(vma->vm_file);
1738
1739 mmap_read_unlock(mm);
1740
1741 /* Call outside mmap_lock to be consistent with other callers. */
1742 ret = security_mmap_file(file, prot, flags);
1743 if (ret) {
1744 fput(file);
1745 return ret;
1746 }
1747
1748 ret = -EINVAL;
1749
1750 /* OK security check passed, take write lock + let it rip. */
1751 if (mmap_write_lock_killable(mm)) {
1752 fput(file);
1753 return -EINTR;
1754 }
1755
1756 vma = vma_lookup(mm, start);
1757
1758 if (!vma)
1759 goto out;
1760
1761 /* Make sure things didn't change under us. */
1762 if (vma->vm_flags != vm_flags)
1763 goto out;
1764 if (vma->vm_file != file)
1765 goto out;
1766
1767 if (start + size > vma->vm_end) {
1768 VMA_ITERATOR(vmi, mm, vma->vm_end);
1769 struct vm_area_struct *next, *prev = vma;
1770
1771 for_each_vma_range(vmi, next, start + size) {
1772 /* hole between vmas ? */
1773 if (next->vm_start != prev->vm_end)
1774 goto out;
1775
1776 if (next->vm_file != vma->vm_file)
1777 goto out;
1778
1779 if (next->vm_flags != vma->vm_flags)
1780 goto out;
1781
1782 if (start + size <= next->vm_end)
1783 break;
1784
1785 prev = next;
1786 }
1787
1788 if (!next)
1789 goto out;
1790 }
1791
1792 ret = do_mmap(vma->vm_file, start, size,
1793 prot, flags, 0, pgoff, &populate, NULL);
1794 out:
1795 mmap_write_unlock(mm);
1796 fput(file);
1797 if (populate)
1798 mm_populate(ret, populate);
1799 if (!IS_ERR_VALUE(ret))
1800 ret = 0;
1801 return ret;
1802 }
1803
1804 /*
1805 * do_brk_flags() - Increase the brk vma if the flags match.
1806 * @vmi: The vma iterator
1807 * @addr: The start address
1808 * @len: The length of the increase
1809 * @vma: The vma,
1810 * @flags: The VMA Flags
1811 *
1812 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
1813 * do not match then create a new anonymous VMA. Eventually we may be able to
1814 * do some brk-specific accounting here.
1815 */
1816 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
1817 unsigned long addr, unsigned long len, unsigned long flags)
1818 {
1819 struct mm_struct *mm = current->mm;
1820
1821 /*
1822	 * Check the changed size against the address space limits.
1823	 * Note: this happens *after* clearing old mappings in some code paths.
1824 */
1825 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
1826 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
1827 return -ENOMEM;
1828
1829 if (mm->map_count > sysctl_max_map_count)
1830 return -ENOMEM;
1831
1832 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
1833 return -ENOMEM;
1834
1835 /*
1836 * Expand the existing vma if possible; Note that singular lists do not
1837 * occur after forking, so the expand will only happen on new VMAs.
1838 */
1839 if (vma && vma->vm_end == addr) {
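		/*
		 * Anonymous mappings use the virtual address as pgoff
		 * (PHYS_PFN(addr)), which keeps the new range mergeable
		 * with the existing brk VMA.
		 */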
1840 VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr));
1841
1842 vmg.prev = vma;
1843 /* vmi is positioned at prev, which this mode expects. */
1844 vmg.merge_flags = VMG_FLAG_JUST_EXPAND;
1845
1846 if (vma_merge_new_range(&vmg))
1847 goto out;
1848 else if (vmg_nomem(&vmg))
1849 goto unacct_fail;
1850 }
1851
1852 if (vma)
1853 vma_iter_next_range(vmi);
1854 /* create a vma struct for an anonymous mapping */
1855 vma = vm_area_alloc(mm);
1856 if (!vma)
1857 goto unacct_fail;
1858
1859 vma_set_anonymous(vma);
1860 vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
1861 vm_flags_init(vma, flags);
1862 vma->vm_page_prot = vm_get_page_prot(flags);
1863 vma_start_write(vma);
1864 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
1865 goto mas_store_fail;
1866
1867 mm->map_count++;
1868 validate_mm(mm);
1869 ksm_add_vma(vma);
1870 out:
1871 perf_event_mmap(vma);
1872 mm->total_vm += len >> PAGE_SHIFT;
1873 mm->data_vm += len >> PAGE_SHIFT;
1874 if (flags & VM_LOCKED)
1875 mm->locked_vm += (len >> PAGE_SHIFT);
1876 vm_flags_set(vma, VM_SOFTDIRTY);
1877 return 0;
1878
1879 mas_store_fail:
1880 vm_area_free(vma);
1881 unacct_fail:
1882 vm_unacct_memory(len >> PAGE_SHIFT);
1883 return -ENOMEM;
1884 }
1885
1886 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
1887 {
1888 struct mm_struct *mm = current->mm;
1889 struct vm_area_struct *vma = NULL;
1890 unsigned long len;
1891 int ret;
1892 bool populate;
1893 LIST_HEAD(uf);
1894 VMA_ITERATOR(vmi, mm, addr);
1895
1896 len = PAGE_ALIGN(request);
1897 if (len < request)
1898 return -ENOMEM;
1899 if (!len)
1900 return 0;
1901
1902 /* Until we need other flags, refuse anything except VM_EXEC. */
1903 if ((flags & (~VM_EXEC)) != 0)
1904 return -EINVAL;
1905
1906 if (mmap_write_lock_killable(mm))
1907 return -EINTR;
1908
1909 ret = check_brk_limits(addr, len);
1910 if (ret)
1911 goto limits_failed;
1912
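	/* Clear out any existing mappings in the requested range first. */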
1913 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
1914 if (ret)
1915 goto munmap_failed;
1916
1917 vma = vma_prev(&vmi);
1918 ret = do_brk_flags(&vmi, vma, addr, len, flags);
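	/* mlockall(MCL_FUTURE) sets VM_LOCKED in def_flags; prefault if so. */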
1919 populate = ((mm->def_flags & VM_LOCKED) != 0);
1920 mmap_write_unlock(mm);
1921 userfaultfd_unmap_complete(mm, &uf);
1922 if (populate && !ret)
1923 mm_populate(addr, len);
1924 return ret;
1925
1926 munmap_failed:
1927 limits_failed:
1928 mmap_write_unlock(mm);
1929 return ret;
1930 }
1931 EXPORT_SYMBOL(vm_brk_flags);
1932
1933 /* Release all mmaps. */
1934 void exit_mmap(struct mm_struct *mm)
1935 {
1936 struct mmu_gather tlb;
1937 struct vm_area_struct *vma;
1938 unsigned long nr_accounted = 0;
1939 VMA_ITERATOR(vmi, mm, 0);
1940 int count = 0;
1941
1942	/* mm's last user has gone, and it's about to be pulled down */
1943 mmu_notifier_release(mm);
1944
1945 mmap_read_lock(mm);
1946 arch_exit_mmap(mm);
1947
1948 vma = vma_next(&vmi);
1949 if (!vma || unlikely(xa_is_zero(vma))) {
1950 /* Can happen if dup_mmap() received an OOM */
1951 mmap_read_unlock(mm);
1952 mmap_write_lock(mm);
1953 goto destroy;
1954 }
1955
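	/*
	 * Unmap the pages while only holding the read lock so that, for
	 * example, the oom reaper can still make progress in parallel;
	 * page tables and VMAs are freed later under the write lock.
	 */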
1956 lru_add_drain();
1957 flush_cache_mm(mm);
1958 tlb_gather_mmu_fullmm(&tlb, mm);
1959 trace_android_vh_swapmem_gather_init(mm);
1960 /* update_hiwater_rss(mm) here? but nobody should be looking */
1961 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
1962 unmap_vmas(&tlb, &vmi.mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
1963 trace_android_vh_swapmem_gather_finish(mm);
1964 mmap_read_unlock(mm);
1965
1966 /*
1967 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
1968 * because the memory has been already freed.
1969 */
1970 set_bit(MMF_OOM_SKIP, &mm->flags);
1971 mmap_write_lock(mm);
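	/*
	 * No lockless readers remain, so the maple tree no longer needs to
	 * operate in RCU mode; clearing it makes the final teardown cheaper.
	 */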
1972 mt_clear_in_rcu(&mm->mm_mt);
1973 vma_iter_set(&vmi, vma->vm_end);
1974 free_pgtables(&tlb, &vmi.mas, vma, FIRST_USER_ADDRESS,
1975 USER_PGTABLES_CEILING, true);
1976 tlb_finish_mmu(&tlb);
1977
1978 /*
1979 * Walk the list again, actually closing and freeing it, with preemption
1980 * enabled, without holding any MM locks besides the unreachable
1981 * mmap_write_lock.
1982 */
1983 vma_iter_set(&vmi, vma->vm_end);
1984 do {
1985 if (vma->vm_flags & VM_ACCOUNT)
1986 nr_accounted += vma_pages(vma);
1987 vma_mark_detached(vma);
1988 remove_vma(vma);
1989 count++;
1990 cond_resched();
1991 vma = vma_next(&vmi);
1992 } while (vma && likely(!xa_is_zero(vma)));
1993
1994 BUG_ON(count != mm->map_count);
1995
1996 trace_exit_mmap(mm);
1997 destroy:
1998 __mt_destroy(&mm->mm_mt);
1999 mmap_write_unlock(mm);
2000 vm_unacct_memory(nr_accounted);
2001 }
2002
2003 /* Insert vm structure into process list sorted by address
2004 * and into the inode's i_mmap tree. If vm_file is non-NULL
2005 * then i_mmap_rwsem is taken here.
2006 */
2007 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2008 {
2009 unsigned long charged = vma_pages(vma);
2010
2011
2012 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
2013 return -ENOMEM;
2014
2015 if ((vma->vm_flags & VM_ACCOUNT) &&
2016 security_vm_enough_memory_mm(mm, charged))
2017 return -ENOMEM;
2018
2019 /*
2020 * The vm_pgoff of a purely anonymous vma should be irrelevant
2021 * until its first write fault, when page's anon_vma and index
2022 * are set. But now set the vm_pgoff it will almost certainly
2023 * end up with (unless mremap moves it elsewhere before that
2024	 * first write fault), so /proc/pid/maps tells a consistent story.
2025 *
2026 * By setting it to reflect the virtual start address of the
2027 * vma, merges and splits can happen in a seamless way, just
2028 * using the existing file pgoff checks and manipulations.
2029 * Similarly in do_mmap and in do_brk_flags.
2030 */
2031 if (vma_is_anonymous(vma)) {
2032 BUG_ON(vma->anon_vma);
2033 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2034 }
2035
2036 if (vma_link(mm, vma)) {
2037 if (vma->vm_flags & VM_ACCOUNT)
2038 vm_unacct_memory(charged);
2039 return -ENOMEM;
2040 }
2041
2042 return 0;
2043 }
2044
2045 /*
2046 * Return true if the calling process may expand its vm space by the passed
2047 * number of pages
2048 */
2049 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
2050 {
2051 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
2052 return false;
2053
2054 if (is_data_mapping(flags) &&
2055 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
2056 /* Workaround for Valgrind */
2057 if (rlimit(RLIMIT_DATA) == 0 &&
2058 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
2059 return true;
2060
2061		pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
2062 current->comm, current->pid,
2063 (mm->data_vm + npages) << PAGE_SHIFT,
2064 rlimit(RLIMIT_DATA),
2065 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
2066
2067 if (!ignore_rlimit_data)
2068 return false;
2069 }
2070
2071 return true;
2072 }
2073
2074 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
2075 {
2076 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
2077
2078 if (is_exec_mapping(flags))
2079 mm->exec_vm += npages;
2080 else if (is_stack_mapping(flags))
2081 mm->stack_vm += npages;
2082 else if (is_data_mapping(flags))
2083 mm->data_vm += npages;
2084 }
2085
2086 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
2087
2088 /*
2089 * Close hook, called for unmap() and on the old vma for mremap().
2090 *
2091 * Having a close hook prevents vma merging regardless of flags.
2092 */
2093 static void special_mapping_close(struct vm_area_struct *vma)
2094 {
2095 const struct vm_special_mapping *sm = vma->vm_private_data;
2096
2097 if (sm->close)
2098 sm->close(sm, vma);
2099 }
2100
2101 static const char *special_mapping_name(struct vm_area_struct *vma)
2102 {
2103 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
2104 }
2105
2106 static int special_mapping_mremap(struct vm_area_struct *new_vma)
2107 {
2108 struct vm_special_mapping *sm = new_vma->vm_private_data;
2109
2110 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
2111 return -EFAULT;
2112
2113 if (sm->mremap)
2114 return sm->mremap(sm, new_vma);
2115
2116 return 0;
2117 }
2118
2119 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
2120 {
2121 /*
2122	 * Forbid splitting special mappings - the kernel has expectations about
2123	 * the number of pages in the mapping. Together with VM_DONTEXPAND
2124 * the size of vma should stay the same over the special mapping's
2125 * lifetime.
2126 */
2127 return -EINVAL;
2128 }
2129
2130 static const struct vm_operations_struct special_mapping_vmops = {
2131 .close = special_mapping_close,
2132 .fault = special_mapping_fault,
2133 .mremap = special_mapping_mremap,
2134 .name = special_mapping_name,
2135	/* vDSO code relies on the fact that VVAR can't be accessed remotely */
2136 .access = NULL,
2137 .may_split = special_mapping_split,
2138 };
2139
2140 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
2141 {
2142 struct vm_area_struct *vma = vmf->vma;
2143 pgoff_t pgoff;
2144 struct page **pages;
2145 struct vm_special_mapping *sm = vma->vm_private_data;
2146
2147 if (sm->fault)
2148 return sm->fault(sm, vmf->vma, vmf);
2149
2150 pages = sm->pages;
2151
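	/* The pages array is NULL-terminated; walk to the faulting offset. */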
2152 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
2153 pgoff--;
2154
2155 if (*pages) {
2156 struct page *page = *pages;
2157 get_page(page);
2158 vmf->page = page;
2159 return 0;
2160 }
2161
2162 return VM_FAULT_SIGBUS;
2163 }
2164
2165 static struct vm_area_struct *__install_special_mapping(
2166 struct mm_struct *mm,
2167 unsigned long addr, unsigned long len,
2168 unsigned long vm_flags, void *priv,
2169 const struct vm_operations_struct *ops)
2170 {
2171 int ret;
2172 struct vm_area_struct *vma;
2173
2174 vma = vm_area_alloc(mm);
2175 if (unlikely(vma == NULL))
2176 return ERR_PTR(-ENOMEM);
2177
2178 vma_set_range(vma, addr, addr + len, 0);
2179 vm_flags_init(vma, (vm_flags | mm->def_flags |
2180 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
2181 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2182
2183 vma->vm_ops = ops;
2184 vma->vm_private_data = priv;
2185
2186 ret = insert_vm_struct(mm, vma);
2187 if (ret)
2188 goto out;
2189
2190 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
2191
2192 perf_event_mmap(vma);
2193
2194 return vma;
2195
2196 out:
2197 vm_area_free(vma);
2198 return ERR_PTR(ret);
2199 }
2200
2201 bool vma_is_special_mapping(const struct vm_area_struct *vma,
2202 const struct vm_special_mapping *sm)
2203 {
2204 return vma->vm_private_data == sm &&
2205 vma->vm_ops == &special_mapping_vmops;
2206 }
2207
2208 /*
2209 * Called with mm->mmap_lock held for writing.
2210 * Insert a new vma covering the given region, with the given flags.
2211 * Its pages are supplied by the given array of struct page *.
2212 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2213 * The region past the last page supplied will always produce SIGBUS.
2214 * The array pointer and the pages it points to are assumed to stay alive
2215 * for as long as this mapping might exist.
2216 */
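/*
 * Illustrative sketch only (not taken from any particular caller): an
 * architecture exposing a single preallocated page this way might do
 * something like
 *
 *	static struct page *pages[2] = { page, NULL };
 *	static const struct vm_special_mapping spec = {
 *		.name  = "[example]",
 *		.pages = pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 *				       VM_READ | VM_MAYREAD, &spec);
 *
 * where "page", "addr" and the flag choice are purely hypothetical.
 */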
2217 struct vm_area_struct *_install_special_mapping(
2218 struct mm_struct *mm,
2219 unsigned long addr, unsigned long len,
2220 unsigned long vm_flags, const struct vm_special_mapping *spec)
2221 {
2222 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
2223 &special_mapping_vmops);
2224 }
2225
2226 /*
2227 * initialise the percpu counter for VM
2228 */
2229 void __init mmap_init(void)
2230 {
2231 int ret;
2232
2233 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
2234 VM_BUG_ON(ret);
2235 }
2236
2237 /*
2238 * Initialise sysctl_user_reserve_kbytes.
2239 *
2240 * This reserve is intended to stop a single memory-hogging process from
2241 * consuming so much memory that the user cannot recover (kill the hog)
2242 * in OVERCOMMIT_NEVER mode.
2243 *
2244 * The default value is min(3% of free memory, 128MB)
2245 * 128MB is enough to recover with sshd/login, bash, and top/kill.
2246 */
2247 static int init_user_reserve(void)
2248 {
2249 unsigned long free_kbytes;
2250
2251 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2252
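	/* free_kbytes / 32 is roughly 3%; SZ_128K kilobytes is 128MB. */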
2253 sysctl_user_reserve_kbytes = min(free_kbytes / 32, SZ_128K);
2254 return 0;
2255 }
2256 subsys_initcall(init_user_reserve);
2257
2258 /*
2259 * Initialise sysctl_admin_reserve_kbytes.
2260 *
2261 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
2262 * to log in and kill a memory hogging process.
2263 *
2264 * Systems with more than 256MB will reserve 8MB, enough to recover
2265 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
2266 * only reserve 3% of free pages by default.
2267 */
2268 static int init_admin_reserve(void)
2269 {
2270 unsigned long free_kbytes;
2271
2272 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2273
2274 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, SZ_8K);
2275 return 0;
2276 }
2277 subsys_initcall(init_admin_reserve);
2278
2279 /*
2280 * Reinitialise user and admin reserves if memory is added or removed.
2281 *
2282 * The default user reserve max is 128MB, and the default max for the
2283 * admin reserve is 8MB. These are usually, but not always, enough to
2284 * enable recovery from a memory hogging process using login/sshd, a shell,
2285 * and tools like top. It may make sense to increase or even disable the
2286 * reserve depending on the existence of swap or variations in the recovery
2287 * tools. So, the admin may have changed them.
2288 *
2289 * If memory is added and the reserves have been eliminated or increased above
2290 * the default max, then we'll trust the admin.
2291 *
2292 * If memory is removed and there isn't enough free memory, then we
2293 * need to reset the reserves.
2294 *
2295 * Otherwise keep the reserve set by the admin.
2296 */
2297 static int reserve_mem_notifier(struct notifier_block *nb,
2298 unsigned long action, void *data)
2299 {
2300 unsigned long tmp, free_kbytes;
2301
2302 switch (action) {
2303 case MEM_ONLINE:
2304 /* Default max is 128MB. Leave alone if modified by operator. */
2305 tmp = sysctl_user_reserve_kbytes;
2306 if (tmp > 0 && tmp < SZ_128K)
2307 init_user_reserve();
2308
2309 /* Default max is 8MB. Leave alone if modified by operator. */
2310 tmp = sysctl_admin_reserve_kbytes;
2311 if (tmp > 0 && tmp < SZ_8K)
2312 init_admin_reserve();
2313
2314 break;
2315 case MEM_OFFLINE:
2316 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
2317
2318 if (sysctl_user_reserve_kbytes > free_kbytes) {
2319 init_user_reserve();
2320 pr_info("vm.user_reserve_kbytes reset to %lu\n",
2321 sysctl_user_reserve_kbytes);
2322 }
2323
2324 if (sysctl_admin_reserve_kbytes > free_kbytes) {
2325 init_admin_reserve();
2326 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
2327 sysctl_admin_reserve_kbytes);
2328 }
2329 break;
2330 default:
2331 break;
2332 }
2333 return NOTIFY_OK;
2334 }
2335
2336 static int __meminit init_reserve_notifier(void)
2337 {
2338 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
2339 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
2340
2341 return 0;
2342 }
2343 subsys_initcall(init_reserve_notifier);
2344
2345 /*
2346 * Relocate a VMA downwards by shift bytes. There cannot be any VMAs between
2347 * this VMA and its relocated range, which will now reside at [vma->vm_start -
2348 * shift, vma->vm_end - shift).
2349 *
2350 * This function is almost certainly NOT what you want for anything other than
2351 * early executable temporary stack relocation.
2352 */
2353 int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
2354 {
2355 /*
2356 * The process proceeds as follows:
2357 *
2358 * 1) Use shift to calculate the new vma endpoints.
2359 * 2) Extend vma to cover both the old and new ranges. This ensures the
2360 * arguments passed to subsequent functions are consistent.
2361 * 3) Move vma's page tables to the new range.
2362 * 4) Free up any cleared pgd range.
2363 * 5) Shrink the vma to cover only the new range.
2364 */
2365
2366 struct mm_struct *mm = vma->vm_mm;
2367 unsigned long old_start = vma->vm_start;
2368 unsigned long old_end = vma->vm_end;
2369 unsigned long length = old_end - old_start;
2370 unsigned long new_start = old_start - shift;
2371 unsigned long new_end = old_end - shift;
2372 VMA_ITERATOR(vmi, mm, new_start);
2373 VMG_STATE(vmg, mm, &vmi, new_start, old_end, 0, vma->vm_pgoff);
2374 struct vm_area_struct *next;
2375 struct mmu_gather tlb;
2376
2377 BUG_ON(new_start > new_end);
2378
2379 /*
2380 * ensure there are no vmas between where we want to go
2381 * and where we are
2382 */
2383 if (vma != vma_next(&vmi))
2384 return -EFAULT;
2385
2386 vma_iter_prev_range(&vmi);
2387 /*
2388 * cover the whole range: [new_start, old_end)
2389 */
2390 vmg.vma = vma;
2391 if (vma_expand(&vmg))
2392 return -ENOMEM;
2393
2394 /*
2395	 * move the page tables downwards; on failure we rely on
2396 * process cleanup to remove whatever mess we made.
2397 */
2398 if (length != move_page_tables(vma, old_start,
2399 vma, new_start, length, false, true))
2400 return -ENOMEM;
2401
2402 lru_add_drain();
2403 tlb_gather_mmu(&tlb, mm);
2404 next = vma_next(&vmi);
2405 if (new_end > old_start) {
2406 /*
2407		 * when the old and new regions overlap, clear from new_end.
2408 */
2409 free_pgd_range(&tlb, new_end, old_end, new_end,
2410 next ? next->vm_start : USER_PGTABLES_CEILING);
2411 } else {
2412 /*
2413		 * otherwise, clean from old_start; this is done to not touch
2414		 * the address space in [new_end, old_start), as some architectures
2415		 * have constraints on va-space that make this illegal (IA64) -
2416		 * for the others it's just a little faster.
2417 */
2418 free_pgd_range(&tlb, old_start, old_end, new_end,
2419 next ? next->vm_start : USER_PGTABLES_CEILING);
2420 }
2421 tlb_finish_mmu(&tlb);
2422
2423 vma_prev(&vmi);
2424 /* Shrink the vma to just the new range */
2425 return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff);
2426 }
2427