1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/mmap.c
4 *
5 * Written by obz.
6 *
7 * Address space accounting code <alan@lxorguk.ukuu.org.uk>
8 */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/mm_inline.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/page_size_compat.h>
21 #include <linux/swap.h>
22 #include <linux/syscalls.h>
23 #include <linux/capability.h>
24 #include <linux/init.h>
25 #include <linux/file.h>
26 #include <linux/fs.h>
27 #include <linux/pgsize_migration.h>
28 #include <linux/personality.h>
29 #include <linux/security.h>
30 #include <linux/hugetlb.h>
31 #include <linux/shmem_fs.h>
32 #include <linux/profile.h>
33 #include <linux/export.h>
34 #include <linux/mount.h>
35 #include <linux/mempolicy.h>
36 #include <linux/rmap.h>
37 #include <linux/mmu_notifier.h>
38 #include <linux/mmdebug.h>
39 #include <linux/perf_event.h>
40 #include <linux/audit.h>
41 #include <linux/khugepaged.h>
42 #include <linux/uprobes.h>
43 #include <linux/notifier.h>
44 #include <linux/memory.h>
45 #include <linux/printk.h>
46 #include <linux/userfaultfd_k.h>
47 #include <linux/moduleparam.h>
48 #include <linux/pkeys.h>
49 #include <linux/oom.h>
50 #include <linux/sched/mm.h>
51 #include <linux/ksm.h>
52
53 #include <linux/uaccess.h>
54 #include <asm/cacheflush.h>
55 #include <asm/tlb.h>
56 #include <asm/mmu_context.h>
57
58 #define CREATE_TRACE_POINTS
59 #include <trace/events/mmap.h>
60 #undef CREATE_TRACE_POINTS
61 #include <trace/hooks/mm.h>
62
63 #include "internal.h"
64
65 EXPORT_TRACEPOINT_SYMBOL_GPL(vm_unmapped_area);
66
67 #ifndef arch_mmap_check
68 #define arch_mmap_check(addr, len, flags) (0)
69 #endif
70
71 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
72 int mmap_rnd_bits_min __read_mostly = CONFIG_ARCH_MMAP_RND_BITS_MIN;
73 int mmap_rnd_bits_max __read_mostly = CONFIG_ARCH_MMAP_RND_BITS_MAX;
74 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
75 #endif
76 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
77 int mmap_rnd_compat_bits_min __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
78 int mmap_rnd_compat_bits_max __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
79 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
80 #endif
81
82 static bool ignore_rlimit_data;
83 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
84
85 static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
86 struct vm_area_struct *vma, struct vm_area_struct *prev,
87 struct vm_area_struct *next, unsigned long start,
88 unsigned long end, unsigned long tree_end, bool mm_wr_locked);
89
90 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
91 {
92 return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
93 }
94
95 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
96 void vma_set_page_prot(struct vm_area_struct *vma)
97 {
98 unsigned long vm_flags = vma->vm_flags;
99 pgprot_t vm_page_prot;
100
101 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
102 if (vma_wants_writenotify(vma, vm_page_prot)) {
103 vm_flags &= ~VM_SHARED;
104 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
105 }
106 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
107 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
108 }
109
110 /*
111 * Requires inode->i_mapping->i_mmap_rwsem
112 */
113 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
114 struct file *file, struct address_space *mapping)
115 {
116 if (vma->vm_flags & VM_SHARED)
117 mapping_unmap_writable(mapping);
118
119 flush_dcache_mmap_lock(mapping);
120 vma_interval_tree_remove(vma, &mapping->i_mmap);
121 flush_dcache_mmap_unlock(mapping);
122 }
123
124 /*
125 * Unlink a file-based vm structure from its interval tree, to hide
126 * vma from rmap and vmtruncate before freeing its page tables.
127 */
128 void unlink_file_vma(struct vm_area_struct *vma)
129 {
130 struct file *file = vma->vm_file;
131
132 if (file) {
133 struct address_space *mapping = file->f_mapping;
134 i_mmap_lock_write(mapping);
135 __remove_shared_vm_struct(vma, file, mapping);
136 i_mmap_unlock_write(mapping);
137 }
138 }
139
140 /*
141 * Close a vm structure and free it.
142 */
143 static void remove_vma(struct vm_area_struct *vma, bool unreachable)
144 {
145 might_sleep();
146 if (vma->vm_ops && vma->vm_ops->close)
147 vma->vm_ops->close(vma);
148 if (vma->vm_file)
149 fput(vma->vm_file);
150 mpol_put(vma_policy(vma));
151 if (unreachable)
152 __vm_area_free(vma);
153 else
154 vm_area_free(vma);
155 }
156
157 static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
158 unsigned long min)
159 {
160 return mas_prev(&vmi->mas, min);
161 }
162
163 /*
164 * check_brk_limits() - Use the platform-specific range check and verify mlock
165 * limits.
166 * @addr: The address to check
167 * @len: The size of increase.
168 *
169 * Return: 0 on success.
170 */
171 static int check_brk_limits(unsigned long addr, unsigned long len)
172 {
173 unsigned long mapped_addr;
174
175 mapped_addr = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
176 if (IS_ERR_VALUE(mapped_addr))
177 return mapped_addr;
178
179 return mlock_future_ok(current->mm, current->mm->def_flags, len)
180 ? 0 : -EAGAIN;
181 }
182 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
183 unsigned long addr, unsigned long request, unsigned long flags);
184 SYSCALL_DEFINE1(brk, unsigned long, brk)
185 {
186 unsigned long newbrk, oldbrk, origbrk;
187 struct mm_struct *mm = current->mm;
188 struct vm_area_struct *brkvma, *next = NULL;
189 unsigned long min_brk;
190 bool populate = false;
191 LIST_HEAD(uf);
192 struct vma_iterator vmi;
193
194 if (mmap_write_lock_killable(mm))
195 return -EINTR;
196
197 origbrk = mm->brk;
198
199 #ifdef CONFIG_COMPAT_BRK
200 /*
201 * CONFIG_COMPAT_BRK can still be overridden by setting
202 * randomize_va_space to 2, which will still cause mm->start_brk
203 * to be arbitrarily shifted
204 */
205 if (current->brk_randomized)
206 min_brk = mm->start_brk;
207 else
208 min_brk = mm->end_data;
209 #else
210 min_brk = mm->start_brk;
211 #endif
212 if (brk < min_brk)
213 goto out;
214
215 /*
216 * Check against rlimit here. If this check is done later after the test
217 * of oldbrk with newbrk then it can escape the test and let the data
218 * segment grow beyond its set limit in the case where the limit is
219 * not page aligned -Ram Gupta
220 */
221 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
222 mm->end_data, mm->start_data))
223 goto out;
224
225 newbrk = __PAGE_ALIGN(brk);
226 oldbrk = __PAGE_ALIGN(mm->brk);
227 if (oldbrk == newbrk) {
228 mm->brk = brk;
229 goto success;
230 }
231
232 /* Always allow shrinking brk. */
233 if (brk <= mm->brk) {
234 /* Search one past newbrk */
235 vma_iter_init(&vmi, mm, newbrk);
236 brkvma = vma_find(&vmi, oldbrk);
237 if (!brkvma || brkvma->vm_start >= oldbrk)
238 goto out; /* mapping intersects with an existing non-brk vma. */
239 /*
240 * mm->brk must be protected by write mmap_lock.
241 * do_vma_munmap() will drop the lock on success, so update it
242 * before calling do_vma_munmap().
243 */
244 mm->brk = brk;
245 if (do_vma_munmap(&vmi, brkvma, newbrk, oldbrk, &uf, true))
246 goto out;
247
248 goto success_unlocked;
249 }
250
251 if (check_brk_limits(oldbrk, newbrk - oldbrk))
252 goto out;
253
254 /*
255 * Only check if the next VMA is within the stack_guard_gap of the
256 * expansion area
257 */
258 vma_iter_init(&vmi, mm, oldbrk);
259 next = vma_find(&vmi, newbrk + __PAGE_SIZE + stack_guard_gap);
260 if (next && newbrk + __PAGE_SIZE > vm_start_gap(next))
261 goto out;
262
263 brkvma = vma_prev_limit(&vmi, mm->start_brk);
264 /* Ok, looks good - let it rip. */
265 if (do_brk_flags(&vmi, brkvma, oldbrk, newbrk - oldbrk, 0) < 0)
266 goto out;
267
268 mm->brk = brk;
269 if (mm->def_flags & VM_LOCKED)
270 populate = true;
271
272 success:
273 mmap_write_unlock(mm);
274 success_unlocked:
275 userfaultfd_unmap_complete(mm, &uf);
276 if (populate)
277 mm_populate(oldbrk, newbrk - oldbrk);
278 return brk;
279
280 out:
281 mm->brk = origbrk;
282 mmap_write_unlock(mm);
283 return origbrk;
284 }
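/*
 * Illustrative example (hypothetical values, 4 KiB pages): if mm->brk is
 * 0x1000100 and userspace calls brk(0x1000800), both values round up to
 * 0x1001000, so oldbrk == newbrk above and only mm->brk is updated -- no
 * new pages are mapped. A request that crosses a page boundary instead
 * goes through do_brk_flags() (growing) or do_vma_munmap() (shrinking).
 */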
285
286 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
287 static void validate_mm(struct mm_struct *mm)
288 {
289 int bug = 0;
290 int i = 0;
291 struct vm_area_struct *vma;
292 VMA_ITERATOR(vmi, mm, 0);
293
294 mt_validate(&mm->mm_mt);
295 for_each_vma(vmi, vma) {
296 #ifdef CONFIG_DEBUG_VM_RB
297 struct anon_vma *anon_vma = vma->anon_vma;
298 struct anon_vma_chain *avc;
299 #endif
300 unsigned long vmi_start, vmi_end;
301 bool warn = 0;
302
303 vmi_start = vma_iter_addr(&vmi);
304 vmi_end = vma_iter_end(&vmi);
305 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
306 warn = 1;
307
308 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
309 warn = 1;
310
311 if (warn) {
312 pr_emerg("issue in %s\n", current->comm);
313 dump_stack();
314 dump_vma(vma);
315 pr_emerg("tree range: %px start %lx end %lx\n", vma,
316 vmi_start, vmi_end - 1);
317 vma_iter_dump_tree(&vmi);
318 }
319
320 #ifdef CONFIG_DEBUG_VM_RB
321 if (anon_vma) {
322 anon_vma_lock_read(anon_vma);
323 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
324 anon_vma_interval_tree_verify(avc);
325 anon_vma_unlock_read(anon_vma);
326 }
327 #endif
328 i++;
329 }
330 if (i != mm->map_count) {
331 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
332 bug = 1;
333 }
334 VM_BUG_ON_MM(bug, mm);
335 }
336
337 #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */
338 #define validate_mm(mm) do { } while (0)
339 #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */
340
341 /*
342 * vma has some anon_vma assigned, and is already inserted on that
343 * anon_vma's interval trees.
344 *
345 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
346 * vma must be removed from the anon_vma's interval trees using
347 * anon_vma_interval_tree_pre_update_vma().
348 *
349 * After the update, the vma will be reinserted using
350 * anon_vma_interval_tree_post_update_vma().
351 *
352 * The entire update must be protected by exclusive mmap_lock and by
353 * the root anon_vma's mutex.
354 */
355 static inline void
356 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
357 {
358 struct anon_vma_chain *avc;
359
360 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
361 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
362 }
363
364 static inline void
365 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
366 {
367 struct anon_vma_chain *avc;
368
369 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
370 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
371 }
372
373 static unsigned long count_vma_pages_range(struct mm_struct *mm,
374 unsigned long addr, unsigned long end)
375 {
376 VMA_ITERATOR(vmi, mm, addr);
377 struct vm_area_struct *vma;
378 unsigned long nr_pages = 0;
379
380 for_each_vma_range(vmi, vma, end) {
381 unsigned long vm_start = max(addr, vma->vm_start);
382 unsigned long vm_end = min(end, vma->vm_end);
383
384 nr_pages += PHYS_PFN(vm_end - vm_start);
385 }
386
387 return nr_pages;
388 }
389
390 static void __vma_link_file(struct vm_area_struct *vma,
391 struct address_space *mapping)
392 {
393 if (vma->vm_flags & VM_SHARED)
394 mapping_allow_writable(mapping);
395
396 flush_dcache_mmap_lock(mapping);
397 vma_interval_tree_insert(vma, &mapping->i_mmap);
398 flush_dcache_mmap_unlock(mapping);
399 }
400
401 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
402 {
403 VMA_ITERATOR(vmi, mm, 0);
404 struct address_space *mapping = NULL;
405
406 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
407 if (vma_iter_prealloc(&vmi, vma))
408 return -ENOMEM;
409
410 vma_start_write(vma);
411
412 vma_iter_store(&vmi, vma);
413
414 if (vma->vm_file) {
415 mapping = vma->vm_file->f_mapping;
416 i_mmap_lock_write(mapping);
417 __vma_link_file(vma, mapping);
418 i_mmap_unlock_write(mapping);
419 }
420
421 mm->map_count++;
422 validate_mm(mm);
423 return 0;
424 }
425
426 /*
427 * init_multi_vma_prep() - Initializer for struct vma_prepare
428 * @vp: The vma_prepare struct
429 * @vma: The vma that will be altered once locked
430 * @next: The next vma if it is to be adjusted
431 * @remove: The first vma to be removed
432 * @remove2: The second vma to be removed
433 */
434 static inline void init_multi_vma_prep(struct vma_prepare *vp,
435 struct vm_area_struct *vma, struct vm_area_struct *next,
436 struct vm_area_struct *remove, struct vm_area_struct *remove2)
437 {
438 memset(vp, 0, sizeof(struct vma_prepare));
439 vp->vma = vma;
440 vp->anon_vma = vma->anon_vma;
441 vp->remove = remove;
442 vp->remove2 = remove2;
443 vp->adj_next = next;
444 if (!vp->anon_vma && next)
445 vp->anon_vma = next->anon_vma;
446
447 vp->file = vma->vm_file;
448 if (vp->file)
449 vp->mapping = vma->vm_file->f_mapping;
450
451 }
452
453 /*
454 * init_vma_prep() - Initializer wrapper for vma_prepare struct
455 * @vp: The vma_prepare struct
456 * @vma: The vma that will be altered once locked
457 */
458 static inline void init_vma_prep(struct vma_prepare *vp,
459 struct vm_area_struct *vma)
460 {
461 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
462 }
463
464
465 /*
466 * vma_prepare() - Helper function for handling locking VMAs prior to altering
467 * @vp: The initialized vma_prepare struct
468 */
469 static inline void vma_prepare(struct vma_prepare *vp)
470 {
471 if (vp->file) {
472 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
473
474 if (vp->adj_next)
475 uprobe_munmap(vp->adj_next, vp->adj_next->vm_start,
476 vp->adj_next->vm_end);
477
478 i_mmap_lock_write(vp->mapping);
479 if (vp->insert && vp->insert->vm_file) {
480 /*
481 * Put into interval tree now, so instantiated pages
482 * are visible to arm/parisc __flush_dcache_page
483 * throughout; but we cannot insert into address
484 * space until vma start or end is updated.
485 */
486 __vma_link_file(vp->insert,
487 vp->insert->vm_file->f_mapping);
488 }
489 }
490
491 if (vp->anon_vma) {
492 anon_vma_lock_write(vp->anon_vma);
493 anon_vma_interval_tree_pre_update_vma(vp->vma);
494 if (vp->adj_next)
495 anon_vma_interval_tree_pre_update_vma(vp->adj_next);
496 }
497
498 if (vp->file) {
499 flush_dcache_mmap_lock(vp->mapping);
500 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
501 if (vp->adj_next)
502 vma_interval_tree_remove(vp->adj_next,
503 &vp->mapping->i_mmap);
504 }
505
506 }
507
508 /*
509 * vma_complete() - Helper function for handling the unlocking after altering VMAs,
510 * or for inserting a VMA.
511 *
512 * @vp: The vma_prepare struct
513 * @vmi: The vma iterator
514 * @mm: The mm_struct
515 */
516 static inline void vma_complete(struct vma_prepare *vp,
517 struct vma_iterator *vmi, struct mm_struct *mm)
518 {
519 if (vp->file) {
520 if (vp->adj_next)
521 vma_interval_tree_insert(vp->adj_next,
522 &vp->mapping->i_mmap);
523 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
524 flush_dcache_mmap_unlock(vp->mapping);
525 }
526
527 if (vp->remove && vp->file) {
528 __remove_shared_vm_struct(vp->remove, vp->file, vp->mapping);
529 if (vp->remove2)
530 __remove_shared_vm_struct(vp->remove2, vp->file,
531 vp->mapping);
532 } else if (vp->insert) {
533 /*
534 * split_vma has split insert from vma, and needs
535 * us to insert it before dropping the locks
536 * (it may either follow vma or precede it).
537 */
538 vma_iter_store(vmi, vp->insert);
539 mm->map_count++;
540 }
541
542 if (vp->anon_vma) {
543 anon_vma_interval_tree_post_update_vma(vp->vma);
544 if (vp->adj_next)
545 anon_vma_interval_tree_post_update_vma(vp->adj_next);
546 anon_vma_unlock_write(vp->anon_vma);
547 }
548
549 if (vp->file) {
550 i_mmap_unlock_write(vp->mapping);
551 uprobe_mmap(vp->vma);
552
553 if (vp->adj_next)
554 uprobe_mmap(vp->adj_next);
555 }
556
557 if (vp->remove) {
558 again:
559 vma_mark_detached(vp->remove, true);
560 if (vp->file) {
561 uprobe_munmap(vp->remove, vp->remove->vm_start,
562 vp->remove->vm_end);
563 fput(vp->file);
564 }
565 if (vp->remove->anon_vma)
566 anon_vma_merge(vp->vma, vp->remove);
567 mm->map_count--;
568 mpol_put(vma_policy(vp->remove));
569 if (!vp->remove2)
570 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
571 vm_area_free(vp->remove);
572
573 /*
574 * In mprotect's case 6 (see comments on vma_merge),
575 * we are removing both mid and next vmas
576 */
577 if (vp->remove2) {
578 vp->remove = vp->remove2;
579 vp->remove2 = NULL;
580 goto again;
581 }
582 }
583 if (vp->insert && vp->file)
584 uprobe_mmap(vp->insert);
585 validate_mm(mm);
586 }
587
588 /*
589 * dup_anon_vma() - Helper function to duplicate anon_vma
590 * @dst: The destination VMA
591 * @src: The source VMA
592 * @dup: Pointer to the destination VMA when successful.
593 *
594 * Returns: 0 on success.
595 */
596 static inline int dup_anon_vma(struct vm_area_struct *dst,
597 struct vm_area_struct *src, struct vm_area_struct **dup)
598 {
599 /*
600 * Easily overlooked: when mprotect shifts the boundary, make sure the
601 * expanding vma has anon_vma set if the shrinking vma had, to cover any
602 * anon pages imported.
603 */
604 if (src->anon_vma && !dst->anon_vma) {
605 int ret;
606
607 vma_assert_write_locked(dst);
608 dst->anon_vma = src->anon_vma;
609 ret = anon_vma_clone(dst, src);
610 if (ret)
611 return ret;
612
613 *dup = dst;
614 }
615
616 return 0;
617 }
618
619 /*
620 * vma_expand - Expand an existing VMA
621 *
622 * @vmi: The vma iterator
623 * @vma: The vma to expand
624 * @start: The start of the vma
625 * @end: The exclusive end of the vma
626 * @pgoff: The page offset of vma
627 * @next: The vma currently following @vma, if any.
628 *
629 * Expand @vma to @start and @end. Can expand off the start and end. Will
630 * expand over @next if it's different from @vma and @end == @next->vm_end.
631 * Checking if the @vma can expand and merge with @next needs to be handled by
632 * the caller.
633 *
634 * Returns: 0 on success
635 */
636 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
637 unsigned long start, unsigned long end, pgoff_t pgoff,
638 struct vm_area_struct *next)
639 {
640 struct vm_area_struct *anon_dup = NULL;
641 bool remove_next = false;
642 struct vma_prepare vp;
643
644 vma_start_write(vma);
645 if (next && (vma != next) && (end == next->vm_end)) {
646 int ret;
647
648 remove_next = true;
649 vma_start_write(next);
650 ret = dup_anon_vma(vma, next, &anon_dup);
651 if (ret)
652 return ret;
653 }
654
655 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
656 /* Not merging but overwriting any part of next is not handled. */
657 VM_WARN_ON(next && !vp.remove &&
658 next != vma && end > next->vm_start);
659 /* Only handles expanding */
660 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
661
662 /* Note: vma iterator must be pointing to 'start' */
663 vma_iter_config(vmi, start, end);
664 if (vma_iter_prealloc(vmi, vma))
665 goto nomem;
666
667 vma_prepare(&vp);
668 vma_adjust_trans_huge(vma, start, end, 0);
669 vma->vm_start = start;
670 vma->vm_end = end;
671 vma->vm_pgoff = pgoff;
672 vma_iter_store(vmi, vma);
673
674 vma_complete(&vp, vmi, vma->vm_mm);
675 return 0;
676
677 nomem:
678 if (anon_dup)
679 unlink_anon_vmas(anon_dup);
680 return -ENOMEM;
681 }
682
683 /*
684 * vma_shrink() - Reduce an existing VMA's memory area
685 * @vmi: The vma iterator
686 * @vma: The VMA to modify
687 * @start: The new start
688 * @end: The new end
689 *
690 * Returns: 0 on success, -ENOMEM otherwise
691 */
692 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
693 unsigned long start, unsigned long end, pgoff_t pgoff)
694 {
695 struct vma_prepare vp;
696
697 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
698
699 if (vma->vm_start < start)
700 vma_iter_config(vmi, vma->vm_start, start);
701 else
702 vma_iter_config(vmi, end, vma->vm_end);
703
704 if (vma_iter_prealloc(vmi, NULL))
705 return -ENOMEM;
706
707 vma_start_write(vma);
708
709 init_vma_prep(&vp, vma);
710 vma_prepare(&vp);
711 vma_adjust_trans_huge(vma, start, end, 0);
712
713 vma_iter_clear(vmi);
714 vma->vm_start = start;
715 vma->vm_end = end;
716 vma->vm_pgoff = pgoff;
717 vma_complete(&vp, vmi, vma->vm_mm);
718 return 0;
719 }
720
721 /*
722 * If the vma has a ->close operation then the driver probably needs to release
723 * per-vma resources, so we don't attempt to merge those if the caller indicates
724 * the current vma may be removed as part of the merge.
725 */
726 static inline bool is_mergeable_vma(struct vm_area_struct *vma,
727 struct file *file, unsigned long vm_flags,
728 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
729 struct anon_vma_name *anon_name, bool may_remove_vma)
730 {
731 /*
732 * VM_SOFTDIRTY should not prevent from VMA merging, if we
733 * match the flags but dirty bit -- the caller should mark
734 * merged VMA as dirty. If dirty bit won't be excluded from
735 * comparison, we increase pressure on the memory system forcing
736 * the kernel to generate new VMAs when old one could be
737 * extended instead.
738 */
739 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
740 return false;
741 if (vma->vm_file != file)
742 return false;
743 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
744 return false;
745 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
746 return false;
747 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
748 return false;
749 if (!is_mergable_pad_vma(vma, vm_flags))
750 return false;
751 return true;
752 }
753
754 static inline bool is_mergeable_anon_vma(struct anon_vma *anon_vma1,
755 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
756 {
757 /*
758 * The list_is_singular() test is to avoid merging VMAs cloned from
759 * parents. This improves scalability by reducing anon_vma lock contention.
760 */
761 if ((!anon_vma1 || !anon_vma2) && (!vma ||
762 list_is_singular(&vma->anon_vma_chain)))
763 return true;
764 return anon_vma1 == anon_vma2;
765 }
766
767 /*
768 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
769 * in front of (at a lower virtual address and file offset than) the vma.
770 *
771 * We cannot merge two vmas if they have differently assigned (non-NULL)
772 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
773 *
774 * We don't check here for the merged mmap wrapping around the end of pagecache
775 * indices (16TB on ia32) because do_mmap() does not permit mmap's which
776 * wrap, nor mmaps which cover the final page at index -1UL.
777 *
778 * We assume the vma may be removed as part of the merge.
779 */
780 static bool
781 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
782 struct anon_vma *anon_vma, struct file *file,
783 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
784 struct anon_vma_name *anon_name)
785 {
786 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
787 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
788 if (vma->vm_pgoff == vm_pgoff)
789 return true;
790 }
791 return false;
792 }
793
794 /*
795 * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
796 * beyond (at a higher virtual address and file offset than) the vma.
797 *
798 * We cannot merge two vmas if they have differently assigned (non-NULL)
799 * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
800 *
801 * We assume that vma is not removed as part of the merge.
802 */
803 static bool
804 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
805 struct anon_vma *anon_vma, struct file *file,
806 pgoff_t vm_pgoff, struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
807 struct anon_vma_name *anon_name)
808 {
809 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
810 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
811 pgoff_t vm_pglen;
812 vm_pglen = vma_pages(vma);
813 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
814 return true;
815 }
816 return false;
817 }
818
819 /*
820 * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
821 * figure out whether that can be merged with its predecessor or its
822 * successor. Or both (it neatly fills a hole).
823 *
824 * In most cases - when called for mmap, brk or mremap - [addr,end) is
825 * certain not to be mapped by the time vma_merge is called; but when
826 * called for mprotect, it is certain to be already mapped (either at
827 * an offset within prev, or at the start of next), and the flags of
828 * this area are about to be changed to vm_flags - and the no-change
829 * case has already been eliminated.
830 *
831 * The following mprotect cases have to be considered, where **** is
832 * the area passed down from mprotect_fixup, never extending beyond one
833 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
834 * at the same address as **** and is of the same or larger span, and
835 * NNNN the next vma after ****:
836 *
837 * **** **** ****
838 * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPCCCCCC
839 * cannot merge might become might become
840 * PPNNNNNNNNNN PPPPPPPPPPCC
841 * mmap, brk or case 4 below case 5 below
842 * mremap move:
843 * **** ****
844 * PPPP NNNN PPPPCCCCNNNN
845 * might become might become
846 * PPPPPPPPPPPP 1 or PPPPPPPPPPPP 6 or
847 * PPPPPPPPNNNN 2 or PPPPPPPPNNNN 7 or
848 * PPPPNNNNNNNN 3 PPPPNNNNNNNN 8
849 *
850 * It is important for case 8 that the vma CCCC overlapping the
851 * region **** is never going to be extended over NNNN. Instead NNNN must
852 * be extended in region **** and CCCC must be removed. This way in
853 * all cases where vma_merge succeeds, the moment vma_merge drops the
854 * rmap_locks, the properties of the merged vma will be already
855 * correct for the whole merged range. Some of those properties like
856 * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
857 * be correct for the whole merged range immediately after the
858 * rmap_locks are released. Otherwise if NNNN would be removed and
859 * CCCC would be extended over the NNNN range, remove_migration_ptes
860 * or other rmap walkers (if working on addresses beyond the "end"
861 * parameter) may establish ptes with the wrong permissions of CCCC
862 * instead of the right permissions of NNNN.
863 *
864 * In the code below:
865 * PPPP is represented by *prev
866 * CCCC is represented by *curr or not represented at all (NULL)
867 * NNNN is represented by *next or not represented at all (NULL)
868 * **** is not represented - it will be merged and the vma containing the
869 * area is returned, or the function will return NULL
870 */
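/*
 * A minimal walk-through of case 1 above (addresses are purely
 * illustrative): prev covers [0x1000, 0x2000), next covers
 * [0x3000, 0x4000), and the new area **** is [0x2000, 0x3000) with
 * matching flags, file, pgoff and anon_vma. Both merge_prev and
 * merge_next succeed, so prev is expanded to [0x1000, 0x4000) and next
 * is removed (remove = next, vma_end = next->vm_end).
 */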
871 struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
872 struct vm_area_struct *prev, unsigned long addr,
873 unsigned long end, unsigned long vm_flags,
874 struct anon_vma *anon_vma, struct file *file,
875 pgoff_t pgoff, struct mempolicy *policy,
876 struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
877 struct anon_vma_name *anon_name)
878 {
879 struct vm_area_struct *curr, *next, *res;
880 struct vm_area_struct *vma, *adjust, *remove, *remove2;
881 struct vm_area_struct *anon_dup = NULL;
882 struct vma_prepare vp;
883 pgoff_t vma_pgoff;
884 int err = 0;
885 bool merge_prev = false;
886 bool merge_next = false;
887 bool vma_expanded = false;
888 unsigned long vma_start = addr;
889 unsigned long vma_end = end;
890 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
891 long adj_start = 0;
892
893 /*
894 * We later require that vma->vm_flags == vm_flags,
895 * so this tests vma->vm_flags & VM_SPECIAL, too.
896 */
897 if (vm_flags & VM_SPECIAL)
898 return NULL;
899
900 /* Does the input range span an existing VMA? (cases 5 - 8) */
901 curr = find_vma_intersection(mm, prev ? prev->vm_end : 0, end);
902
903 if (!curr || /* cases 1 - 4 */
904 end == curr->vm_end) /* cases 6 - 8, adjacent VMA */
905 next = vma_lookup(mm, end);
906 else
907 next = NULL; /* case 5 */
908
909 if (prev) {
910 vma_start = prev->vm_start;
911 vma_pgoff = prev->vm_pgoff;
912
913 /* Can we merge the predecessor? */
914 if (addr == prev->vm_end && mpol_equal(vma_policy(prev), policy)
915 && can_vma_merge_after(prev, vm_flags, anon_vma, file,
916 pgoff, vm_userfaultfd_ctx, anon_name)) {
917 merge_prev = true;
918 vma_prev(vmi);
919 }
920 }
921
922 /* Can we merge the successor? */
923 if (next && mpol_equal(policy, vma_policy(next)) &&
924 can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen,
925 vm_userfaultfd_ctx, anon_name)) {
926 merge_next = true;
927 }
928
929 /* Verify some invariants that must be enforced by the caller. */
930 VM_WARN_ON(prev && addr <= prev->vm_start);
931 VM_WARN_ON(curr && (addr != curr->vm_start || end > curr->vm_end));
932 VM_WARN_ON(addr >= end);
933
934 if (!merge_prev && !merge_next)
935 return NULL; /* Not mergeable. */
936
937 if (merge_prev)
938 vma_start_write(prev);
939
940 res = vma = prev;
941 remove = remove2 = adjust = NULL;
942
943 /* Can we merge both the predecessor and the successor? */
944 if (merge_prev && merge_next &&
945 is_mergeable_anon_vma(prev->anon_vma, next->anon_vma, NULL)) {
946 vma_start_write(next);
947 remove = next; /* case 1 */
948 vma_end = next->vm_end;
949 err = dup_anon_vma(prev, next, &anon_dup);
950 if (curr) { /* case 6 */
951 vma_start_write(curr);
952 remove = curr;
953 remove2 = next;
954 if (!next->anon_vma)
955 err = dup_anon_vma(prev, curr, &anon_dup);
956 }
957 } else if (merge_prev) { /* case 2 */
958 if (curr) {
959 vma_start_write(curr);
960 if (end == curr->vm_end) { /* case 7 */
961 /*
962 * can_vma_merge_after() assumed we would not be
963 * removing prev vma, so it skipped the check
964 * for vm_ops->close, but we are removing curr
965 */
966 if (curr->vm_ops && curr->vm_ops->close)
967 err = -EINVAL;
968 remove = curr;
969 } else { /* case 5 */
970 adjust = curr;
971 adj_start = (end - curr->vm_start);
972 }
973 if (!err)
974 err = dup_anon_vma(prev, curr, &anon_dup);
975 }
976 } else { /* merge_next */
977 vma_start_write(next);
978 res = next;
979 if (prev && addr < prev->vm_end) { /* case 4 */
980 vma_start_write(prev);
981 vma_end = addr;
982 adjust = next;
983 adj_start = -(prev->vm_end - addr);
984 err = dup_anon_vma(next, prev, &anon_dup);
985 } else {
986 /*
987 * Note that cases 3 and 8 are the ONLY ones where prev
988 * is permitted to be (but is not necessarily) NULL.
989 */
990 vma = next; /* case 3 */
991 vma_start = addr;
992 vma_end = next->vm_end;
993 vma_pgoff = next->vm_pgoff - pglen;
994 if (curr) { /* case 8 */
995 vma_pgoff = curr->vm_pgoff;
996 vma_start_write(curr);
997 remove = curr;
998 err = dup_anon_vma(next, curr, &anon_dup);
999 }
1000 }
1001 }
1002
1003 /* Error in anon_vma clone. */
1004 if (err)
1005 goto anon_vma_fail;
1006
1007 if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1008 vma_expanded = true;
1009
1010 if (vma_expanded) {
1011 vma_iter_config(vmi, vma_start, vma_end);
1012 } else {
1013 vma_iter_config(vmi, adjust->vm_start + adj_start,
1014 adjust->vm_end);
1015 }
1016
1017 if (vma_iter_prealloc(vmi, vma))
1018 goto prealloc_fail;
1019
1020 init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1021 VM_WARN_ON(vp.anon_vma && adjust && adjust->anon_vma &&
1022 vp.anon_vma != adjust->anon_vma);
1023
1024 vma_prepare(&vp);
1025 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1026
1027 vma->vm_start = vma_start;
1028 vma->vm_end = vma_end;
1029 vma->vm_pgoff = vma_pgoff;
1030
1031 if (vma_expanded)
1032 vma_iter_store(vmi, vma);
1033
1034 if (adj_start) {
1035 adjust->vm_start += adj_start;
1036 adjust->vm_pgoff += adj_start >> PAGE_SHIFT;
1037 if (adj_start < 0) {
1038 WARN_ON(vma_expanded);
1039 vma_iter_store(vmi, next);
1040 }
1041 }
1042
1043 vma_complete(&vp, vmi, mm);
1044 khugepaged_enter_vma(res, vm_flags);
1045 return res;
1046
1047 prealloc_fail:
1048 if (anon_dup)
1049 unlink_anon_vmas(anon_dup);
1050
1051 anon_vma_fail:
1052 vma_iter_set(vmi, addr);
1053 vma_iter_load(vmi);
1054 return NULL;
1055 }
1056
1057 /*
1058 * Rough compatibility check to quickly see if it's even worth looking
1059 * at sharing an anon_vma.
1060 *
1061 * They need to have the same vm_file, and the flags can only differ
1062 * in things that mprotect may change.
1063 *
1064 * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1065 * we can merge the two vma's. For example, we refuse to merge a vma if
1066 * there is a vm_ops->close() function, because that indicates that the
1067 * driver is doing some kind of reference counting. But that doesn't
1068 * really matter for the anon_vma sharing case.
1069 */
1070 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1071 {
1072 return a->vm_end == b->vm_start &&
1073 mpol_equal(vma_policy(a), vma_policy(b)) &&
1074 a->vm_file == b->vm_file &&
1075 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) &&
1076 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1077 }
1078
1079 /*
1080 * Do some basic sanity checking to see if we can re-use the anon_vma
1081 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1082 * the same as 'old', the other will be the new one that is trying
1083 * to share the anon_vma.
1084 *
1085 * NOTE! This runs with mmap_lock held for reading, so it is possible that
1086 * the anon_vma of 'old' is concurrently in the process of being set up
1087 * by another page fault trying to merge _that_. But that's ok: if it
1088 * is being set up, that automatically means that it will be a singleton
1089 * acceptable for merging, so we can do all of this optimistically. But
1090 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1091 *
1092 * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1093 * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1094 * is to return an anon_vma that is "complex" due to having gone through
1095 * a fork).
1096 *
1097 * We also make sure that the two vma's are compatible (adjacent,
1098 * and with the same memory policies). That's all stable, even with just
1099 * a read lock on the mmap_lock.
1100 */
1101 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1102 {
1103 if (anon_vma_compatible(a, b)) {
1104 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1105
1106 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1107 return anon_vma;
1108 }
1109 return NULL;
1110 }
1111
1112 /*
1113 * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1114 * neighbouring vmas for a suitable anon_vma, before it goes off
1115 * to allocate a new anon_vma. It checks because a repetitive
1116 * sequence of mprotects and faults may otherwise lead to distinct
1117 * anon_vmas being allocated, preventing vma merge in subsequent
1118 * mprotect.
1119 */
1120 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1121 {
1122 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1123 struct anon_vma *anon_vma = NULL;
1124 struct vm_area_struct *prev, *next;
1125
1126 /* Try next first. */
1127 next = mas_walk(&mas);
1128 if (next) {
1129 anon_vma = reusable_anon_vma(next, vma, next);
1130 if (anon_vma)
1131 return anon_vma;
1132 }
1133
1134 prev = mas_prev(&mas, 0);
1135 VM_BUG_ON_VMA(prev != vma, vma);
1136 prev = mas_prev(&mas, 0);
1137 /* Try prev next. */
1138 if (prev)
1139 anon_vma = reusable_anon_vma(prev, prev, vma);
1140
1141 /*
1142 * We might reach here with anon_vma == NULL if we can't find
1143 * any reusable anon_vma.
1144 * There's no absolute need to look only at touching neighbours:
1145 * we could search further afield for "compatible" anon_vmas.
1146 * But it would probably just be a waste of time searching,
1147 * or lead to too many vmas hanging off the same anon_vma.
1148 * We're trying to allow mprotect remerging later on,
1149 * not trying to minimize memory used for anon_vmas.
1150 */
1151 return anon_vma;
1152 }
1153
1154 /*
1155 * If a hint addr is less than mmap_min_addr change hint to be as
1156 * low as possible but still greater than mmap_min_addr
1157 */
1158 static inline unsigned long round_hint_to_min(unsigned long hint)
1159 {
1160 hint &= __PAGE_MASK;
1161 if (((void *)hint != NULL) &&
1162 (hint < mmap_min_addr))
1163 return __PAGE_ALIGN(mmap_min_addr);
1164 return hint;
1165 }
1166
1167 bool mlock_future_ok(struct mm_struct *mm, unsigned long flags,
1168 unsigned long bytes)
1169 {
1170 unsigned long locked_pages, limit_pages;
1171
1172 if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK))
1173 return true;
1174
1175 locked_pages = bytes >> PAGE_SHIFT;
1176 locked_pages += mm->locked_vm;
1177
1178 limit_pages = rlimit(RLIMIT_MEMLOCK);
1179 limit_pages >>= PAGE_SHIFT;
1180
1181 return locked_pages <= limit_pages;
1182 }
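/*
 * Worked example (assumed numbers, 4 KiB pages): with RLIMIT_MEMLOCK of
 * 64 KiB and 8 pages already locked, a further 64 KiB VM_LOCKED request
 * gives locked_pages = 16 + 8 = 24 against limit_pages = 16, so this
 * returns false and the caller fails with -EAGAIN.
 */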
1183
1184 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1185 {
1186 if (S_ISREG(inode->i_mode))
1187 return MAX_LFS_FILESIZE;
1188
1189 if (S_ISBLK(inode->i_mode))
1190 return MAX_LFS_FILESIZE;
1191
1192 if (S_ISSOCK(inode->i_mode))
1193 return MAX_LFS_FILESIZE;
1194
1195 /* Special "we do even unsigned file positions" case */
1196 if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1197 return 0;
1198
1199 /* Yes, random drivers might want more. But I'm tired of buggy drivers */
1200 return ULONG_MAX;
1201 }
1202
1203 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1204 unsigned long pgoff, unsigned long len)
1205 {
1206 u64 maxsize = file_mmap_size_max(file, inode);
1207
1208 if (maxsize && len > maxsize)
1209 return false;
1210 maxsize -= len;
1211 if (pgoff > maxsize >> PAGE_SHIFT)
1212 return false;
1213 return true;
1214 }
1215
1216 /*
1217 * The caller must write-lock current->mm->mmap_lock.
1218 */
1219 unsigned long do_mmap(struct file *file, unsigned long addr,
1220 unsigned long len, unsigned long prot,
1221 unsigned long flags, vm_flags_t vm_flags,
1222 unsigned long pgoff, unsigned long *populate,
1223 struct list_head *uf)
1224 {
1225 unsigned long old_len;
1226 struct mm_struct *mm = current->mm;
1227 int pkey = 0;
1228
1229 *populate = 0;
1230
1231 if (!len)
1232 return -EINVAL;
1233
1234 /*
1235 * Does the application expect PROT_READ to imply PROT_EXEC?
1236 *
1237 * (the exception is when the underlying filesystem is noexec
1238 * mounted, in which case we don't add PROT_EXEC.)
1239 */
1240 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1241 if (!(file && path_noexec(&file->f_path)))
1242 prot |= PROT_EXEC;
1243
1244 /* force arch specific MAP_FIXED handling in get_unmapped_area */
1245 if (flags & MAP_FIXED_NOREPLACE)
1246 flags |= MAP_FIXED;
1247
1248 if (!(flags & MAP_FIXED))
1249 addr = round_hint_to_min(addr);
1250
1251 /* Careful about overflows.. */
1252 len = __COMPAT_PAGE_ALIGN(len, flags);
1253 if (!len)
1254 return -ENOMEM;
1255
1256 /* Save the requested len */
1257 old_len = len;
1258
1259 /* offset overflow? */
1260 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1261 return -EOVERFLOW;
1262
1263 /* Too many mappings? */
1264 if (mm->map_count > sysctl_max_map_count)
1265 return -ENOMEM;
1266
1267 /* Obtain the address to map to. we verify (or select) it and ensure
1268 * that it represents a valid section of the address space.
1269 */
1270 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1271 if (IS_ERR_VALUE(addr))
1272 return addr;
1273
1274 if (flags & MAP_FIXED_NOREPLACE) {
1275 if (find_vma_intersection(mm, addr, addr + len))
1276 return -EEXIST;
1277 }
1278
1279 /*
1280 * addr is returned from get_unmapped_area.
1281 * There are two cases:
1282 * 1> MAP_FIXED == false
1283 * unallocated memory, no need to check sealing.
1284 * 2> MAP_FIXED == true
1285 * sealing is checked inside mmap_region when
1286 * do_vmi_munmap is called.
1287 */
1288
1289 if (prot == PROT_EXEC) {
1290 pkey = execute_only_pkey(mm);
1291 if (pkey < 0)
1292 pkey = 0;
1293 }
1294
1295 /* Do simple checking here so the lower-level routines won't have
1296 * to. we assume access permissions have been handled by the open
1297 * of the memory object, so we don't do any here.
1298 */
1299 vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1300 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1301
1302 if (flags & MAP_LOCKED)
1303 if (!can_do_mlock())
1304 return -EPERM;
1305
1306 if (!mlock_future_ok(mm, vm_flags, len))
1307 return -EAGAIN;
1308
1309 if (file) {
1310 struct inode *inode = file_inode(file);
1311 unsigned long flags_mask;
1312
1313 if (!file_mmap_ok(file, inode, pgoff, len))
1314 return -EOVERFLOW;
1315
1316 len = __filemap_len(inode, pgoff, len, flags);
1317
1318 flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1319
1320 switch (flags & MAP_TYPE) {
1321 case MAP_SHARED:
1322 /*
1323 * Force use of MAP_SHARED_VALIDATE with non-legacy
1324 * flags. E.g. MAP_SYNC is dangerous to use with
1325 * MAP_SHARED as you don't know which consistency model
1326 * you will get. We silently ignore unsupported flags
1327 * with MAP_SHARED to preserve backward compatibility.
1328 */
1329 flags &= LEGACY_MAP_MASK;
1330 fallthrough;
1331 case MAP_SHARED_VALIDATE:
1332 if (flags & ~flags_mask)
1333 return -EOPNOTSUPP;
1334 if (prot & PROT_WRITE) {
1335 if (!(file->f_mode & FMODE_WRITE))
1336 return -EACCES;
1337 if (IS_SWAPFILE(file->f_mapping->host))
1338 return -ETXTBSY;
1339 }
1340
1341 /*
1342 * Make sure we don't allow writing to an append-only
1343 * file..
1344 */
1345 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1346 return -EACCES;
1347
1348 vm_flags |= VM_SHARED | VM_MAYSHARE;
1349 if (!(file->f_mode & FMODE_WRITE))
1350 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1351 fallthrough;
1352 case MAP_PRIVATE:
1353 if (!(file->f_mode & FMODE_READ))
1354 return -EACCES;
1355 if (path_noexec(&file->f_path)) {
1356 if (vm_flags & VM_EXEC)
1357 return -EPERM;
1358 vm_flags &= ~VM_MAYEXEC;
1359 }
1360
1361 if (!file->f_op->mmap)
1362 return -ENODEV;
1363 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1364 return -EINVAL;
1365 break;
1366
1367 default:
1368 return -EINVAL;
1369 }
1370 } else {
1371 switch (flags & MAP_TYPE) {
1372 case MAP_SHARED:
1373 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1374 return -EINVAL;
1375 /*
1376 * Ignore pgoff.
1377 */
1378 pgoff = 0;
1379 vm_flags |= VM_SHARED | VM_MAYSHARE;
1380 break;
1381 case MAP_PRIVATE:
1382 /*
1383 * Set pgoff according to addr for anon_vma.
1384 */
1385 pgoff = addr >> PAGE_SHIFT;
1386 break;
1387 default:
1388 return -EINVAL;
1389 }
1390 }
1391
1392 /*
1393 * Set 'VM_NORESERVE' if we should not account for the
1394 * memory use of this mapping.
1395 */
1396 if (flags & MAP_NORESERVE) {
1397 /* We honor MAP_NORESERVE if allowed to overcommit */
1398 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1399 vm_flags |= VM_NORESERVE;
1400
1401 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1402 if (file && is_file_hugepages(file))
1403 vm_flags |= VM_NORESERVE;
1404 }
1405
1406 addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1407 if (!IS_ERR_VALUE(addr) &&
1408 ((vm_flags & VM_LOCKED) ||
1409 (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1410 *populate = len;
1411
1412 __filemap_fixup(addr, prot, old_len, len);
1413
1414 return addr;
1415 }
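/*
 * Example (illustrative): a userspace call such as
 * mmap(NULL, 8192, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0)
 * reaches do_mmap() with file == NULL; it takes the anonymous MAP_PRIVATE
 * branch above, where pgoff is derived from the chosen address, and the
 * mapping is then created by mmap_region().
 */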
1416
1417 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1418 unsigned long prot, unsigned long flags,
1419 unsigned long fd, unsigned long pgoff)
1420 {
1421 struct file *file = NULL;
1422 unsigned long retval;
1423
1424 if (!(flags & MAP_ANONYMOUS)) {
1425 audit_mmap_fd(fd, flags);
1426 file = fget(fd);
1427 if (!file)
1428 return -EBADF;
1429 if (is_file_hugepages(file)) {
1430 len = ALIGN(len, huge_page_size(hstate_file(file)));
1431 } else if (unlikely(flags & MAP_HUGETLB)) {
1432 retval = -EINVAL;
1433 goto out_fput;
1434 }
1435 } else if (flags & MAP_HUGETLB) {
1436 struct hstate *hs;
1437
1438 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1439 if (!hs)
1440 return -EINVAL;
1441
1442 len = ALIGN(len, huge_page_size(hs));
1443 /*
1444 * VM_NORESERVE is used because the reservations will be
1445 * taken when vm_ops->mmap() is called
1446 */
1447 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1448 VM_NORESERVE,
1449 HUGETLB_ANONHUGE_INODE,
1450 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1451 if (IS_ERR(file))
1452 return PTR_ERR(file);
1453 }
1454
1455 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1456 out_fput:
1457 if (file)
1458 fput(file);
1459 return retval;
1460 }
1461
1462 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1463 unsigned long, prot, unsigned long, flags,
1464 unsigned long, fd, unsigned long, pgoff)
1465 {
1466 return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1467 }
1468
1469 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1470 struct mmap_arg_struct {
1471 unsigned long addr;
1472 unsigned long len;
1473 unsigned long prot;
1474 unsigned long flags;
1475 unsigned long fd;
1476 unsigned long offset;
1477 };
1478
1479 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1480 {
1481 struct mmap_arg_struct a;
1482
1483 if (copy_from_user(&a, arg, sizeof(a)))
1484 return -EFAULT;
1485 if (offset_in_page(a.offset))
1486 return -EINVAL;
1487
1488 return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1489 a.offset >> PAGE_SHIFT);
1490 }
1491 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1492
1493 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops)
1494 {
1495 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite);
1496 }
1497
1498 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1499 {
1500 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1501 (VM_WRITE | VM_SHARED);
1502 }
1503
1504 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1505 {
1506 /* No managed pages to writeback. */
1507 if (vma->vm_flags & VM_PFNMAP)
1508 return false;
1509
1510 return vma->vm_file && vma->vm_file->f_mapping &&
1511 mapping_can_writeback(vma->vm_file->f_mapping);
1512 }
1513
1514 /*
1515 * Does this VMA require the underlying folios to have their dirty state
1516 * tracked?
1517 */
1518 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1519 {
1520 /* Only shared, writable VMAs require dirty tracking. */
1521 if (!vma_is_shared_writable(vma))
1522 return false;
1523
1524 /* Does the filesystem need to be notified? */
1525 if (vm_ops_needs_writenotify(vma->vm_ops))
1526 return true;
1527
1528 /*
1529 * Even if the filesystem doesn't indicate a need for writenotify, if it
1530 * can writeback, dirty tracking is still required.
1531 */
1532 return vma_fs_can_writeback(vma);
1533 }
1534
1535 /*
1536 * Some shared mappings will want the pages marked read-only
1537 * to track write events. If so, we'll downgrade vm_page_prot
1538 * to the private version (using protection_map[] without the
1539 * VM_SHARED bit).
1540 */
1541 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1542 {
1543 /* If it was private or non-writable, the write bit is already clear */
1544 if (!vma_is_shared_writable(vma))
1545 return 0;
1546
1547 /* The backer wishes to know when pages are first written to? */
1548 if (vm_ops_needs_writenotify(vma->vm_ops))
1549 return 1;
1550
1551 /* The open routine did something to the protections that pgprot_modify
1552 * won't preserve? */
1553 if (pgprot_val(vm_page_prot) !=
1554 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1555 return 0;
1556
1557 /*
1558 * Do we need to track softdirty? hugetlb does not support softdirty
1559 * tracking yet.
1560 */
1561 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1562 return 1;
1563
1564 /* Do we need write faults for uffd-wp tracking? */
1565 if (userfaultfd_wp(vma))
1566 return 1;
1567
1568 /* Can the mapping track the dirty pages? */
1569 return vma_fs_can_writeback(vma);
1570 }
1571
1572 /*
1573 * We account for memory if it's a private writeable mapping,
1574 * not hugepages and VM_NORESERVE wasn't set.
1575 */
1576 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1577 {
1578 /*
1579 * hugetlb has its own accounting separate from the core VM
1580 * VM_HUGETLB may not be set yet so we cannot check for that flag.
1581 */
1582 if (file && is_file_hugepages(file))
1583 return 0;
1584
1585 return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1586 }
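/*
 * For instance, a private writable anonymous mapping (VM_WRITE set,
 * VM_SHARED and VM_NORESERVE clear) is charged against the commit limit,
 * whereas a shared or read-only mapping is not. This is only an
 * illustration of the flag test above, not an exhaustive list.
 */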
1587
1588 /**
1589 * unmapped_area() - Find an area between the low_limit and the high_limit with
1590 * the correct alignment and offset, all from @info. Note: current->mm is used
1591 * for the search.
1592 *
1593 * @info: The unmapped area information including the range [low_limit -
1594 * high_limit), the alignment offset and mask.
1595 *
1596 * Return: A memory address or -ENOMEM.
1597 */
1598 static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1599 {
1600 unsigned long length, gap;
1601 unsigned long low_limit, high_limit;
1602 struct vm_area_struct *tmp;
1603
1604 MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1605
1606 /* Adjust search length to account for worst case alignment overhead */
1607 length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask);
1608 if (length < info->length)
1609 return -ENOMEM;
1610
1611 low_limit = info->low_limit;
1612 if (low_limit < mmap_min_addr)
1613 low_limit = mmap_min_addr;
1614 high_limit = info->high_limit;
1615 retry:
1616 if (mas_empty_area(&mas, low_limit, high_limit - 1, length))
1617 return -ENOMEM;
1618
1619 gap = mas.index;
1620 gap += (info->align_offset - gap) & info->align_mask;
1621 tmp = mas_next(&mas, ULONG_MAX);
1622 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1623 if (vm_start_gap(tmp) < gap + length - 1) {
1624 low_limit = tmp->vm_end;
1625 mas_reset(&mas);
1626 goto retry;
1627 }
1628 } else {
1629 tmp = mas_prev(&mas, 0);
1630 if (tmp && vm_end_gap(tmp) > gap) {
1631 low_limit = vm_end_gap(tmp);
1632 mas_reset(&mas);
1633 goto retry;
1634 }
1635 }
1636
1637 return __PAGE_ALIGN(gap);
1638 }
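/*
 * Alignment sketch (assumed values): with info->align_mask = 0xffff
 * (64 KiB alignment) and info->align_offset = 0, a gap found at
 * 0x12345000 is bumped by (0 - 0x12345000) & 0xffff = 0xb000, yielding
 * the aligned address 0x12350000.
 */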
1639
1640 /**
1641 * unmapped_area_topdown() - Find an area between the low_limit and the
1642 * high_limit with the correct alignment and offset at the highest available
1643 * address, all from @info. Note: current->mm is used for the search.
1644 *
1645 * @info: The unmapped area information including the range [low_limit -
1646 * high_limit), the alignment offset and mask.
1647 *
1648 * Return: A memory address or -ENOMEM.
1649 */
1650 static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1651 {
1652 unsigned long length, gap, gap_end;
1653 unsigned long low_limit, high_limit;
1654 struct vm_area_struct *tmp;
1655
1656 MA_STATE(mas, &current->mm->mm_mt, 0, 0);
1657 /* Adjust search length to account for worst case alignment overhead */
1658 length = __PAGE_SIZE_ROUND_UP_ADJ(info->length + info->align_mask);
1659 if (length < info->length)
1660 return -ENOMEM;
1661
1662 low_limit = info->low_limit;
1663 if (low_limit < mmap_min_addr)
1664 low_limit = mmap_min_addr;
1665 high_limit = info->high_limit;
1666 retry:
1667 if (mas_empty_area_rev(&mas, low_limit, high_limit - 1, length))
1668 return -ENOMEM;
1669
1670 gap = mas.last + 1 - info->length;
1671 gap -= (gap - info->align_offset) & info->align_mask;
1672 gap_end = mas.last;
1673 tmp = mas_next(&mas, ULONG_MAX);
1674 if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */
1675 if (vm_start_gap(tmp) <= gap_end) {
1676 high_limit = vm_start_gap(tmp);
1677 mas_reset(&mas);
1678 goto retry;
1679 }
1680 } else {
1681 tmp = mas_prev(&mas, 0);
1682 if (tmp && vm_end_gap(tmp) > gap) {
1683 high_limit = tmp->vm_start;
1684 mas_reset(&mas);
1685 goto retry;
1686 }
1687 }
1688
1689 return __PAGE_ALIGN(gap);
1690 }
1691
1692 /*
1693 * Search for an unmapped address range.
1694 *
1695 * We are looking for a range that:
1696 * - does not intersect with any VMA;
1697 * - is contained within the [low_limit, high_limit) interval;
1698 * - is at least the desired size.
1699 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1700 */
1701 unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
1702 {
1703 unsigned long addr;
1704
1705 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
1706 addr = unmapped_area_topdown(info);
1707 else
1708 addr = unmapped_area(info);
1709
1710 trace_vm_unmapped_area(addr, info);
1711 return addr;
1712 }
1713 EXPORT_SYMBOL_GPL(vm_unmapped_area);
1714
1715 /* Get an address range which is currently unmapped.
1716 * For shmat() with addr=0.
1717 *
1718 * Ugly calling convention alert:
1719 * Return value with the low bits set means error value,
1720 * i.e.
1721 * if (ret & ~PAGE_MASK)
1722 * error = ret;
1723 *
1724 * This function "knows" that -ENOMEM has the bits set.
1725 */
1726 unsigned long
1727 generic_get_unmapped_area(struct file *filp, unsigned long addr,
1728 unsigned long len, unsigned long pgoff,
1729 unsigned long flags)
1730 {
1731 struct mm_struct *mm = current->mm;
1732 struct vm_area_struct *vma, *prev;
1733 struct vm_unmapped_area_info info;
1734 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1735
1736 if (len > mmap_end - mmap_min_addr)
1737 return -ENOMEM;
1738
1739 if (flags & MAP_FIXED)
1740 return addr;
1741
1742 if (addr) {
1743 addr = PAGE_ALIGN(addr);
1744 vma = find_vma_prev(mm, addr, &prev);
1745 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1746 (!vma || addr + len <= vm_start_gap(vma)) &&
1747 (!prev || addr >= vm_end_gap(prev)))
1748 return addr;
1749 }
1750
1751 info.flags = 0;
1752 info.length = len;
1753 info.low_limit = mm->mmap_base;
1754 info.high_limit = mmap_end;
1755 info.align_mask = 0;
1756 info.align_offset = 0;
1757 return vm_unmapped_area(&info);
1758 }
1759
1760 #ifndef HAVE_ARCH_UNMAPPED_AREA
1761 unsigned long
1762 arch_get_unmapped_area(struct file *filp, unsigned long addr,
1763 unsigned long len, unsigned long pgoff,
1764 unsigned long flags)
1765 {
1766 return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
1767 }
1768 #endif
1769
1770 /*
1771 * This mmap-allocator allocates new areas top-down from below the
1772 * stack's low limit (the base):
1773 */
1774 unsigned long
1775 generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1776 unsigned long len, unsigned long pgoff,
1777 unsigned long flags)
1778 {
1779 struct vm_area_struct *vma, *prev;
1780 struct mm_struct *mm = current->mm;
1781 struct vm_unmapped_area_info info;
1782 const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
1783
1784 /* requested length too big for entire address space */
1785 if (len > mmap_end - mmap_min_addr)
1786 return -ENOMEM;
1787
1788 if (flags & MAP_FIXED)
1789 return addr;
1790
1791 /* requesting a specific address */
1792 if (addr) {
1793 addr = PAGE_ALIGN(addr);
1794 vma = find_vma_prev(mm, addr, &prev);
1795 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
1796 (!vma || addr + len <= vm_start_gap(vma)) &&
1797 (!prev || addr >= vm_end_gap(prev)))
1798 return addr;
1799 }
1800
1801 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1802 info.length = len;
1803 info.low_limit = PAGE_SIZE;
1804 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
1805 info.align_mask = 0;
1806 info.align_offset = 0;
1807 addr = vm_unmapped_area(&info);
1808
1809 /*
1810 * A failed mmap() very likely causes application failure,
1811 * so fall back to the bottom-up function here. This scenario
1812 * can happen with large stack limits and large mmap()
1813 * allocations.
1814 */
1815 if (offset_in_page(addr)) {
1816 VM_BUG_ON(addr != -ENOMEM);
1817 info.flags = 0;
1818 info.low_limit = TASK_UNMAPPED_BASE;
1819 info.high_limit = mmap_end;
1820 addr = vm_unmapped_area(&info);
1821 }
1822
1823 return addr;
1824 }
1825
1826 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1827 unsigned long
1828 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
1829 unsigned long len, unsigned long pgoff,
1830 unsigned long flags)
1831 {
1832 return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
1833 }
1834 #endif
1835
1836 unsigned long
1837 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1838 unsigned long pgoff, unsigned long flags)
1839 {
1840 unsigned long (*get_area)(struct file *, unsigned long,
1841 unsigned long, unsigned long, unsigned long);
1842
1843 unsigned long error = arch_mmap_check(addr, len, flags);
1844 if (error)
1845 return error;
1846
1847 /* Careful about overflows.. */
1848 if (len > TASK_SIZE)
1849 return -ENOMEM;
1850
1851 get_area = current->mm->get_unmapped_area;
1852 if (file) {
1853 if (file->f_op->get_unmapped_area)
1854 get_area = file->f_op->get_unmapped_area;
1855 } else if (flags & MAP_SHARED) {
1856 /*
1857 * mmap_region() will call shmem_zero_setup() to create a file,
1858 * so use shmem's get_unmapped_area in case it can be huge.
1859 * do_mmap() will clear pgoff, so match alignment.
1860 */
1861 pgoff = 0;
1862 get_area = shmem_get_unmapped_area;
1863 }
1864
1865 addr = get_area(file, addr, len, pgoff, flags);
1866 if (IS_ERR_VALUE(addr))
1867 return addr;
1868
1869 if (addr > TASK_SIZE - len)
1870 return -ENOMEM;
1871 if (offset_in_page(addr))
1872 return -EINVAL;
1873
1874 error = security_mmap_addr(addr);
1875 return error ? error : addr;
1876 }
1877
1878 EXPORT_SYMBOL(get_unmapped_area);
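/*
 * Illustrative caller of get_unmapped_area() (not from this file): a
 * page-aligned return value is a usable address, anything with low
 * bits set is a negative errno, so callers typically do:
 *
 *	addr = get_unmapped_area(file, hint, len, pgoff, flags);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 */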
1879
1880 /**
1881 * find_vma_intersection() - Look up the first VMA which intersects the interval
1882 * @mm: The process address space.
1883 * @start_addr: The inclusive start user address.
1884 * @end_addr: The exclusive end user address.
1885 *
1886 * Returns: The first VMA within the provided range, %NULL otherwise. Assumes
1887 * start_addr < end_addr.
1888 */
1889 struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
1890 unsigned long start_addr,
1891 unsigned long end_addr)
1892 {
1893 unsigned long index = start_addr;
1894
1895 mmap_assert_locked(mm);
1896 return mt_find(&mm->mm_mt, &index, end_addr - 1);
1897 }
1898 EXPORT_SYMBOL(find_vma_intersection);
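/*
 * Example use of find_vma_intersection() (a sketch; compare
 * insert_vm_struct() further below): reject a request that overlaps an
 * existing mapping. The caller must hold mmap_lock.
 *
 *	mmap_read_lock(mm);
 *	if (find_vma_intersection(mm, start, start + len))
 *		ret = -EEXIST;
 *	mmap_read_unlock(mm);
 */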
1899
1900 /**
1901 * find_vma() - Find the VMA for a given address, or the next VMA.
1902 * @mm: The mm_struct to check
1903 * @addr: The address
1904 *
1905 * Returns: The VMA associated with addr, or the next VMA.
1906 * May return %NULL in the case of no VMA at addr or above.
1907 */
1908 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1909 {
1910 unsigned long index = addr;
1911
1912 mmap_assert_locked(mm);
1913 return mt_find(&mm->mm_mt, &index, ULONG_MAX);
1914 }
1915 EXPORT_SYMBOL(find_vma);
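/*
 * Note that find_vma() may return a VMA that starts above @addr, so a
 * caller that needs "the VMA containing addr" has to re-check
 * vm_start, e.g. (illustrative only):
 *
 *	vma = find_vma(mm, addr);
 *	if (!vma || addr < vma->vm_start)
 *		return NULL;
 *
 * vma_lookup() wraps exactly this check.
 */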
1916
1917 /**
1918 * find_vma_prev() - Find the VMA for a given address, or the next vma and
1919 * set %pprev to the previous VMA, if any.
1920 * @mm: The mm_struct to check
1921 * @addr: The address
1922 * @pprev: The pointer to set to the previous VMA
1923 *
1924 * Note that RCU lock is missing here since the external mmap_lock() is used
1925 * instead.
1926 *
1927 * Returns: The VMA associated with @addr, or the next vma.
1928 * May return %NULL in the case of no vma at addr or above.
1929 */
1930 struct vm_area_struct *
1931 find_vma_prev(struct mm_struct *mm, unsigned long addr,
1932 struct vm_area_struct **pprev)
1933 {
1934 struct vm_area_struct *vma;
1935 MA_STATE(mas, &mm->mm_mt, addr, addr);
1936
1937 vma = mas_walk(&mas);
1938 *pprev = mas_prev(&mas, 0);
1939 if (!vma)
1940 vma = mas_next(&mas, ULONG_MAX);
1941 return vma;
1942 }
1943
1944 /*
1945 * Verify that the stack growth is acceptable and
1946 * update accounting. This is shared with both the
1947 * grow-up and grow-down cases.
1948 */
1949 static int acct_stack_growth(struct vm_area_struct *vma,
1950 unsigned long size, unsigned long grow)
1951 {
1952 struct mm_struct *mm = vma->vm_mm;
1953 unsigned long new_start;
1954
1955 /* address space limit tests */
1956 if (!may_expand_vm(mm, vma->vm_flags, grow))
1957 return -ENOMEM;
1958
1959 /* Stack limit test */
1960 if (size > rlimit(RLIMIT_STACK))
1961 return -ENOMEM;
1962
1963 /* mlock limit tests */
1964 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1965 return -ENOMEM;
1966
1967 /* Check to ensure the stack will not grow into a hugetlb-only region */
1968 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1969 vma->vm_end - size;
1970 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1971 return -EFAULT;
1972
1973 /*
1974 * Overcommit.. This must be the final test, as it will
1975 * update security statistics.
1976 */
1977 if (security_vm_enough_memory_mm(mm, grow))
1978 return -ENOMEM;
1979
1980 return 0;
1981 }
1982
1983 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
1984 /*
1985 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
1986 * vma is the last one with address > vma->vm_end. Have to extend vma.
1987 */
1988 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1989 {
1990 struct mm_struct *mm = vma->vm_mm;
1991 struct vm_area_struct *next;
1992 unsigned long gap_addr;
1993 int error = 0;
1994 MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
1995
1996 if (!(vma->vm_flags & VM_GROWSUP))
1997 return -EFAULT;
1998
1999 /* Guard against exceeding limits of the address space. */
2000 address &= PAGE_MASK;
2001 if (address >= (TASK_SIZE & PAGE_MASK))
2002 return -ENOMEM;
2003 address += PAGE_SIZE;
2004
2005 /* Enforce stack_guard_gap */
2006 gap_addr = address + stack_guard_gap;
2007
2008 /* Guard against overflow */
2009 if (gap_addr < address || gap_addr > TASK_SIZE)
2010 gap_addr = TASK_SIZE;
2011
2012 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
2013 if (next && vma_is_accessible(next)) {
2014 if (!(next->vm_flags & VM_GROWSUP))
2015 return -ENOMEM;
2016 /* Check that both stack segments have the same anon_vma? */
2017 }
2018
2019 if (next)
2020 mas_prev_range(&mas, address);
2021
2022 __mas_set_range(&mas, vma->vm_start, address - 1);
2023 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2024 return -ENOMEM;
2025
2026 /* We must make sure the anon_vma is allocated. */
2027 if (unlikely(anon_vma_prepare(vma))) {
2028 mas_destroy(&mas);
2029 return -ENOMEM;
2030 }
2031
2032 /* Lock the VMA before expanding to prevent concurrent page faults */
2033 vma_start_write(vma);
2034 /*
2035 * vma->vm_start/vm_end cannot change under us because the caller
2036 * is required to hold the mmap_lock in read mode. We need the
2037 * anon_vma lock to serialize against concurrent expand_stacks.
2038 */
2039 anon_vma_lock_write(vma->anon_vma);
2040
2041 /* Somebody else might have raced and expanded it already */
2042 if (address > vma->vm_end) {
2043 unsigned long size, grow;
2044
2045 size = address - vma->vm_start;
2046 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2047
2048 error = -ENOMEM;
2049 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2050 error = acct_stack_growth(vma, size, grow);
2051 if (!error) {
2052 /*
2053 * We only hold a shared mmap_lock lock here, so
2054 * we need to protect against concurrent vma
2055 * expansions. anon_vma_lock_write() doesn't
2056 * help here, as we don't guarantee that all
2057 * growable vmas in a mm share the same root
2058 * anon vma. So, we reuse mm->page_table_lock
2059 * to guard against concurrent vma expansions.
2060 */
2061 spin_lock(&mm->page_table_lock);
2062 if (vma->vm_flags & VM_LOCKED)
2063 mm->locked_vm += grow;
2064 vm_stat_account(mm, vma->vm_flags, grow);
2065 anon_vma_interval_tree_pre_update_vma(vma);
2066 vma->vm_end = address;
2067 /* Overwrite old entry in mtree. */
2068 mas_store_prealloc(&mas, vma);
2069 anon_vma_interval_tree_post_update_vma(vma);
2070 spin_unlock(&mm->page_table_lock);
2071
2072 perf_event_mmap(vma);
2073 }
2074 }
2075 }
2076 anon_vma_unlock_write(vma->anon_vma);
2077 khugepaged_enter_vma(vma, vma->vm_flags);
2078 mas_destroy(&mas);
2079 validate_mm(mm);
2080 return error;
2081 }
2082 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2083
2084 /*
2085 * vma is the first one with address < vma->vm_start. Have to extend vma.
2086 * mmap_lock held for writing.
2087 */
2088 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2089 {
2090 struct mm_struct *mm = vma->vm_mm;
2091 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2092 struct vm_area_struct *prev;
2093 int error = 0;
2094
2095 if (!(vma->vm_flags & VM_GROWSDOWN))
2096 return -EFAULT;
2097
2098 address &= __PAGE_MASK;
2099 if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
2100 return -EPERM;
2101
2102 /* Enforce stack_guard_gap */
2103 prev = mas_prev(&mas, 0);
2104 /* Check that both stack segments have the same anon_vma? */
2105 if (prev) {
2106 if (!(prev->vm_flags & VM_GROWSDOWN) &&
2107 vma_is_accessible(prev) &&
2108 (address - prev->vm_end < stack_guard_gap))
2109 return -ENOMEM;
2110 }
2111
2112 if (prev)
2113 mas_next_range(&mas, vma->vm_start);
2114
2115 __mas_set_range(&mas, address, vma->vm_end - 1);
2116 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2117 return -ENOMEM;
2118
2119 /* We must make sure the anon_vma is allocated. */
2120 if (unlikely(anon_vma_prepare(vma))) {
2121 mas_destroy(&mas);
2122 return -ENOMEM;
2123 }
2124
2125 /* Lock the VMA before expanding to prevent concurrent page faults */
2126 vma_start_write(vma);
2127 /*
2128 * vma->vm_start/vm_end cannot change under us because the caller
2129 * is required to hold the mmap_lock in read mode. We need the
2130 * anon_vma lock to serialize against concurrent expand_stacks.
2131 */
2132 anon_vma_lock_write(vma->anon_vma);
2133
2134 /* Somebody else might have raced and expanded it already */
2135 if (address < vma->vm_start) {
2136 unsigned long size, grow;
2137
2138 size = vma->vm_end - address;
2139 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2140
2141 error = -ENOMEM;
2142 if (grow <= vma->vm_pgoff) {
2143 error = acct_stack_growth(vma, size, grow);
2144 if (!error) {
2145 /*
2146 * We only hold a shared mmap_lock lock here, so
2147 * we need to protect against concurrent vma
2148 * expansions. anon_vma_lock_write() doesn't
2149 * help here, as we don't guarantee that all
2150 * growable vmas in a mm share the same root
2151 * anon vma. So, we reuse mm->page_table_lock
2152 * to guard against concurrent vma expansions.
2153 */
2154 spin_lock(&mm->page_table_lock);
2155 if (vma->vm_flags & VM_LOCKED)
2156 mm->locked_vm += grow;
2157 vm_stat_account(mm, vma->vm_flags, grow);
2158 anon_vma_interval_tree_pre_update_vma(vma);
2159 vma->vm_start = address;
2160 vma->vm_pgoff -= grow;
2161 /* Overwrite old entry in mtree. */
2162 mas_store_prealloc(&mas, vma);
2163 anon_vma_interval_tree_post_update_vma(vma);
2164 spin_unlock(&mm->page_table_lock);
2165
2166 perf_event_mmap(vma);
2167 }
2168 }
2169 }
2170 anon_vma_unlock_write(vma->anon_vma);
2171 khugepaged_enter_vma(vma, vma->vm_flags);
2172 mas_destroy(&mas);
2173 validate_mm(mm);
2174 return error;
2175 }
2176
2177 /* enforced gap between the expanding stack and other mappings. */
2178 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2179
2180 static int __init cmdline_parse_stack_guard_gap(char *p)
2181 {
2182 unsigned long val;
2183 char *endptr;
2184
2185 val = simple_strtoul(p, &endptr, 10);
2186 if (!*endptr)
2187 stack_guard_gap = val << PAGE_SHIFT;
2188
2189 return 1;
2190 }
2191 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
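/*
 * The boot parameter is given in pages: with 4 KiB pages,
 * "stack_guard_gap=256" keeps the default 1 MiB gap (256 << 12) and
 * "stack_guard_gap=1" shrinks it to a single page.
 */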
2192
2193 #ifdef CONFIG_STACK_GROWSUP
2194 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2195 {
2196 return expand_upwards(vma, address);
2197 }
2198
2199 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2200 {
2201 struct vm_area_struct *vma, *prev;
2202
2203 addr &= PAGE_MASK;
2204 vma = find_vma_prev(mm, addr, &prev);
2205 if (vma && (vma->vm_start <= addr))
2206 return vma;
2207 if (!prev)
2208 return NULL;
2209 if (expand_stack_locked(prev, addr))
2210 return NULL;
2211 if (prev->vm_flags & VM_LOCKED)
2212 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2213 return prev;
2214 }
2215 #else
2216 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2217 {
2218 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2219 return -EINVAL;
2220 return expand_downwards(vma, address);
2221 }
2222
2223 struct vm_area_struct *find_extend_vma_locked(struct mm_struct *mm, unsigned long addr)
2224 {
2225 struct vm_area_struct *vma;
2226 unsigned long start;
2227
2228 addr &= PAGE_MASK;
2229 vma = find_vma(mm, addr);
2230 if (!vma)
2231 return NULL;
2232 if (vma->vm_start <= addr)
2233 return vma;
2234 start = vma->vm_start;
2235 if (expand_stack_locked(vma, addr))
2236 return NULL;
2237 if (vma->vm_flags & VM_LOCKED)
2238 populate_vma_page_range(vma, addr, start, NULL);
2239 return vma;
2240 }
2241 #endif
2242
2243 /*
2244 * IA64 has some horrid mapping rules: it can expand both up and down,
2245 * but with various special rules.
2246 *
2247 * We'll get rid of this architecture eventually, so the ugliness is
2248 * temporary.
2249 */
2250 #ifdef CONFIG_IA64
2251 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2252 {
2253 return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
2254 REGION_OFFSET(addr) < RGN_MAP_LIMIT;
2255 }
2256
2257 /*
2258 * IA64 stacks grow down, but there's a special register backing store
2259 * that can grow up. Only sequentially, though, so the new address must
2260 * match vm_end.
2261 */
2262 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
2263 {
2264 if (!vma_expand_ok(vma, addr))
2265 return -EFAULT;
2266 if (vma->vm_end != (addr & PAGE_MASK))
2267 return -EFAULT;
2268 return expand_upwards(vma, addr);
2269 }
2270
2271 static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
2272 {
2273 if (!vma_expand_ok(vma, addr))
2274 return -EFAULT;
2275 return expand_downwards(vma, addr);
2276 }
2277
2278 #elif defined(CONFIG_STACK_GROWSUP)
2279
2280 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2281 #define vma_expand_down(vma, addr) (-EFAULT)
2282
2283 #else
2284
2285 #define vma_expand_up(vma,addr) (-EFAULT)
2286 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2287
2288 #endif
2289
2290 /*
2291 * expand_stack(): legacy interface for page faulting. Don't use unless
2292 * you have to.
2293 *
2294 * This is called with the mm locked for reading, drops the lock, takes
2295 * the lock for writing, tries to look up a vma again, expands it if
2296 * necessary, and downgrades the lock to reading again.
2297 *
2298 * If no vma is found or it can't be expanded, it returns NULL and has
2299 * dropped the lock.
2300 */
2301 struct vm_area_struct *expand_stack(struct mm_struct *mm, unsigned long addr)
2302 {
2303 struct vm_area_struct *vma, *prev;
2304
2305 mmap_read_unlock(mm);
2306 if (mmap_write_lock_killable(mm))
2307 return NULL;
2308
2309 vma = find_vma_prev(mm, addr, &prev);
2310 if (vma && vma->vm_start <= addr)
2311 goto success;
2312
2313 if (prev && !vma_expand_up(prev, addr)) {
2314 vma = prev;
2315 goto success;
2316 }
2317
2318 if (vma && !vma_expand_down(vma, addr))
2319 goto success;
2320
2321 mmap_write_unlock(mm);
2322 return NULL;
2323
2324 success:
2325 mmap_write_downgrade(mm);
2326 return vma;
2327 }
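/*
 * Sketch of the intended caller pattern in a page-fault path
 * (illustrative; assumes mmap_lock is already held for reading):
 *
 *	vma = vma_lookup(mm, addr);
 *	if (!vma) {
 *		vma = expand_stack(mm, addr);
 *		if (!vma)
 *			return -EFAULT;
 *	}
 *
 * On failure the lock has been dropped; on success it is again held
 * for reading, as described above.
 */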
2328
2329 /*
2330 * OK - the memory areas we should free are on a maple tree, so release them
2331 * and do the vma updates.
2332 *
2333 * Called with the mm semaphore held.
2334 */
2335 static inline void remove_mt(struct mm_struct *mm, struct ma_state *mas)
2336 {
2337 unsigned long nr_accounted = 0;
2338 struct vm_area_struct *vma;
2339
2340 /* Update high watermark before we lower total_vm */
2341 update_hiwater_vm(mm);
2342 mas_for_each(mas, vma, ULONG_MAX) {
2343 long nrpages = vma_pages(vma);
2344
2345 if (vma->vm_flags & VM_ACCOUNT)
2346 nr_accounted += nrpages;
2347 vm_stat_account(mm, vma->vm_flags, -nrpages);
2348 remove_vma(vma, false);
2349 }
2350 vm_unacct_memory(nr_accounted);
2351 }
2352
2353 /*
2354 * Get rid of page table information in the indicated region.
2355 *
2356 * Called with the mm semaphore held.
2357 */
2358 static void unmap_region(struct mm_struct *mm, struct ma_state *mas,
2359 struct vm_area_struct *vma, struct vm_area_struct *prev,
2360 struct vm_area_struct *next, unsigned long start,
2361 unsigned long end, unsigned long tree_end, bool mm_wr_locked)
2362 {
2363 struct mmu_gather tlb;
2364 unsigned long mt_start = mas->index;
2365
2366 lru_add_drain();
2367 tlb_gather_mmu(&tlb, mm);
2368 update_hiwater_rss(mm);
2369 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2370 mas_set(mas, mt_start);
2371 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2372 next ? next->vm_start : USER_PGTABLES_CEILING,
2373 mm_wr_locked);
2374 tlb_finish_mmu(&tlb);
2375 }
2376
2377 /*
2378 * __split_vma() bypasses sysctl_max_map_count checking. We use this where it
2379 * has already been checked or doesn't make sense to fail.
2380 * VMA Iterator will point to the end VMA.
2381 */
2382 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2383 unsigned long addr, int new_below)
2384 {
2385 struct vma_prepare vp;
2386 struct vm_area_struct *new;
2387 int err;
2388
2389 WARN_ON(vma->vm_start >= addr);
2390 WARN_ON(vma->vm_end <= addr);
2391
2392 if (vma->vm_ops && vma->vm_ops->may_split) {
2393 err = vma->vm_ops->may_split(vma, addr);
2394 if (err)
2395 return err;
2396 }
2397
2398 new = vm_area_dup(vma);
2399 if (!new)
2400 return -ENOMEM;
2401
2402 if (new_below) {
2403 new->vm_end = addr;
2404 } else {
2405 new->vm_start = addr;
2406 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2407 }
2408
2409 err = -ENOMEM;
2410 vma_iter_config(vmi, new->vm_start, new->vm_end);
2411 if (vma_iter_prealloc(vmi, new))
2412 goto out_free_vma;
2413
2414 err = vma_dup_policy(vma, new);
2415 if (err)
2416 goto out_free_vmi;
2417
2418 err = anon_vma_clone(new, vma);
2419 if (err)
2420 goto out_free_mpol;
2421
2422 if (new->vm_file)
2423 get_file(new->vm_file);
2424
2425 if (new->vm_ops && new->vm_ops->open)
2426 new->vm_ops->open(new);
2427
2428 vma_start_write(vma);
2429 vma_start_write(new);
2430
2431 init_vma_prep(&vp, vma);
2432 vp.insert = new;
2433 vma_prepare(&vp);
2434 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2435
2436 if (new_below) {
2437 vma->vm_start = addr;
2438 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2439 } else {
2440 vma->vm_end = addr;
2441 }
2442
2443 /* vma_complete stores the new vma */
2444 vma_complete(&vp, vmi, vma->vm_mm);
2445
2446 /* Success. */
2447 if (new_below)
2448 vma_next(vmi);
2449
2450 split_pad_vma(vma, new, addr, new_below);
2451 return 0;
2452
2453 out_free_mpol:
2454 mpol_put(vma_policy(new));
2455 out_free_vmi:
2456 vma_iter_free(vmi);
2457 out_free_vma:
2458 vm_area_free(new);
2459 return err;
2460 }
2461
2462 /*
2463 * Split a vma into two pieces at address 'addr', a new vma is allocated
2464 * either for the first part or the tail.
2465 */
2466 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2467 unsigned long addr, int new_below)
2468 {
2469 if (vma->vm_mm->map_count >= sysctl_max_map_count)
2470 return -ENOMEM;
2471
2472 return __split_vma(vmi, vma, addr, new_below);
2473 }
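/*
 * Hypothetical use of split_vma() (names illustrative, mmap_lock held
 * for writing): make @addr a VMA boundary if it currently falls inside
 * a mapping, as the munmap and mprotect paths do:
 *
 *	VMA_ITERATOR(vmi, mm, addr);
 *	struct vm_area_struct *vma = vma_find(&vmi, addr + 1);
 *
 *	if (vma && vma->vm_start < addr)
 *		error = split_vma(&vmi, vma, addr, 0);
 */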
2474
2475 /*
2476 * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
2477 * @vmi: The vma iterator
2478 * @vma: The starting vm_area_struct
2479 * @mm: The mm_struct
2480 * @start: The aligned start address to munmap.
2481 * @end: The aligned end address to munmap.
2482 * @uf: The userfaultfd list_head
2483 * @unlock: Set to true to drop the mmap_lock. unlocking only happens on
2484 * success.
2485 *
2486 * Return: 0 on success and drops the lock if so directed, error and leaves the
2487 * lock held otherwise.
2488 */
2489 static int
2490 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2491 struct mm_struct *mm, unsigned long start,
2492 unsigned long end, struct list_head *uf, bool unlock)
2493 {
2494 struct vm_area_struct *prev, *next = NULL;
2495 struct maple_tree mt_detach;
2496 int count = 0;
2497 int error = -ENOMEM;
2498 unsigned long locked_vm = 0;
2499 MA_STATE(mas_detach, &mt_detach, 0, 0);
2500 mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
2501 mt_on_stack(mt_detach);
2502
2503 /*
2504 * If we need to split any vma, do it now to save pain later.
2505 *
2506 * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2507 * unmapped vm_area_struct will remain in use: so lower split_vma
2508 * places tmp vma above, and higher split_vma places tmp vma below.
2509 */
2510
2511 /* Does it split the first one? */
2512 if (start > vma->vm_start) {
2513
2514 /*
2515 * Make sure that map_count on return from munmap() will
2516 * not exceed its limit; but let map_count go just above
2517 * its limit temporarily, to help free resources as expected.
2518 */
2519 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2520 goto map_count_exceeded;
2521
2522 error = __split_vma(vmi, vma, start, 1);
2523 if (error)
2524 goto start_split_failed;
2525 }
2526
2527 /*
2528 * Detach a range of VMAs from the mm. Using next as a temp variable as
2529 * it is always overwritten.
2530 */
2531 next = vma;
2532 do {
2533 /* Does it split the end? */
2534 if (next->vm_end > end) {
2535 error = __split_vma(vmi, next, end, 0);
2536 if (error)
2537 goto end_split_failed;
2538 }
2539 vma_start_write(next);
2540 mas_set(&mas_detach, count);
2541 error = mas_store_gfp(&mas_detach, next, GFP_KERNEL);
2542 if (error)
2543 goto munmap_gather_failed;
2544 vma_mark_detached(next, true);
2545 if (next->vm_flags & VM_LOCKED)
2546 locked_vm += vma_pages(next);
2547
2548 count++;
2549 if (unlikely(uf)) {
2550 /*
2551 * If userfaultfd_unmap_prep returns an error the vmas
2552 * will remain split, but userland will get a
2553 * highly unexpected error anyway. This is no
2554 * different than the case where the first of the two
2555 * __split_vma fails, but we don't undo the first
2556 * split, even though we could. This failure is unlikely
2557 * enough that it's not worth optimizing for.
2558 */
2559 error = userfaultfd_unmap_prep(next, start, end, uf);
2560
2561 if (error)
2562 goto userfaultfd_error;
2563 }
2564 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
2565 BUG_ON(next->vm_start < start);
2566 BUG_ON(next->vm_start > end);
2567 #endif
2568 } for_each_vma_range(*vmi, next, end);
2569
2570 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
2571 /* Make sure no VMAs are about to be lost. */
2572 {
2573 MA_STATE(test, &mt_detach, 0, 0);
2574 struct vm_area_struct *vma_mas, *vma_test;
2575 int test_count = 0;
2576
2577 vma_iter_set(vmi, start);
2578 rcu_read_lock();
2579 vma_test = mas_find(&test, count - 1);
2580 for_each_vma_range(*vmi, vma_mas, end) {
2581 BUG_ON(vma_mas != vma_test);
2582 test_count++;
2583 vma_test = mas_next(&test, count - 1);
2584 }
2585 rcu_read_unlock();
2586 BUG_ON(count != test_count);
2587 }
2588 #endif
2589
2590 while (vma_iter_addr(vmi) > start)
2591 vma_iter_prev_range(vmi);
2592
2593 error = vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL);
2594 if (error)
2595 goto clear_tree_failed;
2596
2597 /* Point of no return */
2598 mm->locked_vm -= locked_vm;
2599 mm->map_count -= count;
2600 if (unlock)
2601 mmap_write_downgrade(mm);
2602
2603 prev = vma_iter_prev_range(vmi);
2604 next = vma_next(vmi);
2605 if (next)
2606 vma_iter_prev_range(vmi);
2607
2608 /*
2609 * We can free page tables without write-locking mmap_lock because VMAs
2610 * were isolated before we downgraded mmap_lock.
2611 */
2612 mas_set(&mas_detach, 1);
2613 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2614 !unlock);
2615 /* Statistics and freeing VMAs */
2616 mas_set(&mas_detach, 0);
2617 remove_mt(mm, &mas_detach);
2618 validate_mm(mm);
2619 if (unlock)
2620 mmap_read_unlock(mm);
2621
2622 __mt_destroy(&mt_detach);
2623 return 0;
2624
2625 clear_tree_failed:
2626 userfaultfd_error:
2627 munmap_gather_failed:
2628 end_split_failed:
2629 mas_set(&mas_detach, 0);
2630 mas_for_each(&mas_detach, next, end)
2631 vma_mark_detached(next, false);
2632
2633 __mt_destroy(&mt_detach);
2634 start_split_failed:
2635 map_count_exceeded:
2636 validate_mm(mm);
2637 return error;
2638 }
2639
2640 /*
2641 * do_vmi_munmap() - munmap a given range.
2642 * @vmi: The vma iterator
2643 * @mm: The mm_struct
2644 * @start: The start address to munmap
2645 * @len: The length of the range to munmap
2646 * @uf: The userfaultfd list_head
2647 * @unlock: set to true if the user wants to drop the mmap_lock on success
2648 *
2649 * This function takes a @vmi that is either pointing to the previous VMA or set
2650 * to MA_START and sets it up to remove the mapping(s). The @len will be
2651 * aligned and any arch_unmap work will be performed.
2652 *
2653 * Return: 0 on success and drops the lock if so directed, error and leaves the
2654 * lock held otherwise.
2655 */
2656 int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
2657 unsigned long start, size_t len, struct list_head *uf,
2658 bool unlock)
2659 {
2660 unsigned long end;
2661 struct vm_area_struct *vma;
2662
2663 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2664 return -EINVAL;
2665
2666 end = start + PAGE_ALIGN(len);
2667 if (end == start)
2668 return -EINVAL;
2669
2670 /*
2671 * Check if memory is sealed before arch_unmap.
2672 * Prevent unmapping a sealed VMA.
2673 * can_modify_mm assumes we have acquired the lock on MM.
2674 */
2675 if (unlikely(!can_modify_mm(mm, start, end)))
2676 return -EPERM;
2677
2678 /* arch_unmap() might do unmaps itself. */
2679 arch_unmap(mm, start, end);
2680
2681 /* Find the first overlapping VMA */
2682 vma = vma_find(vmi, end);
2683 if (!vma) {
2684 if (unlock)
2685 mmap_write_unlock(mm);
2686 return 0;
2687 }
2688
2689 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
2690 }
2691
2692 /* do_munmap() - Wrapper function for non-maple tree aware do_munmap() calls.
2693 * @mm: The mm_struct
2694 * @start: The start address to munmap
2695 * @len: The length to be munmapped.
2696 * @uf: The userfaultfd list_head
2697 *
2698 * Return: 0 on success, error otherwise.
2699 */
2700 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2701 struct list_head *uf)
2702 {
2703 VMA_ITERATOR(vmi, mm, start);
2704
2705 return do_vmi_munmap(&vmi, mm, start, len, uf, false);
2706 }
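/*
 * Illustrative caller of do_munmap() (not from this file); the lock is
 * taken and released by the caller, and userfaultfd is notified once
 * the lock has been dropped:
 *
 *	LIST_HEAD(uf);
 *
 *	if (mmap_write_lock_killable(mm))
 *		return -EINTR;
 *	ret = do_munmap(mm, start, len, &uf);
 *	mmap_write_unlock(mm);
 *	userfaultfd_unmap_complete(mm, &uf);
 */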
2707
2708 unsigned long mmap_region(struct file *file, unsigned long addr,
2709 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2710 struct list_head *uf)
2711 {
2712 struct mm_struct *mm = current->mm;
2713 struct vm_area_struct *vma = NULL;
2714 struct vm_area_struct *next, *prev, *merge;
2715 pgoff_t pglen = len >> PAGE_SHIFT;
2716 unsigned long charged = 0;
2717 unsigned long end = addr + len;
2718 unsigned long merge_start = addr, merge_end = end;
2719 pgoff_t vm_pgoff;
2720 int error;
2721 VMA_ITERATOR(vmi, mm, addr);
2722
2723 /* Check against address space limit. */
2724 if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
2725 unsigned long nr_pages;
2726
2727 /*
2728 * MAP_FIXED may remove pages of mappings that intersects with
2729 * requested mapping. Account for the pages it would unmap.
2730 */
2731 nr_pages = count_vma_pages_range(mm, addr, end);
2732
2733 if (!may_expand_vm(mm, vm_flags,
2734 (len >> PAGE_SHIFT) - nr_pages))
2735 return -ENOMEM;
2736 }
2737
2738 /* Unmap any existing mapping in the area */
2739 error = do_vmi_munmap(&vmi, mm, addr, len, uf, false);
2740 if (error == -EPERM)
2741 return error;
2742 else if (error)
2743 return -ENOMEM;
2744
2745 /*
2746 * Private writable mapping: check memory availability
2747 */
2748 if (accountable_mapping(file, vm_flags)) {
2749 charged = len >> PAGE_SHIFT;
2750 if (security_vm_enough_memory_mm(mm, charged))
2751 return -ENOMEM;
2752 vm_flags |= VM_ACCOUNT;
2753 }
2754
2755 next = vma_next(&vmi);
2756 prev = vma_prev(&vmi);
2757 if (vm_flags & VM_SPECIAL) {
2758 if (prev)
2759 vma_iter_next_range(&vmi);
2760 goto cannot_expand;
2761 }
2762
2763 /* Attempt to expand an old mapping */
2764 /* Check next */
2765 if (next && next->vm_start == end && !vma_policy(next) &&
2766 can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
2767 NULL_VM_UFFD_CTX, NULL)) {
2768 merge_end = next->vm_end;
2769 vma = next;
2770 vm_pgoff = next->vm_pgoff - pglen;
2771 }
2772
2773 /* Check prev */
2774 if (prev && prev->vm_end == addr && !vma_policy(prev) &&
2775 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2776 pgoff, vma->vm_userfaultfd_ctx, NULL) :
2777 can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
2778 NULL_VM_UFFD_CTX, NULL))) {
2779 merge_start = prev->vm_start;
2780 vma = prev;
2781 vm_pgoff = prev->vm_pgoff;
2782 } else if (prev) {
2783 vma_iter_next_range(&vmi);
2784 }
2785
2786 /* Actually expand, if possible */
2787 if (vma &&
2788 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2789 khugepaged_enter_vma(vma, vm_flags);
2790 goto expanded;
2791 }
2792
2793 if (vma == prev)
2794 vma_iter_set(&vmi, addr);
2795 cannot_expand:
2796
2797 /*
2798 * Determine the object being mapped and call the appropriate
2799 * specific mapper. The address has already been validated but not
2800 * unmapped; the old mappings, however, have been removed from the list.
2801 */
2802 vma = vm_area_alloc(mm);
2803 if (!vma) {
2804 error = -ENOMEM;
2805 goto unacct_error;
2806 }
2807
2808 vma_iter_config(&vmi, addr, end);
2809 vma->vm_start = addr;
2810 vma->vm_end = end;
2811 vm_flags_init(vma, vm_flags);
2812 vma->vm_page_prot = vm_get_page_prot(vm_flags);
2813 vma->vm_pgoff = pgoff;
2814
2815 if (file) {
2816 if (vm_flags & VM_SHARED) {
2817 error = mapping_map_writable(file->f_mapping);
2818 if (error)
2819 goto free_vma;
2820 }
2821
2822 vma->vm_file = get_file(file);
2823 error = call_mmap(file, vma);
2824 if (error)
2825 goto unmap_and_free_vma;
2826
2827 /*
2828 * Expansion is handled above, merging is handled below.
2829 * Drivers should not alter the address of the VMA.
2830 */
2831 error = -EINVAL;
2832 if (WARN_ON((addr != vma->vm_start)))
2833 goto close_and_free_vma;
2834
2835 vma_iter_config(&vmi, addr, end);
2836 /*
2837 * If vm_flags changed after call_mmap(), we should try merge
2838 * vma again as we may succeed this time.
2839 */
2840 if (unlikely(vm_flags != vma->vm_flags && prev)) {
2841 merge = vma_merge(&vmi, mm, prev, vma->vm_start,
2842 vma->vm_end, vma->vm_flags, NULL,
2843 vma->vm_file, vma->vm_pgoff, NULL,
2844 NULL_VM_UFFD_CTX, NULL);
2845 if (merge) {
2846 /*
2847 * ->mmap() can change vma->vm_file and fput
2848 * the original file. So fput the vma->vm_file
2849 * here or we would add an extra fput for file
2850 * and cause general protection fault
2851 * ultimately.
2852 */
2853 fput(vma->vm_file);
2854 vm_area_free(vma);
2855 vma = merge;
2856 /* Update vm_flags to pick up the change. */
2857 vm_flags = vma->vm_flags;
2858 goto unmap_writable;
2859 }
2860 }
2861
2862 vm_flags = vma->vm_flags;
2863 } else if (vm_flags & VM_SHARED) {
2864 error = shmem_zero_setup(vma);
2865 if (error)
2866 goto free_vma;
2867 } else {
2868 vma_set_anonymous(vma);
2869 }
2870
2871 if (map_deny_write_exec(vma, vma->vm_flags)) {
2872 error = -EACCES;
2873 goto close_and_free_vma;
2874 }
2875
2876 /* Allow architectures to sanity-check the vm_flags */
2877 error = -EINVAL;
2878 if (!arch_validate_flags(vma->vm_flags))
2879 goto close_and_free_vma;
2880
2881 error = -ENOMEM;
2882 if (vma_iter_prealloc(&vmi, vma))
2883 goto close_and_free_vma;
2884
2885 /* Lock the VMA since it is modified after insertion into VMA tree */
2886 vma_start_write(vma);
2887 vma_iter_store(&vmi, vma);
2888 mm->map_count++;
2889 if (vma->vm_file) {
2890 i_mmap_lock_write(vma->vm_file->f_mapping);
2891 if (vma->vm_flags & VM_SHARED)
2892 mapping_allow_writable(vma->vm_file->f_mapping);
2893
2894 flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2895 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2896 flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2897 i_mmap_unlock_write(vma->vm_file->f_mapping);
2898 }
2899
2900 /*
2901 * vma_merge() calls khugepaged_enter_vma() as well; the call
2902 * below covers the non-merge case.
2903 */
2904 khugepaged_enter_vma(vma, vma->vm_flags);
2905
2906 /* Once vma denies write, undo our temporary denial count */
2907 unmap_writable:
2908 if (file && vm_flags & VM_SHARED)
2909 mapping_unmap_writable(file->f_mapping);
2910 file = vma->vm_file;
2911 ksm_add_vma(vma);
2912 expanded:
2913 perf_event_mmap(vma);
2914
2915 vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
2916 if (vm_flags & VM_LOCKED) {
2917 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2918 is_vm_hugetlb_page(vma) ||
2919 vma == get_gate_vma(current->mm))
2920 vm_flags_clear(vma, VM_LOCKED_MASK);
2921 else
2922 mm->locked_vm += (len >> PAGE_SHIFT);
2923 }
2924
2925 if (file)
2926 uprobe_mmap(vma);
2927
2928 /*
2929 * A new (or expanded) vma always gets the soft-dirty status.
2930 * Otherwise the user-space soft-dirty page tracker could not
2931 * distinguish the case where a vma area was unmapped and then
2932 * mapped anew in place (which must be treated as a completely
2933 * new data area).
2934 */
2935 vm_flags_set(vma, VM_SOFTDIRTY);
2936
2937 vma_set_page_prot(vma);
2938
2939 trace_android_vh_mmap_region(vma, addr);
2940
2941 validate_mm(mm);
2942 return addr;
2943
2944 close_and_free_vma:
2945 if (file && vma->vm_ops && vma->vm_ops->close)
2946 vma->vm_ops->close(vma);
2947
2948 if (file || vma->vm_file) {
2949 unmap_and_free_vma:
2950 fput(vma->vm_file);
2951 vma->vm_file = NULL;
2952
2953 vma_iter_set(&vmi, vma->vm_end);
2954 /* Undo any partial mapping done by a device driver. */
2955 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
2956 vma->vm_end, vma->vm_end, true);
2957 }
2958 if (file && (vm_flags & VM_SHARED))
2959 mapping_unmap_writable(file->f_mapping);
2960 free_vma:
2961 vm_area_free(vma);
2962 unacct_error:
2963 if (charged)
2964 vm_unacct_memory(charged);
2965 validate_mm(mm);
2966 return error;
2967 }
2968
2969 static int __vm_munmap(unsigned long start, size_t len, bool unlock)
2970 {
2971 int ret;
2972 struct mm_struct *mm = current->mm;
2973 LIST_HEAD(uf);
2974 VMA_ITERATOR(vmi, mm, start);
2975
2976 if (mmap_write_lock_killable(mm))
2977 return -EINTR;
2978
2979 ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock);
2980 if (ret || !unlock)
2981 mmap_write_unlock(mm);
2982
2983 userfaultfd_unmap_complete(mm, &uf);
2984 return ret;
2985 }
2986
2987 int vm_munmap(unsigned long start, size_t len)
2988 {
2989 return __vm_munmap(start, len, false);
2990 }
2991 EXPORT_SYMBOL(vm_munmap);
2992
2993 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2994 {
2995 addr = untagged_addr(addr);
2996
2997 if (!__PAGE_ALIGNED(addr))
2998 return -EINVAL;
2999
3000 len = __PAGE_ALIGN(len);
3001
3002 profile_munmap(addr);
3003 return __vm_munmap(addr, len, true);
3004 }
3005
3006
3007 /*
3008 * Emulation of deprecated remap_file_pages() syscall.
3009 */
3010 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
3011 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
3012 {
3013
3014 struct mm_struct *mm = current->mm;
3015 struct vm_area_struct *vma;
3016 unsigned long populate = 0;
3017 unsigned long ret = -EINVAL;
3018 struct file *file;
3019
3020 pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/mm/remap_file_pages.rst.\n",
3021 current->comm, current->pid);
3022
3023 if (prot)
3024 return ret;
3025 start = start & PAGE_MASK;
3026 size = size & PAGE_MASK;
3027
3028 if (start + size <= start)
3029 return ret;
3030
3031 /* Does pgoff wrap? */
3032 if (pgoff + (size >> PAGE_SHIFT) < pgoff)
3033 return ret;
3034
3035 if (mmap_write_lock_killable(mm))
3036 return -EINTR;
3037
3038 vma = vma_lookup(mm, start);
3039
3040 if (!vma || !(vma->vm_flags & VM_SHARED))
3041 goto out;
3042
3043 if (start + size > vma->vm_end) {
3044 VMA_ITERATOR(vmi, mm, vma->vm_end);
3045 struct vm_area_struct *next, *prev = vma;
3046
3047 for_each_vma_range(vmi, next, start + size) {
3048 /* hole between vmas ? */
3049 if (next->vm_start != prev->vm_end)
3050 goto out;
3051
3052 if (next->vm_file != vma->vm_file)
3053 goto out;
3054
3055 if (next->vm_flags != vma->vm_flags)
3056 goto out;
3057
3058 if (start + size <= next->vm_end)
3059 break;
3060
3061 prev = next;
3062 }
3063
3064 if (!next)
3065 goto out;
3066 }
3067
3068 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3069 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3070 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3071
3072 flags &= MAP_NONBLOCK;
3073 flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
3074 if (vma->vm_flags & VM_LOCKED)
3075 flags |= MAP_LOCKED;
3076
3077 file = get_file(vma->vm_file);
3078 ret = do_mmap(vma->vm_file, start, size,
3079 prot, flags, 0, pgoff, &populate, NULL);
3080 fput(file);
3081 out:
3082 mmap_write_unlock(mm);
3083 if (populate)
3084 mm_populate(ret, populate);
3085 if (!IS_ERR_VALUE(ret))
3086 ret = 0;
3087 return ret;
3088 }
3089
3090 /*
3091 * do_vma_munmap() - Unmap a full or partial vma.
3092 * @vmi: The vma iterator pointing at the vma
3093 * @vma: The first vma to be munmapped
3094 * @start: the start of the address to unmap
3095 * @end: The end of the address to unmap
3096 * @uf: The userfaultfd list_head
3097 * @unlock: Drop the lock on success
3098 *
3099 * unmaps a VMA mapping when the vma iterator is already in position.
3100 * Does not handle alignment.
3101 *
3102 * Return: 0 on success and drops the lock if so directed, error on failure and will
3103 * still hold the lock.
3104 */
3105 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3106 unsigned long start, unsigned long end, struct list_head *uf,
3107 bool unlock)
3108 {
3109 struct mm_struct *mm = vma->vm_mm;
3110
3111 /*
3112 * Check if memory is sealed before arch_unmap.
3113 * Prevent unmapping a sealed VMA.
3114 * can_modify_mm assumes we have acquired the lock on MM.
3115 */
3116 if (unlikely(!can_modify_mm(mm, start, end)))
3117 return -EPERM;
3118
3119 arch_unmap(mm, start, end);
3120 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3121 }
3122
3123 /*
3124 * do_brk_flags() - Increase the brk vma if the flags match.
3125 * @vmi: The vma iterator
3126 * @addr: The start address
3127 * @len: The length of the increase
3128 * @vma: The vma,
3129 * @flags: The VMA Flags
3130 *
3131 * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags
3132 * do not match then create a new anonymous VMA. Eventually we may be able to
3133 * do some brk-specific accounting here.
3134 */
3135 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3136 unsigned long addr, unsigned long len, unsigned long flags)
3137 {
3138 struct mm_struct *mm = current->mm;
3139 struct vma_prepare vp;
3140
3141 /*
3142 * Check against address space limits by the changed size
3143 * Note: This happens *after* clearing old mappings in some code paths.
3144 */
3145 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3146 if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3147 return -ENOMEM;
3148
3149 if (mm->map_count > sysctl_max_map_count)
3150 return -ENOMEM;
3151
3152 if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3153 return -ENOMEM;
3154
3155 /*
3156 * Expand the existing vma if possible; Note that singular lists do not
3157 * occur after forking, so the expand will only happen on new VMAs.
3158 */
3159 if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3160 can_vma_merge_after(vma, flags, NULL, NULL,
3161 addr >> PAGE_SHIFT, NULL_VM_UFFD_CTX, NULL)) {
3162 vma_iter_config(vmi, vma->vm_start, addr + len);
3163 if (vma_iter_prealloc(vmi, vma))
3164 goto unacct_fail;
3165
3166 vma_start_write(vma);
3167
3168 init_vma_prep(&vp, vma);
3169 vma_prepare(&vp);
3170 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3171 vma->vm_end = addr + len;
3172 vm_flags_set(vma, VM_SOFTDIRTY);
3173 vma_iter_store(vmi, vma);
3174
3175 vma_complete(&vp, vmi, mm);
3176 khugepaged_enter_vma(vma, flags);
3177 goto out;
3178 }
3179
3180 if (vma)
3181 vma_iter_next_range(vmi);
3182 /* create a vma struct for an anonymous mapping */
3183 vma = vm_area_alloc(mm);
3184 if (!vma)
3185 goto unacct_fail;
3186
3187 vma_set_anonymous(vma);
3188 vma->vm_start = addr;
3189 vma->vm_end = addr + len;
3190 vma->vm_pgoff = addr >> PAGE_SHIFT;
3191 vm_flags_init(vma, flags);
3192 vma->vm_page_prot = vm_get_page_prot(flags);
3193 vma_start_write(vma);
3194 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3195 goto mas_store_fail;
3196
3197 mm->map_count++;
3198 validate_mm(mm);
3199 ksm_add_vma(vma);
3200 out:
3201 perf_event_mmap(vma);
3202 mm->total_vm += len >> PAGE_SHIFT;
3203 mm->data_vm += len >> PAGE_SHIFT;
3204 if (flags & VM_LOCKED)
3205 mm->locked_vm += (len >> PAGE_SHIFT);
3206 vm_flags_set(vma, VM_SOFTDIRTY);
3207 return 0;
3208
3209 mas_store_fail:
3210 vm_area_free(vma);
3211 unacct_fail:
3212 vm_unacct_memory(len >> PAGE_SHIFT);
3213 return -ENOMEM;
3214 }
3215
3216 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3217 {
3218 struct mm_struct *mm = current->mm;
3219 struct vm_area_struct *vma = NULL;
3220 unsigned long len;
3221 int ret;
3222 bool populate;
3223 LIST_HEAD(uf);
3224 VMA_ITERATOR(vmi, mm, addr);
3225
3226 len = PAGE_ALIGN(request);
3227 if (len < request)
3228 return -ENOMEM;
3229 if (!len)
3230 return 0;
3231
3232 /* Until we need other flags, refuse anything except VM_EXEC. */
3233 if ((flags & (~VM_EXEC)) != 0)
3234 return -EINVAL;
3235
3236 if (mmap_write_lock_killable(mm))
3237 return -EINTR;
3238
3239 ret = check_brk_limits(addr, len);
3240 if (ret)
3241 goto limits_failed;
3242
3243 ret = do_vmi_munmap(&vmi, mm, addr, len, &uf, 0);
3244 if (ret)
3245 goto munmap_failed;
3246
3247 vma = vma_prev(&vmi);
3248 ret = do_brk_flags(&vmi, vma, addr, len, flags);
3249 populate = ((mm->def_flags & VM_LOCKED) != 0);
3250 mmap_write_unlock(mm);
3251 userfaultfd_unmap_complete(mm, &uf);
3252 if (populate && !ret)
3253 mm_populate(addr, len);
3254 return ret;
3255
3256 munmap_failed:
3257 limits_failed:
3258 mmap_write_unlock(mm);
3259 return ret;
3260 }
3261 EXPORT_SYMBOL(vm_brk_flags);
3262
3263 int vm_brk(unsigned long addr, unsigned long len)
3264 {
3265 return vm_brk_flags(addr, len, 0);
3266 }
3267 EXPORT_SYMBOL(vm_brk);
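/*
 * vm_brk() and vm_brk_flags() are used by binary loaders to set up
 * anonymous, brk-style regions such as bss. A sketch of such a caller
 * (names illustrative):
 *
 *	error = vm_brk_flags(bss_start, bss_len, 0);
 *	if (error)
 *		return error;
 */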
3268
3269 /* Release all mmaps. */
3270 void exit_mmap(struct mm_struct *mm)
3271 {
3272 struct mmu_gather tlb;
3273 struct vm_area_struct *vma;
3274 unsigned long nr_accounted = 0;
3275 MA_STATE(mas, &mm->mm_mt, 0, 0);
3276 int count = 0;
3277
3278 /* mm's last user has gone, and it's about to be pulled down */
3279 mmu_notifier_release(mm);
3280
3281 mmap_read_lock(mm);
3282 arch_exit_mmap(mm);
3283
3284 vma = mas_find(&mas, ULONG_MAX);
3285 if (!vma || unlikely(xa_is_zero(vma))) {
3286 /* Can happen if dup_mmap() received an OOM */
3287 mmap_read_unlock(mm);
3288 mmap_write_lock(mm);
3289 goto destroy;
3290 }
3291
3292 lru_add_drain();
3293 flush_cache_mm(mm);
3294 tlb_gather_mmu_fullmm(&tlb, mm);
3295 trace_android_vh_swapmem_gather_init(mm);
3296 /* update_hiwater_rss(mm) here? but nobody should be looking */
3297 /* Use ULONG_MAX here to ensure all VMAs in the mm are unmapped */
3298 unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3299 trace_android_vh_swapmem_gather_finish(mm);
3300 mmap_read_unlock(mm);
3301
3302 /*
3303 * Set MMF_OOM_SKIP to hide this task from the oom killer/reaper
3304 * because the memory has already been freed.
3305 */
3306 set_bit(MMF_OOM_SKIP, &mm->flags);
3307 mmap_write_lock(mm);
3308 mt_clear_in_rcu(&mm->mm_mt);
3309 mas_set(&mas, vma->vm_end);
3310 free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
3311 USER_PGTABLES_CEILING, true);
3312 tlb_finish_mmu(&tlb);
3313
3314 /*
3315 * Walk the list again, actually closing and freeing it, with preemption
3316 * enabled, without holding any MM locks besides the unreachable
3317 * mmap_write_lock.
3318 */
3319 mas_set(&mas, vma->vm_end);
3320 do {
3321 if (vma->vm_flags & VM_ACCOUNT)
3322 nr_accounted += vma_pages(vma);
3323 remove_vma(vma, true);
3324 count++;
3325 cond_resched();
3326 vma = mas_find(&mas, ULONG_MAX);
3327 } while (vma && likely(!xa_is_zero(vma)));
3328
3329 BUG_ON(count != mm->map_count);
3330
3331 trace_exit_mmap(mm);
3332 destroy:
3333 __mt_destroy(&mm->mm_mt);
3334 mmap_write_unlock(mm);
3335 vm_unacct_memory(nr_accounted);
3336 }
3337
3338 /* Insert vm structure into process list sorted by address
3339 * and into the inode's i_mmap tree. If vm_file is non-NULL
3340 * then i_mmap_rwsem is taken here.
3341 */
3342 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3343 {
3344 unsigned long charged = vma_pages(vma);
3345
3346
3347 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3348 return -ENOMEM;
3349
3350 if ((vma->vm_flags & VM_ACCOUNT) &&
3351 security_vm_enough_memory_mm(mm, charged))
3352 return -ENOMEM;
3353
3354 /*
3355 * The vm_pgoff of a purely anonymous vma should be irrelevant
3356 * until its first write fault, when page's anon_vma and index
3357 * are set. But now set the vm_pgoff it will almost certainly
3358 * end up with (unless mremap moves it elsewhere before that
3359 * first wfault), so /proc/pid/maps tells a consistent story.
3360 *
3361 * By setting it to reflect the virtual start address of the
3362 * vma, merges and splits can happen in a seamless way, just
3363 * using the existing file pgoff checks and manipulations.
3364 * Similarly in do_mmap and in do_brk_flags.
3365 */
3366 if (vma_is_anonymous(vma)) {
3367 BUG_ON(vma->anon_vma);
3368 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3369 }
3370
3371 if (vma_link(mm, vma)) {
3372 vm_unacct_memory(charged);
3373 return -ENOMEM;
3374 }
3375
3376 return 0;
3377 }
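/*
 * A sketch of a caller (compare __install_special_mapping() below):
 * allocate a VMA, fill in the basic fields, then let insert_vm_struct()
 * link it into the mm. Error handling is trimmed down to the essential
 * -ENOMEM path:
 *
 *	vma = vm_area_alloc(mm);
 *	if (!vma)
 *		return -ENOMEM;
 *	vma->vm_start = addr;
 *	vma->vm_end = addr + len;
 *	vm_flags_init(vma, vm_flags);
 *	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 *	if (insert_vm_struct(mm, vma)) {
 *		vm_area_free(vma);
 *		return -ENOMEM;
 *	}
 */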
3378
3379 /*
3380 * Copy the vma structure to a new location in the same mm,
3381 * prior to moving page table entries, to effect an mremap move.
3382 */
3383 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3384 unsigned long addr, unsigned long len, pgoff_t pgoff,
3385 bool *need_rmap_locks)
3386 {
3387 struct vm_area_struct *vma = *vmap;
3388 unsigned long vma_start = vma->vm_start;
3389 struct mm_struct *mm = vma->vm_mm;
3390 struct vm_area_struct *new_vma, *prev;
3391 bool faulted_in_anon_vma = true;
3392 VMA_ITERATOR(vmi, mm, addr);
3393
3394 /*
3395 * If anonymous vma has not yet been faulted, update new pgoff
3396 * to match new location, to increase its chance of merging.
3397 */
3398 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3399 pgoff = addr >> PAGE_SHIFT;
3400 faulted_in_anon_vma = false;
3401 }
3402
3403 new_vma = find_vma_prev(mm, addr, &prev);
3404 if (new_vma && new_vma->vm_start < addr + len)
3405 return NULL; /* should never get here */
3406
3407 new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
3408 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3409 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3410 if (new_vma) {
3411 /*
3412 * Source vma may have been merged into new_vma
3413 */
3414 if (unlikely(vma_start >= new_vma->vm_start &&
3415 vma_start < new_vma->vm_end)) {
3416 /*
3417 * The only way we can get a vma_merge with
3418 * self during an mremap is if the vma hasn't
3419 * been faulted in yet and we were allowed to
3420 * reset the dst vma->vm_pgoff to the
3421 * destination address of the mremap to allow
3422 * the merge to happen. mremap must change the
3423 * vm_pgoff linearity between src and dst vmas
3424 * (in turn preventing a vma_merge) to be
3425 * safe. It is only safe to keep the vm_pgoff
3426 * linear if there are no pages mapped yet.
3427 */
3428 VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3429 *vmap = vma = new_vma;
3430 }
3431 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3432 } else {
3433 new_vma = vm_area_dup(vma);
3434 if (!new_vma)
3435 goto out;
3436 new_vma->vm_start = addr;
3437 new_vma->vm_end = addr + len;
3438 new_vma->vm_pgoff = pgoff;
3439 if (vma_dup_policy(vma, new_vma))
3440 goto out_free_vma;
3441 if (anon_vma_clone(new_vma, vma))
3442 goto out_free_mempol;
3443 if (new_vma->vm_file)
3444 get_file(new_vma->vm_file);
3445 if (new_vma->vm_ops && new_vma->vm_ops->open)
3446 new_vma->vm_ops->open(new_vma);
3447 if (vma_link(mm, new_vma))
3448 goto out_vma_link;
3449 *need_rmap_locks = false;
3450 }
3451 return new_vma;
3452
3453 out_vma_link:
3454 if (new_vma->vm_ops && new_vma->vm_ops->close)
3455 new_vma->vm_ops->close(new_vma);
3456
3457 if (new_vma->vm_file)
3458 fput(new_vma->vm_file);
3459
3460 unlink_anon_vmas(new_vma);
3461 out_free_mempol:
3462 mpol_put(vma_policy(new_vma));
3463 out_free_vma:
3464 vm_area_free(new_vma);
3465 out:
3466 return NULL;
3467 }
3468
3469 /*
3470 * Return true if the calling process may expand its vm space by the passed
3471 * number of pages
3472 */
3473 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3474 {
3475 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3476 return false;
3477
3478 if (is_data_mapping(flags) &&
3479 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3480 /* Workaround for Valgrind */
3481 if (rlimit(RLIMIT_DATA) == 0 &&
3482 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3483 return true;
3484
3485 pr_warn_once("%s (%d): VmData %lu exceed data ulimit %lu. Update limits%s.\n",
3486 current->comm, current->pid,
3487 (mm->data_vm + npages) << PAGE_SHIFT,
3488 rlimit(RLIMIT_DATA),
3489 ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3490
3491 if (!ignore_rlimit_data)
3492 return false;
3493 }
3494
3495 return true;
3496 }
3497
3498 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3499 {
3500 WRITE_ONCE(mm->total_vm, READ_ONCE(mm->total_vm)+npages);
3501
3502 if (is_exec_mapping(flags))
3503 mm->exec_vm += npages;
3504 else if (is_stack_mapping(flags))
3505 mm->stack_vm += npages;
3506 else if (is_data_mapping(flags))
3507 mm->data_vm += npages;
3508 }
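/*
 * Illustrative sketch (not part of the original source): callers are expected
 * to pair may_expand_vm() with vm_stat_account(), checking the rlimits before
 * charging the pages, e.g. (vm_flags and pglen are placeholders):
 *
 *	if (!may_expand_vm(mm, vm_flags, pglen))
 *		return -ENOMEM;
 *	... create the mapping ...
 *	vm_stat_account(mm, vm_flags, pglen);
 */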
3509
3510 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3511
3512 /*
3513 * Having a close hook prevents vma merging regardless of flags.
3514 */
3515 static void special_mapping_close(struct vm_area_struct *vma)
3516 {
3517 }
3518
3519 static const char *special_mapping_name(struct vm_area_struct *vma)
3520 {
3521 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3522 }
3523
3524 static int special_mapping_mremap(struct vm_area_struct *new_vma)
3525 {
3526 struct vm_special_mapping *sm = new_vma->vm_private_data;
3527
3528 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3529 return -EFAULT;
3530
3531 if (sm->mremap)
3532 return sm->mremap(sm, new_vma);
3533
3534 return 0;
3535 }
3536
3537 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3538 {
3539 /*
3540	 * Forbid splitting special mappings - the kernel has expectations about
3541	 * the number of pages in the mapping. Together with VM_DONTEXPAND,
3542	 * the size of the vma should stay the same over the special mapping's
3543	 * lifetime.
3544 */
3545 return -EINVAL;
3546 }
3547
3548 static const struct vm_operations_struct special_mapping_vmops = {
3549 .close = special_mapping_close,
3550 .fault = special_mapping_fault,
3551 .mremap = special_mapping_mremap,
3552 .name = special_mapping_name,
3553	/* vDSO code relies on VVAR not being accessible remotely */
3554 .access = NULL,
3555 .may_split = special_mapping_split,
3556 };
3557
3558 static const struct vm_operations_struct legacy_special_mapping_vmops = {
3559 .close = special_mapping_close,
3560 .fault = special_mapping_fault,
3561 };
3562
3563 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3564 {
3565 struct vm_area_struct *vma = vmf->vma;
3566 pgoff_t pgoff;
3567 struct page **pages;
3568
3569 if (vma->vm_ops == &legacy_special_mapping_vmops) {
3570 pages = vma->vm_private_data;
3571 } else {
3572 struct vm_special_mapping *sm = vma->vm_private_data;
3573
3574 if (sm->fault)
3575 return sm->fault(sm, vmf->vma, vmf);
3576
3577 pages = sm->pages;
3578 }
3579
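	/*
	 * Walk the NULL-terminated page array until the entry for vmf->pgoff
	 * is reached; a fault beyond the last supplied page falls through to
	 * the SIGBUS return below.
	 */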
3580 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3581 pgoff--;
3582
3583 if (*pages) {
3584 struct page *page = *pages;
3585 get_page(page);
3586 vmf->page = page;
3587 return 0;
3588 }
3589
3590 return VM_FAULT_SIGBUS;
3591 }
3592
3593 static struct vm_area_struct *__install_special_mapping(
3594 struct mm_struct *mm,
3595 unsigned long addr, unsigned long len,
3596 unsigned long vm_flags, void *priv,
3597 const struct vm_operations_struct *ops)
3598 {
3599 int ret;
3600 struct vm_area_struct *vma;
3601
3602 vma = vm_area_alloc(mm);
3603 if (unlikely(vma == NULL))
3604 return ERR_PTR(-ENOMEM);
3605
3606 vma->vm_start = addr;
3607 vma->vm_end = addr + len;
3608
3609 vm_flags_init(vma, (vm_flags | mm->def_flags |
3610 VM_DONTEXPAND | VM_SOFTDIRTY) & ~VM_LOCKED_MASK);
3611 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3612
3613 vma->vm_ops = ops;
3614 vma->vm_private_data = priv;
3615
3616 ret = insert_vm_struct(mm, vma);
3617 if (ret)
3618 goto out;
3619
3620 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3621
3622 perf_event_mmap(vma);
3623
3624 return vma;
3625
3626 out:
3627 vm_area_free(vma);
3628 return ERR_PTR(ret);
3629 }
3630
3631 bool vma_is_special_mapping(const struct vm_area_struct *vma,
3632 const struct vm_special_mapping *sm)
3633 {
3634 return vma->vm_private_data == sm &&
3635 (vma->vm_ops == &special_mapping_vmops ||
3636 vma->vm_ops == &legacy_special_mapping_vmops);
3637 }
3638
3639 /*
3640 * Called with mm->mmap_lock held for writing.
3641 * Insert a new vma covering the given region, with the given flags.
3642 * Its pages are supplied by the given array of struct page *.
3643 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3644 * The region past the last page supplied will always produce SIGBUS.
3645 * The array pointer and the pages it points to are assumed to stay alive
3646 * for as long as this mapping might exist.
3647 */
3648 struct vm_area_struct *_install_special_mapping(
3649 struct mm_struct *mm,
3650 unsigned long addr, unsigned long len,
3651 unsigned long vm_flags, const struct vm_special_mapping *spec)
3652 {
3653 return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3654 &special_mapping_vmops);
3655 }
3656
3657 int install_special_mapping(struct mm_struct *mm,
3658 unsigned long addr, unsigned long len,
3659 unsigned long vm_flags, struct page **pages)
3660 {
3661 struct vm_area_struct *vma = __install_special_mapping(
3662 mm, addr, len, vm_flags, (void *)pages,
3663 &legacy_special_mapping_vmops);
3664
3665 return PTR_ERR_OR_ZERO(vma);
3666 }
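/*
 * Illustrative sketch (not part of the original source): architecture code
 * typically installs a special mapping along these lines (the page array,
 * address and length here are placeholders):
 *
 *	static struct page *vdso_pages[2];
 *	static const struct vm_special_mapping vdso_spec = {
 *		.name  = "[vdso]",
 *		.pages = vdso_pages,
 *	};
 *
 *	vma = _install_special_mapping(mm, addr, 2 * PAGE_SIZE,
 *				       VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
 *				       &vdso_spec);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 */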
3667
3668 static DEFINE_MUTEX(mm_all_locks_mutex);
3669
3670 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3671 {
3672 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3673 /*
3674 * The LSB of head.next can't change from under us
3675 * because we hold the mm_all_locks_mutex.
3676 */
3677 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
3678 /*
3679 * We can safely modify head.next after taking the
3680 * anon_vma->root->rwsem. If some other vma in this mm shares
3681 * the same anon_vma we won't take it again.
3682 *
3683 * No need of atomic instructions here, head.next
3684 * can't change from under us thanks to the
3685 * anon_vma->root->rwsem.
3686 */
3687 if (__test_and_set_bit(0, (unsigned long *)
3688 &anon_vma->root->rb_root.rb_root.rb_node))
3689 BUG();
3690 }
3691 }
3692
3693 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3694 {
3695 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3696 /*
3697 * AS_MM_ALL_LOCKS can't change from under us because
3698 * we hold the mm_all_locks_mutex.
3699 *
3700 * Operations on ->flags have to be atomic because
3701 * even if AS_MM_ALL_LOCKS is stable thanks to the
3702 * mm_all_locks_mutex, there may be other cpus
3703 * changing other bitflags in parallel to us.
3704 */
3705 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3706 BUG();
3707 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
3708 }
3709 }
3710
3711 /*
3712 * This operation locks against the VM for all pte/vma/mm related
3713 * operations that could ever happen on a certain mm. This includes
3714 * vmtruncate, try_to_unmap, and all page faults.
3715 *
3716 * The caller must take the mmap_lock in write mode before calling
3717 * mm_take_all_locks(). The caller isn't allowed to release the
3718 * mmap_lock until mm_drop_all_locks() returns.
3719 *
3720 * mmap_lock in write mode is required in order to block all operations
3721 * that could modify pagetables and free pages without need of
3722 * altering the vma layout. It's also needed in write mode to prevent new
3723 * anon_vmas from being associated with existing vmas.
3724 *
3725 * A single task can't take more than one mm_take_all_locks() in a row
3726 * or it would deadlock.
3727 *
3728 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3729 * mapping->flags avoid taking the same lock twice if more than one
3730 * vma in this mm is backed by the same anon_vma or address_space.
3731 *
3732 * We take locks in the following order, according to the comment at the
3733 * beginning of mm/rmap.c:
3734 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3735 *   hugetlb mappings);
3736 * - all vmas marked locked;
3737 * - all i_mmap_rwsem locks;
3738 * - all anon_vma->rwsems.
3739 *
3740 * We can take all locks within these types in any order because the VM code
3741 * doesn't nest them and we are protected from parallel mm_take_all_locks() by
3742 * the mm_all_locks_mutex.
3743 *
3744 * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3745 * that may have to take thousands of locks.
3746 *
3747 * mm_take_all_locks() can fail if it's interrupted by signals.
3748 */
3749 int mm_take_all_locks(struct mm_struct *mm)
3750 {
3751 struct vm_area_struct *vma;
3752 struct anon_vma_chain *avc;
3753 MA_STATE(mas, &mm->mm_mt, 0, 0);
3754
3755 mmap_assert_write_locked(mm);
3756
3757 mutex_lock(&mm_all_locks_mutex);
3758
3759 /*
3760 * vma_start_write() does not have a complement in mm_drop_all_locks()
3761 * because vma_start_write() is always asymmetrical; it marks a VMA as
3762 * being written to until mmap_write_unlock() or mmap_write_downgrade()
3763 * is reached.
3764 */
3765 mas_for_each(&mas, vma, ULONG_MAX) {
3766 if (signal_pending(current))
3767 goto out_unlock;
3768 vma_start_write(vma);
3769 }
3770
3771 mas_set(&mas, 0);
3772 mas_for_each(&mas, vma, ULONG_MAX) {
3773 if (signal_pending(current))
3774 goto out_unlock;
3775 if (vma->vm_file && vma->vm_file->f_mapping &&
3776 is_vm_hugetlb_page(vma))
3777 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3778 }
3779
3780 mas_set(&mas, 0);
3781 mas_for_each(&mas, vma, ULONG_MAX) {
3782 if (signal_pending(current))
3783 goto out_unlock;
3784 if (vma->vm_file && vma->vm_file->f_mapping &&
3785 !is_vm_hugetlb_page(vma))
3786 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3787 }
3788
3789 mas_set(&mas, 0);
3790 mas_for_each(&mas, vma, ULONG_MAX) {
3791 if (signal_pending(current))
3792 goto out_unlock;
3793 if (vma->anon_vma)
3794 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3795 vm_lock_anon_vma(mm, avc->anon_vma);
3796 }
3797
3798 return 0;
3799
3800 out_unlock:
3801 mm_drop_all_locks(mm);
3802 return -EINTR;
3803 }
3804
3805 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3806 {
3807 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3808 /*
3809 * The LSB of head.next can't change to 0 from under
3810 * us because we hold the mm_all_locks_mutex.
3811 *
3812 * We must however clear the bitflag before unlocking
3813 * the vma so the users using the anon_vma->rb_root will
3814 * never see our bitflag.
3815 *
3816 * No need of atomic instructions here, head.next
3817 * can't change from under us until we release the
3818 * anon_vma->root->rwsem.
3819 */
3820 if (!__test_and_clear_bit(0, (unsigned long *)
3821 &anon_vma->root->rb_root.rb_root.rb_node))
3822 BUG();
3823 anon_vma_unlock_write(anon_vma);
3824 }
3825 }
3826
3827 static void vm_unlock_mapping(struct address_space *mapping)
3828 {
3829 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3830 /*
3831 * AS_MM_ALL_LOCKS can't change to 0 from under us
3832 * because we hold the mm_all_locks_mutex.
3833 */
3834 i_mmap_unlock_write(mapping);
3835 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3836 &mapping->flags))
3837 BUG();
3838 }
3839 }
3840
3841 /*
3842 * The mmap_lock cannot be released by the caller until
3843 * mm_drop_all_locks() returns.
3844 */
3845 void mm_drop_all_locks(struct mm_struct *mm)
3846 {
3847 struct vm_area_struct *vma;
3848 struct anon_vma_chain *avc;
3849 MA_STATE(mas, &mm->mm_mt, 0, 0);
3850
3851 mmap_assert_write_locked(mm);
3852 BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3853
3854 mas_for_each(&mas, vma, ULONG_MAX) {
3855 if (vma->anon_vma)
3856 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3857 vm_unlock_anon_vma(avc->anon_vma);
3858 if (vma->vm_file && vma->vm_file->f_mapping)
3859 vm_unlock_mapping(vma->vm_file->f_mapping);
3860 }
3861
3862 mutex_unlock(&mm_all_locks_mutex);
3863 }
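/*
 * Illustrative sketch (not part of the original source): the expected calling
 * sequence around mm_take_all_locks()/mm_drop_all_locks(), with mmap_lock
 * held for writing across the whole section:
 *
 *	mmap_write_lock(mm);
 *	ret = mm_take_all_locks(mm);
 *	if (ret) {
 *		mmap_write_unlock(mm);
 *		return ret;
 *	}
 *	... operate on the mm's vmas, anon_vmas and mappings ...
 *	mm_drop_all_locks(mm);
 *	mmap_write_unlock(mm);
 */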
3864
3865 /*
3866 * initialise the percpu counter for VM
3867 */
3868 void __init mmap_init(void)
3869 {
3870 int ret;
3871
3872 ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3873 VM_BUG_ON(ret);
3874 }
3875
3876 /*
3877 * Initialise sysctl_user_reserve_kbytes.
3878 *
3879 * This is intended to prevent a user who starts a single memory-hogging
3880 * process from getting into a state where they cannot recover (kill the
3881 * hog) in OVERCOMMIT_NEVER mode.
3882 *
3883 * The default value is min(3% of free memory, 128MB)
3884 * 128MB is enough to recover with sshd/login, bash, and top/kill.
3885 */
3886 static int init_user_reserve(void)
3887 {
3888 unsigned long free_kbytes;
3889
3890 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3891
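	/*
	 * free_kbytes / 32 is roughly 3% of free memory; the 1UL << 17 cap
	 * is 131072 kB, i.e. the 128MB mentioned above.
	 */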
3892 sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3893 return 0;
3894 }
3895 subsys_initcall(init_user_reserve);
3896
3897 /*
3898 * Initialise sysctl_admin_reserve_kbytes.
3899 *
3900 * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3901 * to log in and kill a memory hogging process.
3902 *
3903 * Systems with more than 256MB will reserve 8MB, enough to recover
3904 * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3905 * only reserve 3% of free pages by default.
3906 */
3907 static int init_admin_reserve(void)
3908 {
3909 unsigned long free_kbytes;
3910
3911 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3912
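	/*
	 * free_kbytes / 32 is roughly 3% of free memory; the 1UL << 13 cap
	 * is 8192 kB, i.e. the 8MB mentioned above.
	 */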
3913 sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3914 return 0;
3915 }
3916 subsys_initcall(init_admin_reserve);
3917
3918 /*
3919 * Reinitialise user and admin reserves if memory is added or removed.
3920 *
3921 * The default user reserve max is 128MB, and the default max for the
3922 * admin reserve is 8MB. These are usually, but not always, enough to
3923 * enable recovery from a memory hogging process using login/sshd, a shell,
3924 * and tools like top. It may make sense to increase or even disable the
3925 * reserve depending on the existence of swap or variations in the recovery
3926 * tools. So, the admin may have changed them.
3927 *
3928 * If memory is added and the reserves have been eliminated or increased above
3929 * the default max, then we'll trust the admin.
3930 *
3931 * If memory is removed and there isn't enough free memory, then we
3932 * need to reset the reserves.
3933 *
3934 * Otherwise keep the reserve set by the admin.
3935 */
3936 static int reserve_mem_notifier(struct notifier_block *nb,
3937 unsigned long action, void *data)
3938 {
3939 unsigned long tmp, free_kbytes;
3940
3941 switch (action) {
3942 case MEM_ONLINE:
3943 /* Default max is 128MB. Leave alone if modified by operator. */
3944 tmp = sysctl_user_reserve_kbytes;
3945 if (0 < tmp && tmp < (1UL << 17))
3946 init_user_reserve();
3947
3948 /* Default max is 8MB. Leave alone if modified by operator. */
3949 tmp = sysctl_admin_reserve_kbytes;
3950 if (0 < tmp && tmp < (1UL << 13))
3951 init_admin_reserve();
3952
3953 break;
3954 case MEM_OFFLINE:
3955 free_kbytes = K(global_zone_page_state(NR_FREE_PAGES));
3956
3957 if (sysctl_user_reserve_kbytes > free_kbytes) {
3958 init_user_reserve();
3959 pr_info("vm.user_reserve_kbytes reset to %lu\n",
3960 sysctl_user_reserve_kbytes);
3961 }
3962
3963 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3964 init_admin_reserve();
3965 pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3966 sysctl_admin_reserve_kbytes);
3967 }
3968 break;
3969 default:
3970 break;
3971 }
3972 return NOTIFY_OK;
3973 }
3974
3975 static int __meminit init_reserve_notifier(void)
3976 {
3977 if (hotplug_memory_notifier(reserve_mem_notifier, DEFAULT_CALLBACK_PRI))
3978 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3979
3980 return 0;
3981 }
3982 subsys_initcall(init_reserve_notifier);
3983