Lines Matching refs:vma
77 struct vm_area_struct *vma, struct vm_area_struct *prev,
122 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
124 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
127 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
128 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
133 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
139 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
142 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
144 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
148 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
156 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
158 struct file *file = vma->vm_file; in unlink_file_vma()
163 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma()
171 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) in remove_vma() argument
173 struct vm_area_struct *next = vma->vm_next; in remove_vma()
176 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
177 vma->vm_ops->close(vma); in remove_vma()
178 if (vma->vm_file) in remove_vma()
179 fput(vma->vm_file); in remove_vma()
180 mpol_put(vma_policy(vma)); in remove_vma()
181 vm_area_free(vma); in remove_vma()
288 static inline unsigned long vma_compute_gap(struct vm_area_struct *vma) in vma_compute_gap() argument
298 gap = vm_start_gap(vma); in vma_compute_gap()
299 if (vma->vm_prev) { in vma_compute_gap()
300 prev_end = vm_end_gap(vma->vm_prev); in vma_compute_gap()
310 static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma) in vma_compute_subtree_gap() argument
312 unsigned long max = vma_compute_gap(vma), subtree_gap; in vma_compute_subtree_gap()
313 if (vma->vm_rb.rb_left) { in vma_compute_subtree_gap()
314 subtree_gap = rb_entry(vma->vm_rb.rb_left, in vma_compute_subtree_gap()
319 if (vma->vm_rb.rb_right) { in vma_compute_subtree_gap()
320 subtree_gap = rb_entry(vma->vm_rb.rb_right, in vma_compute_subtree_gap()
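
The vma_compute_gap()/vma_compute_subtree_gap() hits above maintain the augmented-rbtree value rb_subtree_gap: each node caches the largest free gap found at that node or anywhere below it, which is what lets the gap searches further down prune whole subtrees. A minimal userspace sketch of that max-of-children folding, using a hand-rolled binary-tree node instead of the kernel's rb_node/rb_entry machinery (struct node, own_gap and subtree_gap are illustrative names, not kernel API):

#include <stdio.h>

/* Simplified stand-in for a vma in the augmented tree. */
struct node {
        unsigned long own_gap;      /* gap to the preceding area, as in vma_compute_gap() */
        unsigned long subtree_gap;  /* cached max of own_gap over this node and its subtree */
        struct node *left, *right;
};

/* Same shape as vma_compute_subtree_gap(): start from the node's own gap,
 * then fold in the cached maxima of both children. */
static unsigned long compute_subtree_gap(const struct node *n)
{
        unsigned long max = n->own_gap;

        if (n->left && n->left->subtree_gap > max)
                max = n->left->subtree_gap;
        if (n->right && n->right->subtree_gap > max)
                max = n->right->subtree_gap;
        return max;
}

int main(void)
{
        struct node l = { 0x3000, 0x3000, NULL, NULL };
        struct node r = { 0x1000, 0x1000, NULL, NULL };
        struct node root = { 0x2000, 0, &l, &r };

        printf("%#lx\n", compute_subtree_gap(&root));   /* 0x3000 */
        return 0;
}
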
336 struct vm_area_struct *vma; in browse_rb() local
337 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in browse_rb()
338 if (vma->vm_start < prev) { in browse_rb()
340 vma->vm_start, prev); in browse_rb()
343 if (vma->vm_start < pend) { in browse_rb()
345 vma->vm_start, pend); in browse_rb()
348 if (vma->vm_start > vma->vm_end) { in browse_rb()
350 vma->vm_start, vma->vm_end); in browse_rb()
354 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { in browse_rb()
356 vma->rb_subtree_gap, in browse_rb()
357 vma_compute_subtree_gap(vma)); in browse_rb()
363 prev = vma->vm_start; in browse_rb()
364 pend = vma->vm_end; in browse_rb()
381 struct vm_area_struct *vma; in validate_mm_rb() local
382 vma = rb_entry(nd, struct vm_area_struct, vm_rb); in validate_mm_rb()
383 VM_BUG_ON_VMA(vma != ignore && in validate_mm_rb()
384 vma->rb_subtree_gap != vma_compute_subtree_gap(vma), in validate_mm_rb()
385 vma); in validate_mm_rb()
394 struct vm_area_struct *vma = mm->mmap; in validate_mm() local
396 while (vma) { in validate_mm()
397 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
402 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
407 highest_address = vm_end_gap(vma); in validate_mm()
408 vma = vma->vm_next; in validate_mm()
442 static void vma_gap_update(struct vm_area_struct *vma) in RB_DECLARE_CALLBACKS_MAX()
448 vma_gap_callbacks_propagate(&vma->vm_rb, NULL); in RB_DECLARE_CALLBACKS_MAX()
451 static inline void vma_rb_insert(struct vm_area_struct *vma, in vma_rb_insert() argument
457 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in vma_rb_insert()
460 static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root) in __vma_rb_erase() argument
467 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in __vma_rb_erase()
470 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma, in vma_rb_erase_ignore() argument
481 __vma_rb_erase(vma, root); in vma_rb_erase_ignore()
484 static __always_inline void vma_rb_erase(struct vm_area_struct *vma, in vma_rb_erase() argument
491 validate_mm_rb(root, vma); in vma_rb_erase()
493 __vma_rb_erase(vma, root); in vma_rb_erase()
511 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
515 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
520 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
524 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
566 struct vm_area_struct *vma; in count_vma_pages_range() local
569 vma = find_vma_intersection(mm, addr, end); in count_vma_pages_range()
570 if (!vma) in count_vma_pages_range()
573 nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
574 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
577 for (vma = vma->vm_next; vma; vma = vma->vm_next) { in count_vma_pages_range()
580 if (vma->vm_start > end) in count_vma_pages_range()
583 overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
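
count_vma_pages_range() sums, in pages, how much of [addr, end) is already covered by existing vmas. A rough userspace model of that sum over a sorted array standing in for the mm's vma list (struct area and count_pages_range are illustrative, not kernel API):

#include <stdio.h>

#define PAGE_SHIFT 12

struct area { unsigned long start, end; };      /* [start, end), page aligned, sorted */

static unsigned long count_pages_range(const struct area *v, int n,
                                       unsigned long addr, unsigned long end)
{
        unsigned long nr_pages = 0;

        for (int i = 0; i < n; i++) {
                if (v[i].end <= addr)
                        continue;
                if (v[i].start >= end)
                        break;
                unsigned long lo = v[i].start > addr ? v[i].start : addr;
                unsigned long hi = v[i].end < end ? v[i].end : end;
                nr_pages += (hi - lo) >> PAGE_SHIFT;
        }
        return nr_pages;
}

int main(void)
{
        struct area v[] = { { 0x1000, 0x3000 }, { 0x5000, 0x8000 } };

        printf("%lu\n", count_pages_range(v, 2, 0x2000, 0x6000));       /* 2 pages */
        return 0;
}
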
590 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link_rb() argument
594 if (vma->vm_next) in __vma_link_rb()
595 vma_gap_update(vma->vm_next); in __vma_link_rb()
597 mm->highest_vm_end = vm_end_gap(vma); in __vma_link_rb()
608 rb_link_node(&vma->vm_rb, rb_parent, rb_link); in __vma_link_rb()
609 vma->rb_subtree_gap = 0; in __vma_link_rb()
610 vma_gap_update(vma); in __vma_link_rb()
611 vma_rb_insert(vma, &mm->mm_rb); in __vma_link_rb()
614 static void __vma_link_file(struct vm_area_struct *vma) in __vma_link_file() argument
618 file = vma->vm_file; in __vma_link_file()
622 if (vma->vm_flags & VM_DENYWRITE) in __vma_link_file()
624 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
628 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
634 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in __vma_link() argument
638 __vma_link_list(mm, vma, prev, rb_parent); in __vma_link()
639 __vma_link_rb(mm, vma, rb_link, rb_parent); in __vma_link()
642 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, in vma_link() argument
648 if (vma->vm_file) { in vma_link()
649 mapping = vma->vm_file->f_mapping; in vma_link()
653 __vma_link(mm, vma, prev, rb_link, rb_parent); in vma_link()
654 __vma_link_file(vma); in vma_link()
667 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in __insert_vm_struct() argument
672 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
675 __vma_link(mm, vma, prev, rb_link, rb_parent); in __insert_vm_struct()
680 struct vm_area_struct *vma, in __vma_unlink_common() argument
687 vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); in __vma_unlink_common()
688 next = vma->vm_next; in __vma_unlink_common()
692 prev = vma->vm_prev; in __vma_unlink_common()
706 struct vm_area_struct *vma, in __vma_unlink_prev() argument
709 __vma_unlink_common(mm, vma, prev, true, vma); in __vma_unlink_prev()
719 int __vma_adjust(struct vm_area_struct *vma, unsigned long start, in __vma_adjust() argument
723 struct mm_struct *mm = vma->vm_mm; in __vma_adjust()
724 struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; in __vma_adjust()
728 struct file *file = vma->vm_file; in __vma_adjust()
756 swap(vma, next); in __vma_adjust()
758 VM_WARN_ON(expand != vma); in __vma_adjust()
773 importer = vma; in __vma_adjust()
789 importer = vma; in __vma_adjust()
791 } else if (end < vma->vm_end) { in __vma_adjust()
797 adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT); in __vma_adjust()
798 exporter = vma; in __vma_adjust()
823 uprobe_munmap(vma, vma->vm_start, vma->vm_end); in __vma_adjust()
840 anon_vma = vma->anon_vma; in __vma_adjust()
847 anon_vma_interval_tree_pre_update_vma(vma); in __vma_adjust()
854 vma_interval_tree_remove(vma, root); in __vma_adjust()
859 if (start != vma->vm_start) { in __vma_adjust()
860 vma->vm_start = start; in __vma_adjust()
863 if (end != vma->vm_end) { in __vma_adjust()
864 vma->vm_end = end; in __vma_adjust()
867 vma->vm_pgoff = pgoff; in __vma_adjust()
876 vma_interval_tree_insert(vma, root); in __vma_adjust()
886 __vma_unlink_prev(mm, next, vma); in __vma_adjust()
897 __vma_unlink_common(mm, next, NULL, false, vma); in __vma_adjust()
909 vma_gap_update(vma); in __vma_adjust()
912 mm->highest_vm_end = vm_end_gap(vma); in __vma_adjust()
919 anon_vma_interval_tree_post_update_vma(vma); in __vma_adjust()
928 uprobe_mmap(vma); in __vma_adjust()
940 anon_vma_merge(vma, next); in __vma_adjust()
956 next = vma->vm_next; in __vma_adjust()
968 next = vma; in __vma_adjust()
997 VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); in __vma_adjust()
1012 static inline int is_mergeable_vma(struct vm_area_struct *vma, in is_mergeable_vma() argument
1025 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
1027 if (vma->vm_file != file) in is_mergeable_vma()
1029 if (vma->vm_ops && vma->vm_ops->close) in is_mergeable_vma()
1031 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) in is_mergeable_vma()
1033 if (vma_get_anon_name(vma) != anon_name) in is_mergeable_vma()
1040 struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
1046 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
1047 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
1064 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_before() argument
1070 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && in can_vma_merge_before()
1071 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_before()
1072 if (vma->vm_pgoff == vm_pgoff) in can_vma_merge_before()
1086 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_after() argument
1092 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && in can_vma_merge_after()
1093 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_after()
1095 vm_pglen = vma_pages(vma); in can_vma_merge_after()
1096 if (vma->vm_pgoff + vm_pglen == vm_pgoff) in can_vma_merge_after()
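
The two checks above encode the geometry under which a new range can be glued onto an existing vma: the byte ranges must touch and the file page offsets must line up so the merged vma still maps a contiguous slice of the file. A toy model of just that arithmetic (struct region and the two helpers are illustrative names; the real can_vma_merge_before()/can_vma_merge_after() also compare flags, file, anon_vma, the userfaultfd context and the anon vma name):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct region {
        unsigned long vm_start, vm_end; /* bytes, page aligned, end exclusive */
        unsigned long vm_pgoff;         /* file page offset of vm_start */
};

static unsigned long region_pages(const struct region *r)
{
        return (r->vm_end - r->vm_start) >> PAGE_SHIFT;
}

/* Candidate ending at 'end', 'pages' pages long with file offset 'pgoff',
 * sits immediately before r (the can_vma_merge_before() geometry). */
static bool merges_before(const struct region *r, unsigned long end,
                          unsigned long pgoff, unsigned long pages)
{
        return end == r->vm_start && pgoff + pages == r->vm_pgoff;
}

/* Candidate starting at (start, pgoff) sits immediately after r
 * (the can_vma_merge_after() geometry). */
static bool merges_after(const struct region *r, unsigned long start,
                         unsigned long pgoff)
{
        return start == r->vm_end && r->vm_pgoff + region_pages(r) == pgoff;
}

int main(void)
{
        struct region r = { 0x10000, 0x12000, 0x8 };    /* two pages at pgoff 8 */

        /* Two-page candidate at 0xe000..0x10000 with pgoff 6 merges before r. */
        printf("%d\n", merges_before(&r, 0x10000, 0x6, 2));     /* 1 */
        /* Candidate starting at 0x12000 with pgoff 10 merges after r. */
        printf("%d\n", merges_after(&r, 0x12000, 0xa));         /* 1 */
        return 0;
}
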
1302 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1307 near = vma->vm_next; in find_mergeable_anon_vma()
1311 anon_vma = reusable_anon_vma(near, vma, near); in find_mergeable_anon_vma()
1315 near = vma->vm_prev; in find_mergeable_anon_vma()
1319 anon_vma = reusable_anon_vma(near, near, vma); in find_mergeable_anon_vma()
1452 struct vm_area_struct *vma = find_vma(mm, addr); in do_mmap() local
1454 if (vma && vma->vm_start < addr + len) in do_mmap()
1676 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) in vma_wants_writenotify() argument
1678 vm_flags_t vm_flags = vma->vm_flags; in vma_wants_writenotify()
1679 const struct vm_operations_struct *vm_ops = vma->vm_ops; in vma_wants_writenotify()
1704 return vma->vm_file && vma->vm_file->f_mapping && in vma_wants_writenotify()
1705 mapping_cap_account_dirty(vma->vm_file->f_mapping); in vma_wants_writenotify()
1729 struct vm_area_struct *vma, *prev; in mmap_region() local
1769 vma = vma_merge(mm, prev, addr, addr + len, vm_flags, in mmap_region()
1771 if (vma) in mmap_region()
1779 vma = vm_area_alloc(mm); in mmap_region()
1780 if (!vma) { in mmap_region()
1785 vma->vm_start = addr; in mmap_region()
1786 vma->vm_end = addr + len; in mmap_region()
1787 vma->vm_flags = vm_flags; in mmap_region()
1788 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
1789 vma->vm_pgoff = pgoff; in mmap_region()
1808 vma->vm_file = get_file(file); in mmap_region()
1809 error = call_mmap(file, vma); in mmap_region()
1820 WARN_ON_ONCE(addr != vma->vm_start); in mmap_region()
1822 addr = vma->vm_start; in mmap_region()
1823 vm_flags = vma->vm_flags; in mmap_region()
1825 error = shmem_zero_setup(vma); in mmap_region()
1829 vma_set_anonymous(vma); in mmap_region()
1832 vma_link(mm, vma, prev, rb_link, rb_parent); in mmap_region()
1840 file = vma->vm_file; in mmap_region()
1842 perf_event_mmap(vma); in mmap_region()
1846 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || in mmap_region()
1847 is_vm_hugetlb_page(vma) || in mmap_region()
1848 vma == get_gate_vma(current->mm)) in mmap_region()
1849 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in mmap_region()
1855 uprobe_mmap(vma); in mmap_region()
1864 vma->vm_flags |= VM_SOFTDIRTY; in mmap_region()
1866 vma_set_page_prot(vma); in mmap_region()
1871 vma->vm_file = NULL; in mmap_region()
1875 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
1883 vm_area_free(vma); in mmap_region()
1901 struct vm_area_struct *vma; in unmapped_area() local
1921 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1922 if (vma->rb_subtree_gap < length) in unmapped_area()
1927 gap_end = vm_start_gap(vma); in unmapped_area()
1928 if (gap_end >= low_limit && vma->vm_rb.rb_left) { in unmapped_area()
1930 rb_entry(vma->vm_rb.rb_left, in unmapped_area()
1933 vma = left; in unmapped_area()
1938 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; in unmapped_area()
1948 if (vma->vm_rb.rb_right) { in unmapped_area()
1950 rb_entry(vma->vm_rb.rb_right, in unmapped_area()
1953 vma = right; in unmapped_area()
1960 struct rb_node *prev = &vma->vm_rb; in unmapped_area()
1963 vma = rb_entry(rb_parent(prev), in unmapped_area()
1965 if (prev == vma->vm_rb.rb_left) { in unmapped_area()
1966 gap_start = vm_end_gap(vma->vm_prev); in unmapped_area()
1967 gap_end = vm_start_gap(vma); in unmapped_area()
1996 struct vm_area_struct *vma; in unmapped_area_topdown() local
2025 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
2026 if (vma->rb_subtree_gap < length) in unmapped_area_topdown()
2031 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; in unmapped_area_topdown()
2032 if (gap_start <= high_limit && vma->vm_rb.rb_right) { in unmapped_area_topdown()
2034 rb_entry(vma->vm_rb.rb_right, in unmapped_area_topdown()
2037 vma = right; in unmapped_area_topdown()
2044 gap_end = vm_start_gap(vma); in unmapped_area_topdown()
2052 if (vma->vm_rb.rb_left) { in unmapped_area_topdown()
2054 rb_entry(vma->vm_rb.rb_left, in unmapped_area_topdown()
2057 vma = left; in unmapped_area_topdown()
2064 struct rb_node *prev = &vma->vm_rb; in unmapped_area_topdown()
2067 vma = rb_entry(rb_parent(prev), in unmapped_area_topdown()
2069 if (prev == vma->vm_rb.rb_right) { in unmapped_area_topdown()
2070 gap_start = vma->vm_prev ? in unmapped_area_topdown()
2071 vm_end_gap(vma->vm_prev) : 0; in unmapped_area_topdown()
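
unmapped_area() and unmapped_area_topdown() both answer the same question: where is a hole of at least 'length' bytes between the mapped areas, subject to low/high limits. They answer it in O(log n) by descending the rb_subtree_gap-augmented rbtree; the linear first-fit sketch below gets the same answer for the bottom-up case over a sorted array (find_gap, struct area and the -1 error return are illustrative; the kernel returns -ENOMEM):

#include <stdio.h>

struct area { unsigned long start, end; };      /* sorted, non-overlapping */

static unsigned long find_gap(const struct area *v, int n,
                              unsigned long length, unsigned long low_limit,
                              unsigned long high_limit)
{
        unsigned long gap_start = low_limit;

        for (int i = 0; i <= n; i++) {
                unsigned long gap_end = (i < n) ? v[i].start : high_limit;

                if (gap_end > high_limit)
                        gap_end = high_limit;
                if (gap_end >= gap_start && gap_end - gap_start >= length)
                        return gap_start;
                if (i < n && v[i].end > gap_start)
                        gap_start = v[i].end;
        }
        return (unsigned long)-1;       /* no hole large enough */
}

int main(void)
{
        struct area v[] = { { 0x10000, 0x20000 }, { 0x30000, 0x31000 } };

        /* First 0x8000-byte hole at or above 0x10000 is 0x20000..0x30000. */
        printf("%#lx\n", find_gap(v, 2, 0x8000, 0x10000, 0x7fff0000));
        return 0;
}
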
2118 struct vm_area_struct *vma, *prev; in arch_get_unmapped_area() local
2130 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area()
2132 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area()
2156 struct vm_area_struct *vma, *prev; in arch_get_unmapped_area_topdown() local
2171 vma = find_vma_prev(mm, addr, &prev); in arch_get_unmapped_area_topdown()
2173 (!vma || addr + len <= vm_start_gap(vma)) && in arch_get_unmapped_area_topdown()
2251 struct vm_area_struct *vma; in find_vma() local
2254 vma = vmacache_find(mm, addr); in find_vma()
2255 if (likely(vma)) in find_vma()
2256 return vma; in find_vma()
2266 vma = tmp; in find_vma()
2274 if (vma) in find_vma()
2275 vmacache_update(addr, vma); in find_vma()
2276 return vma; in find_vma()
2288 struct vm_area_struct *vma; in find_vma_prev() local
2290 vma = find_vma(mm, addr); in find_vma_prev()
2291 if (vma) { in find_vma_prev()
2292 *pprev = vma->vm_prev; in find_vma_prev()
2298 return vma; in find_vma_prev()
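
A point worth keeping in mind from the find_vma() hits above: it returns the first vma whose vm_end lies above addr, which may start above addr, so callers must still check vm_start themselves. A plain linear-scan model of that contract (find_area and struct area are illustrative; the kernel walks its rbtree and consults the per-task vmacache first):

#include <stdio.h>

struct area { unsigned long start, end; };      /* sorted by start */

static const struct area *find_area(const struct area *v, int n,
                                    unsigned long addr)
{
        for (int i = 0; i < n; i++)
                if (v[i].end > addr)
                        return &v[i];
        return NULL;
}

int main(void)
{
        struct area v[] = { { 0x1000, 0x2000 }, { 0x4000, 0x5000 } };
        const struct area *a = find_area(v, 2, 0x3000);

        if (a)  /* note: 0x3000 is not inside the result, which starts at 0x4000 */
                printf("found [%#lx, %#lx)\n", a->start, a->end);
        return 0;
}
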
2306 static int acct_stack_growth(struct vm_area_struct *vma, in acct_stack_growth() argument
2309 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2313 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2321 if (vma->vm_flags & VM_LOCKED) { in acct_stack_growth()
2332 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2333 vma->vm_end - size; in acct_stack_growth()
2334 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2352 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
2354 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2359 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
2375 next = vma->vm_next; in expand_upwards()
2384 if (unlikely(anon_vma_prepare(vma))) in expand_upwards()
2392 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
2395 if (address > vma->vm_end) { in expand_upwards()
2398 size = address - vma->vm_start; in expand_upwards()
2399 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2402 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
2403 error = acct_stack_growth(vma, size, grow); in expand_upwards()
2417 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
2419 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2420 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
2421 vma->vm_end = address; in expand_upwards()
2422 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
2423 if (vma->vm_next) in expand_upwards()
2424 vma_gap_update(vma->vm_next); in expand_upwards()
2426 mm->highest_vm_end = vm_end_gap(vma); in expand_upwards()
2429 perf_event_mmap(vma); in expand_upwards()
2433 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
2434 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_upwards()
2443 int expand_downwards(struct vm_area_struct *vma, in expand_downwards() argument
2446 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2455 prev = vma->vm_prev; in expand_downwards()
2464 if (unlikely(anon_vma_prepare(vma))) in expand_downwards()
2472 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2475 if (address < vma->vm_start) { in expand_downwards()
2478 size = vma->vm_end - address; in expand_downwards()
2479 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2482 if (grow <= vma->vm_pgoff) { in expand_downwards()
2483 error = acct_stack_growth(vma, size, grow); in expand_downwards()
2497 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
2499 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2500 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
2501 vma->vm_start = address; in expand_downwards()
2502 vma->vm_pgoff -= grow; in expand_downwards()
2503 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
2504 vma_gap_update(vma); in expand_downwards()
2507 perf_event_mmap(vma); in expand_downwards()
2511 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
2512 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_downwards()
2534 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2536 return expand_upwards(vma, address); in expand_stack()
2542 struct vm_area_struct *vma, *prev; in find_extend_vma() local
2545 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2546 if (vma && (vma->vm_start <= addr)) in find_extend_vma()
2547 return vma; in find_extend_vma()
2556 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2558 return expand_downwards(vma, address); in expand_stack()
2564 struct vm_area_struct *vma; in find_extend_vma() local
2568 vma = find_vma(mm, addr); in find_extend_vma()
2569 if (!vma) in find_extend_vma()
2571 if (vma->vm_start <= addr) in find_extend_vma()
2572 return vma; in find_extend_vma()
2573 if (!(vma->vm_flags & VM_GROWSDOWN)) in find_extend_vma()
2578 start = vma->vm_start; in find_extend_vma()
2579 if (expand_stack(vma, addr)) in find_extend_vma()
2581 if (vma->vm_flags & VM_LOCKED) in find_extend_vma()
2582 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2583 return vma; in find_extend_vma()
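
Once all the limit and accounting checks pass, the interesting part of expand_downwards() is pure bookkeeping: vm_start drops to the page containing the faulting address and vm_pgoff is pulled back by the same number of pages. A minimal sketch of just that arithmetic, assuming a bare struct region in place of vm_area_struct (grow_down is an illustrative name, not kernel API):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

struct region { unsigned long vm_start, vm_end, vm_pgoff; };

static void grow_down(struct region *r, unsigned long address)
{
        address &= PAGE_MASK;
        if (address >= r->vm_start)
                return;         /* already covered, nothing to grow */

        unsigned long grow = (r->vm_start - address) >> PAGE_SHIFT;

        r->vm_start = address;
        r->vm_pgoff -= grow;
}

int main(void)
{
        struct region stack = { 0x7f0000, 0x800000, 0x100 };

        grow_down(&stack, 0x7ec123);
        printf("start=%#lx pgoff=%#lx\n", stack.vm_start, stack.vm_pgoff);
        return 0;
}
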
2595 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma) in remove_vma_list() argument
2602 long nrpages = vma_pages(vma); in remove_vma_list()
2604 if (vma->vm_flags & VM_ACCOUNT) in remove_vma_list()
2606 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_vma_list()
2607 vma = remove_vma(vma); in remove_vma_list()
2608 } while (vma); in remove_vma_list()
2619 struct vm_area_struct *vma, struct vm_area_struct *prev, in unmap_region() argument
2628 unmap_vmas(&tlb, vma, start, end); in unmap_region()
2629 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2639 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, in detach_vmas_to_be_unmapped() argument
2646 vma->vm_prev = NULL; in detach_vmas_to_be_unmapped()
2648 vma_rb_erase(vma, &mm->mm_rb); in detach_vmas_to_be_unmapped()
2650 tail_vma = vma; in detach_vmas_to_be_unmapped()
2651 vma = vma->vm_next; in detach_vmas_to_be_unmapped()
2652 } while (vma && vma->vm_start < end); in detach_vmas_to_be_unmapped()
2653 *insertion_point = vma; in detach_vmas_to_be_unmapped()
2654 if (vma) { in detach_vmas_to_be_unmapped()
2655 vma->vm_prev = prev; in detach_vmas_to_be_unmapped()
2656 vma_gap_update(vma); in detach_vmas_to_be_unmapped()
2669 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2675 if (vma->vm_ops && vma->vm_ops->split) { in __split_vma()
2676 err = vma->vm_ops->split(vma, addr); in __split_vma()
2681 new = vm_area_dup(vma); in __split_vma()
2689 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2692 err = vma_dup_policy(vma, new); in __split_vma()
2696 err = anon_vma_clone(new, vma); in __split_vma()
2707 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + in __split_vma()
2710 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); in __split_vma()
2733 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2739 return __split_vma(mm, vma, addr, new_below); in split_vma()
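
__split_vma() duplicates the vma and then fixes up the two halves; for the !new_below case the copy takes [addr, old_end) and its vm_pgoff advances by the number of pages that stay in the original. A sketch of that bookkeeping alone, leaving out the policy/anon_vma/driver callbacks the real function also handles (struct region and split_at are illustrative names):

#include <stdio.h>

#define PAGE_SHIFT 12

struct region { unsigned long vm_start, vm_end, vm_pgoff; };

static struct region split_at(struct region *orig, unsigned long addr)
{
        struct region tail = *orig;

        tail.vm_start = addr;
        tail.vm_pgoff += (addr - orig->vm_start) >> PAGE_SHIFT;
        orig->vm_end = addr;
        return tail;
}

int main(void)
{
        struct region r = { 0x10000, 0x14000, 0x20 };
        struct region hi = split_at(&r, 0x12000);

        printf("low  [%#lx,%#lx) pgoff=%#lx\n", r.vm_start, r.vm_end, r.vm_pgoff);
        printf("high [%#lx,%#lx) pgoff=%#lx\n", hi.vm_start, hi.vm_end, hi.vm_pgoff);
        return 0;
}
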
2751 struct vm_area_struct *vma, *prev, *last; in __do_munmap() local
2769 vma = find_vma(mm, start); in __do_munmap()
2770 if (!vma) in __do_munmap()
2772 prev = vma->vm_prev; in __do_munmap()
2776 if (vma->vm_start >= end) in __do_munmap()
2786 if (start > vma->vm_start) { in __do_munmap()
2794 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in __do_munmap()
2797 error = __split_vma(mm, vma, start, 0); in __do_munmap()
2800 prev = vma; in __do_munmap()
2810 vma = prev ? prev->vm_next : mm->mmap; in __do_munmap()
2822 int error = userfaultfd_unmap_prep(vma, start, end, uf); in __do_munmap()
2831 struct vm_area_struct *tmp = vma; in __do_munmap()
2843 detach_vmas_to_be_unmapped(mm, vma, prev, end); in __do_munmap()
2848 unmap_region(mm, vma, prev, start, end); in __do_munmap()
2851 remove_vma_list(mm, vma); in __do_munmap()
2909 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
2932 vma = find_vma(mm, start); in SYSCALL_DEFINE5()
2934 if (!vma || !(vma->vm_flags & VM_SHARED)) in SYSCALL_DEFINE5()
2937 if (start < vma->vm_start) in SYSCALL_DEFINE5()
2940 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
2943 for (next = vma->vm_next; next; next = next->vm_next) { in SYSCALL_DEFINE5()
2948 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
2951 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
2962 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
2963 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
2964 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
2968 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
2973 for (tmp = vma; tmp->vm_start >= start + size; in SYSCALL_DEFINE5()
2987 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
2988 ret = do_mmap_pgoff(vma->vm_file, start, size, in SYSCALL_DEFINE5()
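
The three prot lines in the remap_file_pages() emulation above rebuild a PROT_* mask from the vma's VM_* flags before re-mmapping the range. The same translation in a userspace sketch (the VM_* values match the kernel's low bits but are redefined locally so this builds on its own):

#include <stdio.h>
#include <sys/mman.h>

#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

static int vm_flags_to_prot(unsigned long vm_flags)
{
        int prot = 0;

        prot |= (vm_flags & VM_READ)  ? PROT_READ  : 0;
        prot |= (vm_flags & VM_WRITE) ? PROT_WRITE : 0;
        prot |= (vm_flags & VM_EXEC)  ? PROT_EXEC  : 0;
        return prot;
}

int main(void)
{
        printf("%#x\n", vm_flags_to_prot(VM_READ | VM_WRITE)); /* PROT_READ|PROT_WRITE */
        return 0;
}
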
3008 struct vm_area_struct *vma, *prev; in do_brk_flags() local
3046 vma = vma_merge(mm, prev, addr, addr + len, flags, in do_brk_flags()
3048 if (vma) in do_brk_flags()
3054 vma = vm_area_alloc(mm); in do_brk_flags()
3055 if (!vma) { in do_brk_flags()
3060 vma_set_anonymous(vma); in do_brk_flags()
3061 vma->vm_start = addr; in do_brk_flags()
3062 vma->vm_end = addr + len; in do_brk_flags()
3063 vma->vm_pgoff = pgoff; in do_brk_flags()
3064 vma->vm_flags = flags; in do_brk_flags()
3065 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk_flags()
3066 vma_link(mm, vma, prev, rb_link, rb_parent); in do_brk_flags()
3068 perf_event_mmap(vma); in do_brk_flags()
3073 vma->vm_flags |= VM_SOFTDIRTY; in do_brk_flags()
3114 struct vm_area_struct *vma; in exit_mmap() local
3145 vma = mm->mmap; in exit_mmap()
3146 while (vma) { in exit_mmap()
3147 if (vma->vm_flags & VM_LOCKED) in exit_mmap()
3148 munlock_vma_pages_all(vma); in exit_mmap()
3149 vma = vma->vm_next; in exit_mmap()
3155 vma = mm->mmap; in exit_mmap()
3156 if (!vma) /* Can happen if dup_mmap() received an OOM */ in exit_mmap()
3164 unmap_vmas(&tlb, vma, 0, -1); in exit_mmap()
3165 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING); in exit_mmap()
3172 while (vma) { in exit_mmap()
3173 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
3174 nr_accounted += vma_pages(vma); in exit_mmap()
3175 vma = remove_vma(vma); in exit_mmap()
3184 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3189 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
3192 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
3193 security_vm_enough_memory_mm(mm, vma_pages(vma))) in insert_vm_struct()
3208 if (vma_is_anonymous(vma)) { in insert_vm_struct()
3209 BUG_ON(vma->anon_vma); in insert_vm_struct()
3210 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
3213 vma_link(mm, vma, prev, rb_link, rb_parent); in insert_vm_struct()
3225 struct vm_area_struct *vma = *vmap; in copy_vma() local
3226 unsigned long vma_start = vma->vm_start; in copy_vma()
3227 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3236 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
3243 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3244 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in copy_vma()
3245 vma->vm_userfaultfd_ctx, vma_get_anon_name(vma)); in copy_vma()
3265 *vmap = vma = new_vma; in copy_vma()
3267 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
3269 new_vma = vm_area_dup(vma); in copy_vma()
3275 if (vma_dup_policy(vma, new_vma)) in copy_vma()
3277 if (anon_vma_clone(new_vma, vma)) in copy_vma()
3342 static void special_mapping_close(struct vm_area_struct *vma) in special_mapping_close() argument
3346 static const char *special_mapping_name(struct vm_area_struct *vma) in special_mapping_name() argument
3348 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
3378 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault() local
3382 if (vma->vm_ops == &legacy_special_mapping_vmops) { in special_mapping_fault()
3383 pages = vma->vm_private_data; in special_mapping_fault()
3385 struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_fault()
3388 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
3413 struct vm_area_struct *vma; in __install_special_mapping() local
3415 vma = vm_area_alloc(mm); in __install_special_mapping()
3416 if (unlikely(vma == NULL)) in __install_special_mapping()
3419 vma->vm_start = addr; in __install_special_mapping()
3420 vma->vm_end = addr + len; in __install_special_mapping()
3422 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3423 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3425 vma->vm_ops = ops; in __install_special_mapping()
3426 vma->vm_private_data = priv; in __install_special_mapping()
3428 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3432 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3434 perf_event_mmap(vma); in __install_special_mapping()
3436 return vma; in __install_special_mapping()
3439 vm_area_free(vma); in __install_special_mapping()
3443 bool vma_is_special_mapping(const struct vm_area_struct *vma, in vma_is_special_mapping() argument
3446 return vma->vm_private_data == sm && in vma_is_special_mapping()
3447 (vma->vm_ops == &special_mapping_vmops || in vma_is_special_mapping()
3448 vma->vm_ops == &legacy_special_mapping_vmops); in vma_is_special_mapping()
3473 struct vm_area_struct *vma = __install_special_mapping( in install_special_mapping() local
3477 return PTR_ERR_OR_ZERO(vma); in install_special_mapping()
3562 struct vm_area_struct *vma; in mm_take_all_locks() local
3569 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3572 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3573 is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3574 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3577 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3580 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3581 !is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3582 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3585 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3588 if (vma->anon_vma) in mm_take_all_locks()
3589 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
3642 struct vm_area_struct *vma; in mm_drop_all_locks() local
3648 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()
3649 if (vma->anon_vma) in mm_drop_all_locks()
3650 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
3652 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
3653 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()