Lines matching: trim - hs - current

1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/backing-dev.h>
90 /* description of effects of mapping type and prot in current implementation.
131 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
134 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
137 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
142 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */ in vma_set_page_prot()
143 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
147 * Requires inode->i_mapping->i_mmap_rwsem
152 if (vma->vm_flags & VM_DENYWRITE) in __remove_shared_vm_struct()
154 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
158 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
163 * Unlink a file-based vm structure from its interval tree, to hide
168 struct file *file = vma->vm_file; in unlink_file_vma()
171 struct address_space *mapping = file->f_mapping; in unlink_file_vma()
183 struct vm_area_struct *next = vma->vm_next; in remove_vma()
186 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
187 vma->vm_ops->close(vma); in remove_vma()
188 if (vma->vm_file) in remove_vma()
189 fput(vma->vm_file); in remove_vma()
201 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1()
209 return -EINTR; in SYSCALL_DEFINE1()
211 origbrk = mm->brk; in SYSCALL_DEFINE1()
216 * randomize_va_space to 2, which will still cause mm->start_brk in SYSCALL_DEFINE1()
219 if (current->brk_randomized) in SYSCALL_DEFINE1()
220 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
222 min_brk = mm->end_data; in SYSCALL_DEFINE1()
224 min_brk = mm->start_brk; in SYSCALL_DEFINE1()
233 * not page aligned -Ram Gupta in SYSCALL_DEFINE1()
235 if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk, in SYSCALL_DEFINE1()
236 mm->end_data, mm->start_data)) in SYSCALL_DEFINE1()
240 oldbrk = PAGE_ALIGN(mm->brk); in SYSCALL_DEFINE1()
242 mm->brk = brk; in SYSCALL_DEFINE1()
250 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
254 * mm->brk must be protected by write mmap_lock so update it in SYSCALL_DEFINE1()
256 * mm->brk will be restored from origbrk. in SYSCALL_DEFINE1()
258 mm->brk = brk; in SYSCALL_DEFINE1()
259 ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true); in SYSCALL_DEFINE1()
261 mm->brk = origbrk; in SYSCALL_DEFINE1()
274 /* Ok, looks good - let it rip. */ in SYSCALL_DEFINE1()
275 if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0) in SYSCALL_DEFINE1()
277 mm->brk = brk; in SYSCALL_DEFINE1()
280 populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0; in SYSCALL_DEFINE1()
287 mm_populate(oldbrk, newbrk - oldbrk); in SYSCALL_DEFINE1()
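
The fragments above are from the kernel's brk handler, which grows or shrinks the heap mapping and, when VM_LOCKED is set in mm->def_flags, populates the new pages right away. A minimal userspace sketch (not part of the listing) of the behaviour this code services, using only the standard sbrk()/brk() calls:

/* Grow the program break by one page, then restore it.
 * sbrk(0) reports the current break without moving it. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    void *start = sbrk(0);              /* current program break */

    if (sbrk(4096) == (void *)-1) {     /* ask the kernel to extend the heap */
        perror("sbrk");
        return 1;
    }
    printf("break moved from %p to %p\n", start, sbrk(0));
    if (brk(start) != 0)                /* shrink back; the tail is unmapped */
        perror("brk");
    return 0;
}
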
307 if (vma->vm_prev) { in vma_compute_gap()
308 prev_end = vm_end_gap(vma->vm_prev); in vma_compute_gap()
310 gap -= prev_end; in vma_compute_gap()
321 if (vma->vm_rb.rb_left) { in vma_compute_subtree_gap()
322 subtree_gap = rb_entry(vma->vm_rb.rb_left, in vma_compute_subtree_gap()
323 struct vm_area_struct, vm_rb)->rb_subtree_gap; in vma_compute_subtree_gap()
327 if (vma->vm_rb.rb_right) { in vma_compute_subtree_gap()
328 subtree_gap = rb_entry(vma->vm_rb.rb_right, in vma_compute_subtree_gap()
329 struct vm_area_struct, vm_rb)->rb_subtree_gap; in vma_compute_subtree_gap()
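
The vma_compute_subtree_gap() fragments above maintain the augmented-rbtree invariant: each node caches the largest free gap anywhere in its subtree as the maximum of its own gap and the cached maxima of its two children. A standalone sketch of that rule; demo_node is a hypothetical stand-in, not the kernel's vm_area_struct:

struct demo_node {
    unsigned long gap;            /* free space just before this area      */
    unsigned long subtree_gap;    /* cached maximum over the whole subtree */
    struct demo_node *left, *right;
};

static unsigned long compute_subtree_gap(const struct demo_node *n)
{
    unsigned long max = n->gap;

    if (n->left && n->left->subtree_gap > max)
        max = n->left->subtree_gap;
    if (n->right && n->right->subtree_gap > max)
        max = n->right->subtree_gap;
    return max;
}
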
338 struct rb_root *root = &mm->mm_rb; in browse_rb()
346 if (vma->vm_start < prev) { in browse_rb()
348 vma->vm_start, prev); in browse_rb()
351 if (vma->vm_start < pend) { in browse_rb()
353 vma->vm_start, pend); in browse_rb()
356 if (vma->vm_start > vma->vm_end) { in browse_rb()
358 vma->vm_start, vma->vm_end); in browse_rb()
361 spin_lock(&mm->page_table_lock); in browse_rb()
362 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { in browse_rb()
364 vma->rb_subtree_gap, in browse_rb()
368 spin_unlock(&mm->page_table_lock); in browse_rb()
371 prev = vma->vm_start; in browse_rb()
372 pend = vma->vm_end; in browse_rb()
381 return bug ? -1 : i; in browse_rb()
392 vma->rb_subtree_gap != vma_compute_subtree_gap(vma), in validate_mm_rb()
402 struct vm_area_struct *vma = mm->mmap; in validate_mm()
405 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
410 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
416 vma = vma->vm_next; in validate_mm()
419 if (i != mm->map_count) { in validate_mm()
420 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); in validate_mm()
423 if (highest_address != mm->highest_vm_end) { in validate_mm()
424 pr_emerg("mm->highest_vm_end %lx, found %lx\n", in validate_mm()
425 mm->highest_vm_end, highest_address); in validate_mm()
429 if (i != mm->map_count) { in validate_mm()
430 if (i != -1) in validate_mm()
431 pr_emerg("map_count %d rb %d\n", mm->map_count, i); in validate_mm()
446 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or in RB_DECLARE_CALLBACKS_MAX()
447 * vma->vm_prev->vm_end values changed, without modifying the vma's position in RB_DECLARE_CALLBACKS_MAX()
456 vma_gap_callbacks_propagate(&vma->vm_rb, NULL); in RB_DECLARE_CALLBACKS_MAX()
465 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in vma_rb_insert()
475 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks); in __vma_rb_erase()
486 * a. the "next" vma being erased if next->vm_start was reduced in in vma_rb_erase_ignore()
487 * __vma_adjust() -> __vma_unlink() in vma_rb_erase_ignore()
488 * b. the vma being erased in detach_vmas_to_be_unmapped() -> in vma_rb_erase_ignore()
521 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
522 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); in anon_vma_interval_tree_pre_update_vma()
530 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
531 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); in anon_vma_interval_tree_post_update_vma()
540 __rb_link = &mm->mm_rb.rb_node; in find_vma_links()
549 if (vma_tmp->vm_end > addr) { in find_vma_links()
551 if (vma_tmp->vm_start < end) in find_vma_links()
552 return -ENOMEM; in find_vma_links()
553 __rb_link = &__rb_parent->rb_left; in find_vma_links()
556 __rb_link = &__rb_parent->rb_right; in find_vma_links()
569 * vma_next() - Get the next VMA.
571 * @vma: The current vma.
581 return mm->mmap; in vma_next()
583 return vma->vm_next; in vma_next()
587 * munmap_vma_range() - munmap VMAs that overlap a range.
598 * Returns: -ENOMEM on munmap failure or 0 on success.
608 return -ENOMEM; in munmap_vma_range()
623 nr_pages = (min(end, vma->vm_end) - in count_vma_pages_range()
624 max(addr, vma->vm_start)) >> PAGE_SHIFT; in count_vma_pages_range()
627 for (vma = vma->vm_next; vma; vma = vma->vm_next) { in count_vma_pages_range()
630 if (vma->vm_start > end) in count_vma_pages_range()
633 overlap_len = min(end, vma->vm_end) - vma->vm_start; in count_vma_pages_range()
644 if (vma->vm_next) in __vma_link_rb()
645 vma_gap_update(vma->vm_next); in __vma_link_rb()
647 mm->highest_vm_end = vm_end_gap(vma); in __vma_link_rb()
650 * vma->vm_prev wasn't known when we followed the rbtree to find the in __vma_link_rb()
658 rb_link_node(&vma->vm_rb, rb_parent, rb_link); in __vma_link_rb()
659 vma->rb_subtree_gap = 0; in __vma_link_rb()
661 vma_rb_insert(vma, &mm->mm_rb); in __vma_link_rb()
668 file = vma->vm_file; in __vma_link_file()
670 struct address_space *mapping = file->f_mapping; in __vma_link_file()
672 if (vma->vm_flags & VM_DENYWRITE) in __vma_link_file()
674 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
678 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
698 if (vma->vm_file) { in vma_link()
699 mapping = vma->vm_file->f_mapping; in vma_link()
709 mm->map_count++; in vma_link()
722 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in __insert_vm_struct()
726 mm->map_count++; in __insert_vm_struct()
733 vma_rb_erase_ignore(vma, &mm->mm_rb, ignore); in __vma_unlink()
750 struct mm_struct *mm = vma->vm_mm; in __vma_adjust()
751 struct vm_area_struct *next = vma->vm_next, *orig_vma = vma; in __vma_adjust()
755 struct file *file = vma->vm_file; in __vma_adjust()
763 if (end >= next->vm_end) { in __vma_adjust()
775 VM_WARN_ON(end != next->vm_end); in __vma_adjust()
782 VM_WARN_ON(file != next->vm_file); in __vma_adjust()
790 remove_next = 1 + (end > next->vm_end); in __vma_adjust()
792 end != next->vm_next->vm_end); in __vma_adjust()
793 /* trim end to next, for case 6 first pass */ in __vma_adjust()
794 end = next->vm_end; in __vma_adjust()
804 if (remove_next == 2 && !next->anon_vma) in __vma_adjust()
805 exporter = next->vm_next; in __vma_adjust()
807 } else if (end > next->vm_start) { in __vma_adjust()
812 adjust_next = (end - next->vm_start); in __vma_adjust()
816 } else if (end < vma->vm_end) { in __vma_adjust()
822 adjust_next = -(vma->vm_end - end); in __vma_adjust()
833 if (exporter && exporter->anon_vma && !importer->anon_vma) { in __vma_adjust()
836 importer->anon_vma = exporter->anon_vma; in __vma_adjust()
846 mapping = file->f_mapping; in __vma_adjust()
847 root = &mapping->i_mmap; in __vma_adjust()
848 uprobe_munmap(vma, vma->vm_start, vma->vm_end); in __vma_adjust()
851 uprobe_munmap(next, next->vm_start, next->vm_end); in __vma_adjust()
865 anon_vma = vma->anon_vma; in __vma_adjust()
867 anon_vma = next->anon_vma; in __vma_adjust()
869 VM_WARN_ON(adjust_next && next->anon_vma && in __vma_adjust()
870 anon_vma != next->anon_vma); in __vma_adjust()
884 if (start != vma->vm_start) { in __vma_adjust()
885 vma->vm_start = start; in __vma_adjust()
888 if (end != vma->vm_end) { in __vma_adjust()
889 vma->vm_end = end; in __vma_adjust()
892 vma->vm_pgoff = pgoff; in __vma_adjust()
894 next->vm_start += adjust_next; in __vma_adjust()
895 next->vm_pgoff += adjust_next >> PAGE_SHIFT; in __vma_adjust()
917 * pre-swap() next->vm_start was reduced so in __vma_adjust()
918 * tell validate_mm_rb to ignore pre-swap() in __vma_adjust()
919 * "next" (which is stored in post-swap() in __vma_adjust()
937 mm->highest_vm_end = vm_end_gap(vma); in __vma_adjust()
960 uprobe_munmap(next, next->vm_start, next->vm_end); in __vma_adjust()
963 if (next->anon_vma) in __vma_adjust()
965 mm->map_count--; in __vma_adjust()
975 * If "next" was removed and vma->vm_end was in __vma_adjust()
977 * "next->vm_prev->vm_end" changed and the in __vma_adjust()
978 * "vma->vm_next" gap must be updated. in __vma_adjust()
980 next = vma->vm_next; in __vma_adjust()
984 * "vma" considered pre-swap(): if "vma" was in __vma_adjust()
985 * removed, next->vm_start was expanded (down) in __vma_adjust()
987 * Because of the swap() the post-swap() "vma" in __vma_adjust()
988 * actually points to pre-swap() "next" in __vma_adjust()
989 * (post-swap() "next" as opposed is now a in __vma_adjust()
996 end = next->vm_end; in __vma_adjust()
1007 * path because pre-swap() next is always not in __vma_adjust()
1008 * NULL. pre-swap() "next" is not being in __vma_adjust()
1009 * removed and its next->vm_end is not altered in __vma_adjust()
1011 * next->vm_end in remove_next == 3). in __vma_adjust()
1016 * case next->vm_end == "end" and the extended in __vma_adjust()
1017 * "vma" has vma->vm_end == next->vm_end so in __vma_adjust()
1018 * mm->highest_vm_end doesn't need any update in __vma_adjust()
1021 VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma)); in __vma_adjust()
1033 * If the vma has a ->close operation then the driver probably needs to release
1034 * per-vma resources, so we don't attempt to merge those.
1043 * match the flags but dirty bit -- the caller should mark in is_mergeable_vma()
1049 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
1051 if (vma->vm_file != file) in is_mergeable_vma()
1053 if (vma->vm_ops && vma->vm_ops->close) in is_mergeable_vma()
1071 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
1080 * We cannot merge two vmas if they have differently assigned (non-NULL)
1085 * wrap, nor mmaps which cover the final page at index -1UL.
1095 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_before()
1096 if (vma->vm_pgoff == vm_pgoff) in can_vma_merge_before()
1106 * We cannot merge two vmas if they have differently assigned (non-NULL)
1117 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_after()
1120 if (vma->vm_pgoff + vm_pglen == vm_pgoff) in can_vma_merge_after()
1131 * In most cases - when called for mmap, brk or mremap - [addr,end) is
1135 * this area are about to be changed to vm_flags - and the no-change
1177 pgoff_t pglen = (end - addr) >> PAGE_SHIFT; in vma_merge()
1182 * We later require that vma->vm_flags == vm_flags, in vma_merge()
1183 * so this tests vma->vm_flags & VM_SPECIAL, too. in vma_merge()
1190 if (area && area->vm_end == end) /* cases 6, 7, 8 */ in vma_merge()
1191 next = next->vm_next; in vma_merge()
1194 VM_WARN_ON(prev && addr <= prev->vm_start); in vma_merge()
1195 VM_WARN_ON(area && end > area->vm_end); in vma_merge()
1201 if (prev && prev->vm_end == addr && in vma_merge()
1209 if (next && end == next->vm_start && in vma_merge()
1215 is_mergeable_anon_vma(prev->anon_vma, in vma_merge()
1216 next->anon_vma, NULL)) { in vma_merge()
1218 err = __vma_adjust(prev, prev->vm_start, in vma_merge()
1219 next->vm_end, prev->vm_pgoff, NULL, in vma_merge()
1222 err = __vma_adjust(prev, prev->vm_start, in vma_merge()
1223 end, prev->vm_pgoff, NULL, prev); in vma_merge()
1233 if (next && end == next->vm_start && in vma_merge()
1238 if (prev && addr < prev->vm_end) /* case 4 */ in vma_merge()
1239 err = __vma_adjust(prev, prev->vm_start, in vma_merge()
1240 addr, prev->vm_pgoff, NULL, next); in vma_merge()
1242 err = __vma_adjust(area, addr, next->vm_end, in vma_merge()
1243 next->vm_pgoff - pglen, NULL, next); in vma_merge()
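
vma_merge() and its helpers above decide when adjacent VMAs with compatible flags, file and anon_vma can be fused into one. The effect is visible from userspace on a Linux system with /proc: splitting an anonymous mapping with mprotect() and then restoring the protection lets the adjacent VMAs collapse back into a single entry in /proc/self/maps. An illustrative sketch:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t pg = (size_t)sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }

    mprotect(p + pg, pg, PROT_READ);               /* one VMA is split into three */
    printf("split:  see /proc/%d/maps\n", getpid()); getchar();

    mprotect(p + pg, pg, PROT_READ | PROT_WRITE);  /* adjacent VMAs merge again */
    printf("merged: see /proc/%d/maps\n", getpid()); getchar();

    munmap(p, 3 * pg);
    return 0;
}
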
1269 * there is a vm_ops->close() function, because that indicates that the
1275 return a->vm_end == b->vm_start && in anon_vma_compatible()
1277 a->vm_file == b->vm_file && in anon_vma_compatible()
1278 !((a->vm_flags ^ b->vm_flags) & ~(VM_ACCESS_FLAGS | VM_SOFTDIRTY)) && in anon_vma_compatible()
1279 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT); in anon_vma_compatible()
1283 * Do some basic sanity checking to see if we can re-use the anon_vma
1284 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1293 * we do that READ_ONCE() to make sure that we never re-load the pointer.
1307 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma); in reusable_anon_vma()
1309 if (anon_vma && list_is_singular(&old->anon_vma_chain)) in reusable_anon_vma()
1328 if (vma->vm_next) { in find_mergeable_anon_vma()
1329 anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next); in find_mergeable_anon_vma()
1335 if (vma->vm_prev) in find_mergeable_anon_vma()
1336 anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma); in find_mergeable_anon_vma()
1373 locked += mm->locked_vm; in mlock_future_check()
1377 return -EAGAIN; in mlock_future_check()
1384 if (S_ISREG(inode->i_mode)) in file_mmap_size_max()
1387 if (S_ISBLK(inode->i_mode)) in file_mmap_size_max()
1390 if (S_ISSOCK(inode->i_mode)) in file_mmap_size_max()
1394 if (file->f_mode & FMODE_UNSIGNED_OFFSET) in file_mmap_size_max()
1408 maxsize -= len; in file_mmap_ok()
1415 * The caller must write-lock current->mm->mmap_lock.
1422 struct mm_struct *mm = current->mm; in do_mmap()
1430 return -EINVAL; in do_mmap()
1438 if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) in do_mmap()
1439 if (!(file && path_noexec(&file->f_path))) in do_mmap()
1452 return -ENOMEM; in do_mmap()
1456 return -EOVERFLOW; in do_mmap()
1459 if (mm->map_count > sysctl_max_map_count) in do_mmap()
1460 return -ENOMEM; in do_mmap()
1472 if (vma && vma->vm_start < addr + len) in do_mmap()
1473 return -EEXIST; in do_mmap()
1482 /* Do simple checking here so the lower-level routines won't have in do_mmap()
1487 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; in do_mmap()
1495 return -EPERM; in do_mmap()
1498 return -EAGAIN; in do_mmap()
1505 return -EOVERFLOW; in do_mmap()
1507 flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags; in do_mmap()
1512 * Force use of MAP_SHARED_VALIDATE with non-legacy in do_mmap()
1522 return -EOPNOTSUPP; in do_mmap()
1524 if (!(file->f_mode & FMODE_WRITE)) in do_mmap()
1525 return -EACCES; in do_mmap()
1526 if (IS_SWAPFILE(file->f_mapping->host)) in do_mmap()
1527 return -ETXTBSY; in do_mmap()
1531 * Make sure we don't allow writing to an append-only in do_mmap()
1534 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE)) in do_mmap()
1535 return -EACCES; in do_mmap()
1541 return -EAGAIN; in do_mmap()
1544 if (!(file->f_mode & FMODE_WRITE)) in do_mmap()
1548 if (!(file->f_mode & FMODE_READ)) in do_mmap()
1549 return -EACCES; in do_mmap()
1550 if (path_noexec(&file->f_path)) { in do_mmap()
1552 return -EPERM; in do_mmap()
1556 if (!file->f_op->mmap) in do_mmap()
1557 return -ENODEV; in do_mmap()
1559 return -EINVAL; in do_mmap()
1563 return -EINVAL; in do_mmap()
1569 return -EINVAL; in do_mmap()
1593 return -EINVAL; in do_mmap()
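
Several of the do_mmap() checks above surface directly as errno values in userspace. For example, the FMODE_WRITE test (-EACCES when a shared writable mapping is requested on a read-only descriptor) can be reproduced as below; /etc/hostname is only a convenient read-only file and is otherwise arbitrary:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/etc/hostname", O_RDONLY);
    if (fd < 0) { perror("open"); return 1; }

    void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        printf("MAP_SHARED + PROT_WRITE on an O_RDONLY fd: %s\n",
               strerror(errno));        /* expected: Permission denied */
    else
        munmap(p, 4096);

    close(fd);
    return 0;
}
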
1630 return -EBADF; in ksys_mmap_pgoff()
1634 retval = -EINVAL; in ksys_mmap_pgoff()
1639 struct hstate *hs; in ksys_mmap_pgoff() local
1641 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); in ksys_mmap_pgoff()
1642 if (!hs) in ksys_mmap_pgoff()
1643 return -EINVAL; in ksys_mmap_pgoff()
1645 len = ALIGN(len, huge_page_size(hs)); in ksys_mmap_pgoff()
1648 * taken when vm_ops->mmap() is called in ksys_mmap_pgoff()
1665 CALL_HCK_LITE_HOOK(check_jit_memory_lhck, current, fd, prot, flags, PAGE_ALIGN(len), &retval); in ksys_mmap_pgoff()
1697 return -EFAULT; in SYSCALL_DEFINE1()
1699 return -EINVAL; in SYSCALL_DEFINE1()
1707 * Some shared mappings will want the pages marked read-only
1714 vm_flags_t vm_flags = vma->vm_flags; in vma_wants_writenotify()
1715 const struct vm_operations_struct *vm_ops = vma->vm_ops; in vma_wants_writenotify()
1717 /* If it was private or non-writable, the write bit is already clear */ in vma_wants_writenotify()
1722 if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) in vma_wants_writenotify()
1744 return vma->vm_file && vma->vm_file->f_mapping && in vma_wants_writenotify()
1745 mapping_can_writeback(vma->vm_file->f_mapping); in vma_wants_writenotify()
1768 struct mm_struct *mm = current->mm; in mmap_region()
1785 (len >> PAGE_SHIFT) - nr_pages)) in mmap_region()
1786 return -ENOMEM; in mmap_region()
1791 return -ENOMEM; in mmap_region()
1798 return -ENOMEM; in mmap_region()
1817 error = -ENOMEM; in mmap_region()
1821 vma->vm_start = addr; in mmap_region()
1822 vma->vm_end = addr + len; in mmap_region()
1823 vma->vm_flags = vm_flags; in mmap_region()
1824 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
1825 vma->vm_pgoff = pgoff; in mmap_region()
1834 error = mapping_map_writable(file->f_mapping); in mmap_region()
1839 /* ->mmap() can change vma->vm_file, but must guarantee that in mmap_region()
1840 * vma_link() below can deny write-access if VM_DENYWRITE is set in mmap_region()
1842 * new file must not have been exposed to user-space, yet. in mmap_region()
1844 vma->vm_file = get_file(file); in mmap_region()
1852 * f_op->mmap method. -DaveM in mmap_region()
1856 WARN_ON_ONCE(addr != vma->vm_start); in mmap_region()
1858 addr = vma->vm_start; in mmap_region()
1863 if (unlikely(vm_flags != vma->vm_flags && prev)) { in mmap_region()
1864 merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, in mmap_region()
1865 NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL); in mmap_region()
1867 /* ->mmap() can change vma->vm_file and fput the original file. So in mmap_region()
1868 * fput the vma->vm_file here or we would add an extra fput for file in mmap_region()
1871 fput(vma->vm_file); in mmap_region()
1875 vm_flags = vma->vm_flags; in mmap_region()
1880 vm_flags = vma->vm_flags; in mmap_region()
1889 /* Allow architectures to sanity-check the vma */ in mmap_region()
1891 !arch_validate_flags(vma->vm_flags)) { in mmap_region()
1892 error = -EINVAL; in mmap_region()
1904 mapping_unmap_writable(file->f_mapping); in mmap_region()
1908 file = vma->vm_file; in mmap_region()
1916 vma == get_gate_vma(current->mm)) in mmap_region()
1917 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in mmap_region()
1919 mm->locked_vm += (len >> PAGE_SHIFT); in mmap_region()
1927 * Otherwise user-space soft-dirty page tracker won't in mmap_region()
1929 * then new mapped in-place (which must be aimed as in mmap_region()
1932 vma->vm_flags |= VM_SOFTDIRTY; in mmap_region()
1939 if (vma->vm_ops && vma->vm_ops->close) in mmap_region()
1940 vma->vm_ops->close(vma); in mmap_region()
1942 vma->vm_file = NULL; in mmap_region()
1946 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); in mmap_region()
1948 mapping_unmap_writable(file->f_mapping); in mmap_region()
1965 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length; in unmapped_area()
1966 * - gap_end = vma->vm_start >= info->low_limit + length; in unmapped_area()
1967 * - gap_end - gap_start >= length in unmapped_area()
1970 struct mm_struct *mm = current->mm; in unmapped_area()
1975 length = info->length + info->align_mask; in unmapped_area()
1976 if (length < info->length) in unmapped_area()
1977 return -ENOMEM; in unmapped_area()
1980 if (info->high_limit < length) in unmapped_area()
1981 return -ENOMEM; in unmapped_area()
1982 high_limit = info->high_limit - length; in unmapped_area()
1984 if (info->low_limit > high_limit) in unmapped_area()
1985 return -ENOMEM; in unmapped_area()
1986 low_limit = info->low_limit + length; in unmapped_area()
1989 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area()
1991 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area()
1992 if (vma->rb_subtree_gap < length) in unmapped_area()
1998 if (gap_end >= low_limit && vma->vm_rb.rb_left) { in unmapped_area()
2000 rb_entry(vma->vm_rb.rb_left, in unmapped_area()
2002 if (left->rb_subtree_gap >= length) { in unmapped_area()
2008 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; in unmapped_area()
2010 /* Check if current node has a suitable gap */ in unmapped_area()
2012 return -ENOMEM; in unmapped_area()
2014 gap_end > gap_start && gap_end - gap_start >= length) && in unmapped_area()
2015 (xpm_region_outer_hook(gap_start, gap_end, info->flags))) in unmapped_area()
2019 if (vma->vm_rb.rb_right) { in unmapped_area()
2021 rb_entry(vma->vm_rb.rb_right, in unmapped_area()
2023 if (right->rb_subtree_gap >= length) { in unmapped_area()
2031 struct rb_node *prev = &vma->vm_rb; in unmapped_area()
2036 if (prev == vma->vm_rb.rb_left) { in unmapped_area()
2037 gap_start = vm_end_gap(vma->vm_prev); in unmapped_area()
2046 gap_start = mm->highest_vm_end; in unmapped_area()
2049 return -ENOMEM; in unmapped_area()
2053 if (gap_start < info->low_limit) in unmapped_area()
2054 gap_start = info->low_limit; in unmapped_area()
2057 gap_start += (info->align_offset - gap_start) & info->align_mask; in unmapped_area()
2059 VM_BUG_ON(gap_start + info->length > info->high_limit); in unmapped_area()
2060 VM_BUG_ON(gap_start + info->length > gap_end); in unmapped_area()
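
The final step of unmapped_area() nudges gap_start upward so that the returned address satisfies (addr & align_mask) == (align_offset & align_mask). The same arithmetic pulled out as a standalone helper; the names are illustrative, not the kernel's:

/* Round addr up to the next address whose low bits under mask equal
 * the low bits of offset. */
static unsigned long align_up_to_offset(unsigned long addr,
                                        unsigned long mask,
                                        unsigned long offset)
{
    return addr + ((offset - addr) & mask);
}

/* e.g. align_up_to_offset(0x1234, 0xfff, 0x000) == 0x2000
 *      align_up_to_offset(0x1234, 0xfff, 0x800) == 0x1800 */
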
2066 struct mm_struct *mm = current->mm; in unmapped_area_topdown()
2071 length = info->length + info->align_mask; in unmapped_area_topdown()
2072 if (length < info->length) in unmapped_area_topdown()
2073 return -ENOMEM; in unmapped_area_topdown()
2079 gap_end = info->high_limit; in unmapped_area_topdown()
2081 return -ENOMEM; in unmapped_area_topdown()
2082 high_limit = gap_end - length; in unmapped_area_topdown()
2084 if (info->low_limit > high_limit) in unmapped_area_topdown()
2085 return -ENOMEM; in unmapped_area_topdown()
2086 low_limit = info->low_limit + length; in unmapped_area_topdown()
2089 gap_start = mm->highest_vm_end; in unmapped_area_topdown()
2094 if (RB_EMPTY_ROOT(&mm->mm_rb)) in unmapped_area_topdown()
2095 return -ENOMEM; in unmapped_area_topdown()
2096 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb); in unmapped_area_topdown()
2097 if (vma->rb_subtree_gap < length) in unmapped_area_topdown()
2098 return -ENOMEM; in unmapped_area_topdown()
2102 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0; in unmapped_area_topdown()
2103 if (gap_start <= high_limit && vma->vm_rb.rb_right) { in unmapped_area_topdown()
2105 rb_entry(vma->vm_rb.rb_right, in unmapped_area_topdown()
2107 if (right->rb_subtree_gap >= length) { in unmapped_area_topdown()
2114 /* Check if current node has a suitable gap */ in unmapped_area_topdown()
2117 return -ENOMEM; in unmapped_area_topdown()
2119 gap_end > gap_start && gap_end - gap_start >= length) && in unmapped_area_topdown()
2120 (xpm_region_outer_hook(gap_start, gap_end, info->flags))) in unmapped_area_topdown()
2124 if (vma->vm_rb.rb_left) { in unmapped_area_topdown()
2126 rb_entry(vma->vm_rb.rb_left, in unmapped_area_topdown()
2128 if (left->rb_subtree_gap >= length) { in unmapped_area_topdown()
2136 struct rb_node *prev = &vma->vm_rb; in unmapped_area_topdown()
2138 return -ENOMEM; in unmapped_area_topdown()
2141 if (prev == vma->vm_rb.rb_right) { in unmapped_area_topdown()
2142 gap_start = vma->vm_prev ? in unmapped_area_topdown()
2143 vm_end_gap(vma->vm_prev) : 0; in unmapped_area_topdown()
2151 if (gap_end > info->high_limit) in unmapped_area_topdown()
2152 gap_end = info->high_limit; in unmapped_area_topdown()
2156 gap_end -= info->length; in unmapped_area_topdown()
2157 gap_end -= (gap_end - info->align_offset) & info->align_mask; in unmapped_area_topdown()
2159 VM_BUG_ON(gap_end < info->low_limit); in unmapped_area_topdown()
2168 * - does not intersect with any VMA;
2169 * - is contained within the [low_limit, high_limit) interval;
2170 * - is at least the desired size.
2171 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
2177 if (info->flags & VM_UNMAPPED_AREA_TOPDOWN) in vm_unmapped_area()
2195 * This function "knows" that -ENOMEM has the bits set.
2203 struct mm_struct *mm = current->mm; in arch_get_unmapped_area()
2208 if (len > mmap_end - mmap_min_addr) in arch_get_unmapped_area()
2209 return -ENOMEM; in arch_get_unmapped_area()
2221 if (mmap_end - len >= addr && addr >= mmap_min_addr && in arch_get_unmapped_area()
2230 info.low_limit = mm->mmap_base; in arch_get_unmapped_area()
2239 * This mmap-allocator allocates new areas top-down from below the
2250 struct mm_struct *mm = current->mm; in arch_get_unmapped_area_topdown()
2255 if (len > mmap_end - mmap_min_addr) in arch_get_unmapped_area_topdown()
2256 return -ENOMEM; in arch_get_unmapped_area_topdown()
2270 if (mmap_end - len >= addr && addr >= mmap_min_addr && in arch_get_unmapped_area_topdown()
2280 info.high_limit = arch_get_mmap_base(addr, mm->mmap_base); in arch_get_unmapped_area_topdown()
2287 * so fall back to the bottom-up function here. This scenario in arch_get_unmapped_area_topdown()
2292 VM_BUG_ON(addr != -ENOMEM); in arch_get_unmapped_area_topdown()
2316 return -ENOMEM; in get_unmapped_area()
2318 get_area = current->mm->get_unmapped_area; in get_unmapped_area()
2320 if (file->f_op->get_unmapped_area) in get_unmapped_area()
2321 get_area = file->f_op->get_unmapped_area; in get_unmapped_area()
2336 if (addr > TASK_SIZE - len) in get_unmapped_area()
2337 return -ENOMEM; in get_unmapped_area()
2339 return -EINVAL; in get_unmapped_area()
2358 rb_node = mm->mm_rb.rb_node; in find_vma()
2365 if (tmp->vm_end > addr) { in find_vma()
2367 if (tmp->vm_start <= addr) in find_vma()
2369 rb_node = rb_node->rb_left; in find_vma()
2371 rb_node = rb_node->rb_right; in find_vma()
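
find_vma() above descends the rbtree looking for the first VMA whose vm_end lies above addr; the result may or may not actually contain addr, which is why callers still check vm_start. The same convention sketched over a flat, address-sorted array, purely for illustration:

#include <stddef.h>

struct demo_vma { unsigned long start, end; };

/* Return the first area whose end lies above addr, or NULL.
 * Like find_vma(), the result need not contain addr itself. */
static const struct demo_vma *
demo_find_vma(const struct demo_vma *v, int n, unsigned long addr)
{
    for (int i = 0; i < n; i++)
        if (v[i].end > addr)
            return &v[i];
    return NULL;
}
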
2392 *pprev = vma->vm_prev; in find_vma_prev()
2394 struct rb_node *rb_node = rb_last(&mm->mm_rb); in find_vma_prev()
2404 * grow-up and grow-down cases.
2409 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
2413 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
2414 return -ENOMEM; in acct_stack_growth()
2418 return -ENOMEM; in acct_stack_growth()
2421 if (vma->vm_flags & VM_LOCKED) { in acct_stack_growth()
2424 locked = mm->locked_vm + grow; in acct_stack_growth()
2428 return -ENOMEM; in acct_stack_growth()
2431 /* Check to ensure the stack will not grow into a hugetlb-only region */ in acct_stack_growth()
2432 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
2433 vma->vm_end - size; in acct_stack_growth()
2434 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
2435 return -EFAULT; in acct_stack_growth()
2442 return -ENOMEM; in acct_stack_growth()
2449 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2450 * vma is the last one with address > vma->vm_end. Have to extend vma.
2454 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
2459 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
2460 return -EFAULT; in expand_upwards()
2465 return -ENOMEM; in expand_upwards()
2475 next = vma->vm_next; in expand_upwards()
2476 if (next && next->vm_start < gap_addr && vma_is_accessible(next)) { in expand_upwards()
2477 if (!(next->vm_flags & VM_GROWSUP)) in expand_upwards()
2478 return -ENOMEM; in expand_upwards()
2484 return -ENOMEM; in expand_upwards()
2487 * vma->vm_start/vm_end cannot change under us because the caller in expand_upwards()
2491 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
2494 if (address > vma->vm_end) { in expand_upwards()
2497 size = address - vma->vm_start; in expand_upwards()
2498 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2500 error = -ENOMEM; in expand_upwards()
2501 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
2512 * So, we reuse mm->page_table_lock to guard in expand_upwards()
2515 spin_lock(&mm->page_table_lock); in expand_upwards()
2516 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
2517 mm->locked_vm += grow; in expand_upwards()
2518 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2520 vma->vm_end = address; in expand_upwards()
2522 if (vma->vm_next) in expand_upwards()
2523 vma_gap_update(vma->vm_next); in expand_upwards()
2525 mm->highest_vm_end = vm_end_gap(vma); in expand_upwards()
2526 spin_unlock(&mm->page_table_lock); in expand_upwards()
2532 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
2533 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_upwards()
2540 * vma is the first one with address < vma->vm_start. Have to extend vma.
2545 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2551 return -EPERM; in expand_downwards()
2554 prev = vma->vm_prev; in expand_downwards()
2556 if (prev && !(prev->vm_flags & VM_GROWSDOWN) && in expand_downwards()
2558 if (address - prev->vm_end < stack_guard_gap) in expand_downwards()
2559 return -ENOMEM; in expand_downwards()
2564 return -ENOMEM; in expand_downwards()
2567 * vma->vm_start/vm_end cannot change under us because the caller in expand_downwards()
2571 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2574 if (address < vma->vm_start) { in expand_downwards()
2577 size = vma->vm_end - address; in expand_downwards()
2578 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2580 error = -ENOMEM; in expand_downwards()
2581 if (grow <= vma->vm_pgoff) { in expand_downwards()
2592 * So, we reuse mm->page_table_lock to guard in expand_downwards()
2595 spin_lock(&mm->page_table_lock); in expand_downwards()
2596 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
2597 mm->locked_vm += grow; in expand_downwards()
2598 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2600 vma->vm_start = address; in expand_downwards()
2601 vma->vm_pgoff -= grow; in expand_downwards()
2604 spin_unlock(&mm->page_table_lock); in expand_downwards()
2610 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
2611 khugepaged_enter_vma_merge(vma, vma->vm_flags); in expand_downwards()
2645 if (vma && (vma->vm_start <= addr)) in find_extend_vma()
2650 if (prev->vm_flags & VM_LOCKED) in find_extend_vma()
2651 populate_vma_page_range(prev, addr, prev->vm_end, NULL); in find_extend_vma()
2670 if (vma->vm_start <= addr) in find_extend_vma()
2672 if (!(vma->vm_flags & VM_GROWSDOWN)) in find_extend_vma()
2674 start = vma->vm_start; in find_extend_vma()
2677 if (vma->vm_flags & VM_LOCKED) in find_extend_vma()
2686 * Ok - we have the memory areas we should free on the vma list,
2700 if (vma->vm_flags & VM_ACCOUNT) in remove_vma_list()
2702 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_vma_list()
2731 * we're holding the mm semaphore for removing the mapping - so any in unmap_region()
2735 for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) { in unmap_region()
2736 if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) { in unmap_region()
2742 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2743 next ? next->vm_start : USER_PGTABLES_CEILING); in unmap_region()
2758 insertion_point = (prev ? &prev->vm_next : &mm->mmap); in detach_vmas_to_be_unmapped()
2759 vma->vm_prev = NULL; in detach_vmas_to_be_unmapped()
2761 vma_rb_erase(vma, &mm->mm_rb); in detach_vmas_to_be_unmapped()
2762 mm->map_count--; in detach_vmas_to_be_unmapped()
2764 vma = vma->vm_next; in detach_vmas_to_be_unmapped()
2765 } while (vma && vma->vm_start < end); in detach_vmas_to_be_unmapped()
2768 vma->vm_prev = prev; in detach_vmas_to_be_unmapped()
2771 mm->highest_vm_end = prev ? vm_end_gap(prev) : 0; in detach_vmas_to_be_unmapped()
2772 tail_vma->vm_next = NULL; in detach_vmas_to_be_unmapped()
2782 if (vma && (vma->vm_flags & VM_GROWSDOWN)) in detach_vmas_to_be_unmapped()
2784 if (prev && (prev->vm_flags & VM_GROWSUP)) in detach_vmas_to_be_unmapped()
2799 if (vma->vm_ops && vma->vm_ops->split) { in __split_vma()
2800 err = vma->vm_ops->split(vma, addr); in __split_vma()
2807 return -ENOMEM; in __split_vma()
2810 new->vm_end = addr; in __split_vma()
2812 new->vm_start = addr; in __split_vma()
2813 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2824 if (new->vm_file) in __split_vma()
2825 get_file(new->vm_file); in __split_vma()
2827 if (new->vm_ops && new->vm_ops->open) in __split_vma()
2828 new->vm_ops->open(new); in __split_vma()
2831 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + in __split_vma()
2832 ((addr - new->vm_start) >> PAGE_SHIFT), new); in __split_vma()
2834 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); in __split_vma()
2841 if (new->vm_ops && new->vm_ops->close) in __split_vma()
2842 new->vm_ops->close(new); in __split_vma()
2843 if (new->vm_file) in __split_vma()
2844 fput(new->vm_file); in __split_vma()
2860 if (mm->map_count >= sysctl_max_map_count) in split_vma()
2861 return -ENOMEM; in split_vma()
2866 /* Munmap is split into 2 main parts -- this part which finds
2877 if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start) in __do_munmap()
2878 return -EINVAL; in __do_munmap()
2883 return -EINVAL; in __do_munmap()
2886 CALL_HCK_LITE_HOOK(delete_jit_memory_lhck, current, start, len, &errno); in __do_munmap()
2901 prev = vma->vm_prev; in __do_munmap()
2902 /* we have start < vma->vm_end */ in __do_munmap()
2905 if (vma->vm_start >= end) in __do_munmap()
2915 if (start > vma->vm_start) { in __do_munmap()
2923 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in __do_munmap()
2924 return -ENOMEM; in __do_munmap()
2934 if (last && end > last->vm_start) { in __do_munmap()
2959 if (mm->locked_vm) { in __do_munmap()
2961 while (tmp && tmp->vm_start < end) { in __do_munmap()
2962 if (tmp->vm_flags & VM_LOCKED) { in __do_munmap()
2963 mm->locked_vm -= vma_pages(tmp); in __do_munmap()
2967 tmp = tmp->vm_next; in __do_munmap()
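
The locked_vm bookkeeping above has a userspace-visible counterpart: unmapping part of an mlock()ed region lowers VmLck in /proc/self/status. A small demo, assuming the default RLIMIT_MEMLOCK allows locking two pages:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void show_vmlck(void)
{
    char cmd[64];

    snprintf(cmd, sizeof(cmd), "grep VmLck /proc/%d/status", getpid());
    system(cmd);
}

int main(void)
{
    size_t pg = (size_t)sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, 2 * pg, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED || mlock(p, 2 * pg) != 0) {
        perror("mmap/mlock");           /* may need a higher memlock rlimit */
        return 1;
    }
    show_vmlck();                       /* two pages locked */
    munmap(p, pg);                      /* the unmapped page leaves locked_vm */
    show_vmlck();                       /* one page locked */
    munmap(p + pg, pg);
    return 0;
}
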
2995 struct mm_struct *mm = current->mm; in __vm_munmap()
2999 return -EINTR; in __vm_munmap()
3038 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE5()
3041 unsigned long ret = -EINVAL; in SYSCALL_DEFINE5()
3045 current->comm, current->pid); in SYSCALL_DEFINE5()
3060 return -EINTR; in SYSCALL_DEFINE5()
3064 if (!vma || !(vma->vm_flags & VM_SHARED)) in SYSCALL_DEFINE5()
3067 if (start < vma->vm_start) in SYSCALL_DEFINE5()
3070 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
3073 for (next = vma->vm_next; next; next = next->vm_next) { in SYSCALL_DEFINE5()
3075 if (next->vm_start != next->vm_prev->vm_end) in SYSCALL_DEFINE5()
3078 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
3081 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
3084 if (start + size <= next->vm_end) in SYSCALL_DEFINE5()
3092 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
3093 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
3094 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
3098 if (vma->vm_flags & VM_LOCKED) { in SYSCALL_DEFINE5()
3102 /* drop PG_Mlocked flag for over-mapped range */ in SYSCALL_DEFINE5()
3103 for (tmp = vma; tmp->vm_start >= start + size; in SYSCALL_DEFINE5()
3104 tmp = tmp->vm_next) { in SYSCALL_DEFINE5()
3112 max(tmp->vm_start, start), in SYSCALL_DEFINE5()
3113 min(tmp->vm_end, start + size)); in SYSCALL_DEFINE5()
3117 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
3118 ret = do_mmap(vma->vm_file, start, size, in SYSCALL_DEFINE5()
3133 * brk-specific accounting here.
3137 struct mm_struct *mm = current->mm; in do_brk_flags()
3146 return -EINVAL; in do_brk_flags()
3147 flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; in do_brk_flags()
3153 error = mlock_future_check(mm, mm->def_flags, len); in do_brk_flags()
3159 return -ENOMEM; in do_brk_flags()
3163 return -ENOMEM; in do_brk_flags()
3165 if (mm->map_count > sysctl_max_map_count) in do_brk_flags()
3166 return -ENOMEM; in do_brk_flags()
3169 return -ENOMEM; in do_brk_flags()
3183 return -ENOMEM; in do_brk_flags()
3187 vma->vm_start = addr; in do_brk_flags()
3188 vma->vm_end = addr + len; in do_brk_flags()
3189 vma->vm_pgoff = pgoff; in do_brk_flags()
3190 vma->vm_flags = flags; in do_brk_flags()
3191 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk_flags()
3195 mm->total_vm += len >> PAGE_SHIFT; in do_brk_flags()
3196 mm->data_vm += len >> PAGE_SHIFT; in do_brk_flags()
3198 mm->locked_vm += (len >> PAGE_SHIFT); in do_brk_flags()
3199 vma->vm_flags |= VM_SOFTDIRTY; in do_brk_flags()
3205 struct mm_struct *mm = current->mm; in vm_brk_flags()
3213 return -ENOMEM; in vm_brk_flags()
3218 return -EINTR; in vm_brk_flags()
3221 populate = ((mm->def_flags & VM_LOCKED) != 0); in vm_brk_flags()
3250 * this mm from further consideration. Taking mm->mmap_lock for in exit_mmap()
3255 * Nothing can be holding mm->mmap_lock here and the above call in exit_mmap()
3265 set_bit(MMF_OOM_SKIP, &mm->flags); in exit_mmap()
3270 if (mm->locked_vm) { in exit_mmap()
3271 vma = mm->mmap; in exit_mmap()
3273 if (vma->vm_flags & VM_LOCKED) in exit_mmap()
3275 vma = vma->vm_next; in exit_mmap()
3281 vma = mm->mmap; in exit_mmap()
3287 tlb_gather_mmu(&tlb, mm, 0, -1); in exit_mmap()
3289 /* Use -1 here to ensure all VMAs in the mm are unmapped */ in exit_mmap()
3290 unmap_vmas(&tlb, vma, 0, -1); in exit_mmap()
3292 tlb_finish_mmu(&tlb, 0, -1); in exit_mmap()
3299 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
3308 * and into the inode's i_mmap tree. If vm_file is non-NULL
3316 if (find_vma_links(mm, vma->vm_start, vma->vm_end, in insert_vm_struct()
3318 return -ENOMEM; in insert_vm_struct()
3319 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
3321 return -ENOMEM; in insert_vm_struct()
3336 BUG_ON(vma->anon_vma); in insert_vm_struct()
3337 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
3353 unsigned long vma_start = vma->vm_start; in copy_vma()
3354 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3363 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
3370 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3371 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in copy_vma()
3372 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in copy_vma()
3377 if (unlikely(vma_start >= new_vma->vm_start && in copy_vma()
3378 vma_start < new_vma->vm_end)) { in copy_vma()
3383 * reset the dst vma->vm_pgoff to the in copy_vma()
3394 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
3399 new_vma->vm_start = addr; in copy_vma()
3400 new_vma->vm_end = addr + len; in copy_vma()
3401 new_vma->vm_pgoff = pgoff; in copy_vma()
3406 if (new_vma->vm_file) in copy_vma()
3407 get_file(new_vma->vm_file); in copy_vma()
3408 if (new_vma->vm_ops && new_vma->vm_ops->open) in copy_vma()
3409 new_vma->vm_ops->open(new_vma); in copy_vma()
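
copy_vma() above backs the mremap() move path: when a mapping cannot be resized in place, a new VMA is set up at the destination and the contents follow it. A userspace sketch that encourages a move by growing a single page with MREMAP_MAYMOVE (the kernel may still resize in place if the adjacent space happens to be free):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t pg = (size_t)sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) { perror("mmap"); return 1; }
    p[0] = 42;

    char *q = mremap(p, pg, 16 * pg, MREMAP_MAYMOVE);
    if (q == MAP_FAILED) { perror("mremap"); return 1; }

    /* Only the old pointer value is printed; the old address may be gone. */
    printf("old %p -> new %p, first byte still %d\n", (void *)p, (void *)q, q[0]);
    munmap(q, 16 * pg);
    return 0;
}
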
3429 if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT) in may_expand_vm()
3433 mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) { in may_expand_vm()
3436 mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT) in may_expand_vm()
3440 current->comm, current->pid, in may_expand_vm()
3441 (mm->data_vm + npages) << PAGE_SHIFT, in may_expand_vm()
3454 mm->total_vm += npages; in vm_stat_account()
3457 mm->exec_vm += npages; in vm_stat_account()
3459 mm->stack_vm += npages; in vm_stat_account()
3461 mm->data_vm += npages; in vm_stat_account()
3475 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
3480 struct vm_special_mapping *sm = new_vma->vm_private_data; in special_mapping_mremap()
3482 if (WARN_ON_ONCE(current->mm != new_vma->vm_mm)) in special_mapping_mremap()
3483 return -EFAULT; in special_mapping_mremap()
3485 if (sm->mremap) in special_mapping_mremap()
3486 return sm->mremap(sm, new_vma); in special_mapping_mremap()
3507 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault()
3511 if (vma->vm_ops == &legacy_special_mapping_vmops) { in special_mapping_fault()
3512 pages = vma->vm_private_data; in special_mapping_fault()
3514 struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_fault()
3516 if (sm->fault) in special_mapping_fault()
3517 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
3519 pages = sm->pages; in special_mapping_fault()
3522 for (pgoff = vmf->pgoff; pgoff && *pages; ++pages) in special_mapping_fault()
3523 pgoff--; in special_mapping_fault()
3528 vmf->page = page; in special_mapping_fault()
3546 return ERR_PTR(-ENOMEM); in __install_special_mapping()
3548 vma->vm_start = addr; in __install_special_mapping()
3549 vma->vm_end = addr + len; in __install_special_mapping()
3551 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3552 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3554 vma->vm_ops = ops; in __install_special_mapping()
3555 vma->vm_private_data = priv; in __install_special_mapping()
3561 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3575 return vma->vm_private_data == sm && in vma_is_special_mapping()
3576 (vma->vm_ops == &special_mapping_vmops || in vma_is_special_mapping()
3577 vma->vm_ops == &legacy_special_mapping_vmops); in vma_is_special_mapping()
3581 * Called with mm->mmap_lock held for writing.
3584 * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3613 if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { in vm_lock_anon_vma()
3618 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock); in vm_lock_anon_vma()
3621 * anon_vma->root->rwsem. If some other vma in this mm shares in vm_lock_anon_vma()
3626 * anon_vma->root->rwsem. in vm_lock_anon_vma()
3629 &anon_vma->root->rb_root.rb_root.rb_node)) in vm_lock_anon_vma()
3636 if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { in vm_lock_mapping()
3641 * Operations on ->flags have to be atomic because in vm_lock_mapping()
3646 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) in vm_lock_mapping()
3648 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock); in vm_lock_mapping()
3669 * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3670 * mapping->flags avoid taking the same lock twice, if more than one
3675 * - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3677 * - all i_mmap_rwsem locks;
3678 * - all anon_vma->rwsem
3698 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3699 if (signal_pending(current)) in mm_take_all_locks()
3701 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3703 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3706 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3707 if (signal_pending(current)) in mm_take_all_locks()
3709 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3711 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3714 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_take_all_locks()
3715 if (signal_pending(current)) in mm_take_all_locks()
3717 if (vma->anon_vma) in mm_take_all_locks()
3718 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
3719 vm_lock_anon_vma(mm, avc->anon_vma); in mm_take_all_locks()
3726 return -EINTR; in mm_take_all_locks()
3731 if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) { in vm_unlock_anon_vma()
3737 * the vma so the users using the anon_vma->rb_root will in vm_unlock_anon_vma()
3742 * anon_vma->root->rwsem. in vm_unlock_anon_vma()
3745 &anon_vma->root->rb_root.rb_root.rb_node)) in vm_unlock_anon_vma()
3753 if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) { in vm_unlock_mapping()
3760 &mapping->flags)) in vm_unlock_mapping()
3777 for (vma = mm->mmap; vma; vma = vma->vm_next) { in mm_drop_all_locks()
3778 if (vma->anon_vma) in mm_drop_all_locks()
3779 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
3780 vm_unlock_anon_vma(avc->anon_vma); in mm_drop_all_locks()
3781 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
3782 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()
3813 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in init_user_reserve()
3834 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in init_admin_reserve()
3878 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in reserve_mem_notifier()