Lines matching refs: vma
218 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma) in subpool_vma() argument
220 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
624 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
626 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
627 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
630 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
633 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
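The two helpers above boil down to one index calculation: how many huge pages the faulting address lies past vm_start, plus the mapping's file offset converted from base pages to huge pages. A minimal userspace model of that arithmetic, assuming 4 KB base pages and a 2 MB hstate (the names here only mirror the kernel's, nothing below is kernel code):

#include <stdio.h>

#define PAGE_SHIFT      12
#define HUGE_PAGE_SHIFT 21                              /* 2 MB huge pages */
#define HUGE_PAGE_ORDER (HUGE_PAGE_SHIFT - PAGE_SHIFT)  /* 9 */

/* Mirrors the expression on source lines 626-627 (vma_hugecache_offset). */
static unsigned long hugecache_offset(unsigned long vm_start,
                                      unsigned long vm_pgoff, /* file offset in base pages */
                                      unsigned long address)
{
        return ((address - vm_start) >> HUGE_PAGE_SHIFT) +
               (vm_pgoff >> HUGE_PAGE_ORDER);
}

int main(void)
{
        unsigned long vm_start = 0x40000000UL;

        /* Mapping starts at file offset 0; a fault in the third huge page maps to index 2. */
        printf("%lu\n", hugecache_offset(vm_start, 0, vm_start + 2 * (1UL << HUGE_PAGE_SHIFT)));
        return 0;
}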
641 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
643 if (vma->vm_ops && vma->vm_ops->pagesize) in vma_kernel_pagesize()
644 return vma->vm_ops->pagesize(vma); in vma_kernel_pagesize()
655 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) in vma_mmu_pagesize() argument
657 return vma_kernel_pagesize(vma); in vma_mmu_pagesize()
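vma_kernel_pagesize() is a plain vm_ops dispatch: use the vma's ->pagesize hook when one is provided, otherwise fall back to the base page size; vma_mmu_pagesize() is a weak default that simply forwards to it so architectures can override. A small userspace sketch of that dispatch pattern, with toy types and a hypothetical 2 MB hugetlb hook:

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct toy_vma;
struct toy_vm_ops { unsigned long (*pagesize)(struct toy_vma *); };
struct toy_vma { const struct toy_vm_ops *vm_ops; };

static unsigned long huge_2mb_pagesize(struct toy_vma *vma)
{
        (void)vma;
        return 1UL << 21;
}

static const struct toy_vm_ops hugetlb_ops = { .pagesize = huge_2mb_pagesize };

/* Same shape as the test on source lines 643-644. */
static unsigned long kernel_pagesize(struct toy_vma *vma)
{
        if (vma->vm_ops && vma->vm_ops->pagesize)
                return vma->vm_ops->pagesize(vma);
        return PAGE_SIZE;
}

int main(void)
{
        struct toy_vma hugetlb_vma = { &hugetlb_ops };
        struct toy_vma plain_vma = { NULL };

        printf("%lu %lu\n", kernel_pagesize(&hugetlb_vma), kernel_pagesize(&plain_vma));
        return 0;
}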
688 static unsigned long get_vma_private_data(struct vm_area_struct *vma) in get_vma_private_data() argument
690 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
693 static void set_vma_private_data(struct vm_area_struct *vma, in set_vma_private_data() argument
696 vma->vm_private_data = (void *)value; in set_vma_private_data()
756 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) in vma_resv_map() argument
758 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in vma_resv_map()
759 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
760 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
766 return (struct resv_map *)(get_vma_private_data(vma) & in vma_resv_map()
771 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) in set_vma_resv_map() argument
773 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_map()
774 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
776 set_vma_private_data(vma, (get_vma_private_data(vma) & in set_vma_resv_map()
780 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) in set_vma_resv_flags() argument
782 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_flags()
783 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
785 set_vma_private_data(vma, get_vma_private_data(vma) | flags); in set_vma_resv_flags()
788 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) in is_vma_resv_set() argument
790 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in is_vma_resv_set()
792 return (get_vma_private_data(vma) & flag) != 0; in is_vma_resv_set()
796 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
798 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in reset_vma_resv_huge_pages()
799 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
800 vma->vm_private_data = (void *)0; in reset_vma_resv_huge_pages()
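For private mappings, the helpers above pack both a struct resv_map pointer and a pair of low-order flag bits (HPAGE_RESV_OWNER, HPAGE_RESV_UNMAPPED) into the single pointer-sized vm_private_data word, relying on the map's alignment to keep those bits free; reset_vma_resv_huge_pages() simply zeroes the word, dropping pointer and flags together. A userspace model of that packing, with flag values chosen to mirror the kernel's but otherwise illustrative:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RESV_OWNER    (1UL << 0)   /* mirrors HPAGE_RESV_OWNER */
#define RESV_UNMAPPED (1UL << 1)   /* mirrors HPAGE_RESV_UNMAPPED */
#define RESV_MASK     (RESV_OWNER | RESV_UNMAPPED)

struct resv_map { int regions; };  /* stand-in; its alignment keeps the low bits free */

int main(void)
{
        static struct resv_map map;
        uintptr_t priv = 0;        /* models vma->vm_private_data */

        /* set_vma_resv_map(): keep the flag bits, replace the pointer part */
        priv = (priv & RESV_MASK) | (uintptr_t)&map;
        /* set_vma_resv_flags(): OR a flag into the same word */
        priv |= RESV_OWNER;

        /* vma_resv_map(): mask the flags off to recover the pointer */
        assert((struct resv_map *)(priv & ~(uintptr_t)RESV_MASK) == &map);
        /* is_vma_resv_set(): test a single flag */
        assert((priv & RESV_OWNER) != 0);
        assert((priv & RESV_UNMAPPED) == 0);

        printf("flags = 0x%lx\n", (unsigned long)(priv & RESV_MASK));
        return 0;
}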
804 static bool vma_has_reserves(struct vm_area_struct *vma, long chg) in vma_has_reserves() argument
806 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
816 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
823 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
841 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { in vma_has_reserves()
940 struct vm_area_struct *vma, in dequeue_huge_page_vma() argument
955 if (!vma_has_reserves(vma, chg) && in dequeue_huge_page_vma()
964 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
966 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) { in dequeue_huge_page_vma()
1738 struct vm_area_struct *vma, unsigned long addr) in alloc_buddy_huge_page_with_mpol() argument
1746 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in alloc_buddy_huge_page_with_mpol()
1795 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, in alloc_huge_page_vma() argument
1805 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in alloc_huge_page_vma()
1986 struct vm_area_struct *vma, unsigned long addr, in __vma_reservation_common() argument
1993 resv = vma_resv_map(vma); in __vma_reservation_common()
1997 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
2010 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
2021 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
2023 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { in __vma_reservation_common()
2047 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
2049 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
2053 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
2055 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
2059 struct vm_area_struct *vma, unsigned long addr) in vma_end_reservation() argument
2061 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
2065 struct vm_area_struct *vma, unsigned long addr) in vma_add_reservation() argument
2067 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
2082 struct vm_area_struct *vma, unsigned long address, in restore_reserve_on_error() argument
2086 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
2102 rc = vma_add_reservation(h, vma, address); in restore_reserve_on_error()
2110 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
2114 struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page() argument
2117 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_huge_page()
2118 struct hstate *h = hstate_vma(vma); in alloc_huge_page()
2131 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
2145 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2171 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2174 page = alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2177 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { in alloc_huge_page()
2190 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
2213 vma_end_reservation(h, vma, addr); in alloc_huge_page()
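A hedged sketch of the ordering visible in the alloc_huge_page() lines above: vma_needs_reservation() is asked first (negative roughly means failure, zero means a reservation already covers the page, positive means the page must be charged), and once the allocation attempt is over the caller either commits the reservation or ends it so no pending reserve-map entry is left behind. Everything below is a userspace stub standing in for the kernel functions of the same name:

#include <stdbool.h>
#include <stdio.h>

static long vma_needs_reservation_stub(void)  { return 1; }  /* >0: no reservation, charge the page */
static void vma_commit_reservation_stub(void) { puts("commit reservation"); }
static void vma_end_reservation_stub(void)    { puts("end reservation"); }
static bool allocate_huge_page_stub(void)     { return true; }

static int alloc_huge_page_flow(void)
{
        long map_chg = vma_needs_reservation_stub();

        if (map_chg < 0)
                return -1;                   /* kernel: ERR_PTR(-ENOMEM) */

        if (!allocate_huge_page_stub()) {
                vma_end_reservation_stub();  /* failure path: drop the pending entry */
                return -1;
        }

        vma_commit_reservation_stub();       /* success: make the reserve-map entry permanent */
        return 0;
}

int main(void)
{
        return alloc_huge_page_flow() ? 1 : 0;
}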
3279 static void hugetlb_vm_op_open(struct vm_area_struct *vma) in hugetlb_vm_op_open() argument
3281 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open()
3291 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_open()
3295 static void hugetlb_vm_op_close(struct vm_area_struct *vma) in hugetlb_vm_op_close() argument
3297 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close()
3298 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_close()
3299 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close()
3303 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_close()
3306 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
3307 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3323 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) in hugetlb_vm_op_split() argument
3325 if (addr & ~(huge_page_mask(hstate_vma(vma)))) in hugetlb_vm_op_split()
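hugetlb_vm_op_split() only verifies alignment: a hugetlb vma may be split solely on a huge-page boundary, and anything else is rejected (the kernel returns -EINVAL there). The same masking in a tiny userspace check, assuming a 2 MB huge page size:

#include <stdio.h>

#define HPAGE_SIZE (1UL << 21)          /* 2 MB, illustrative */
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

/* 0 when addr sits on a huge-page boundary, -22 (-EINVAL) otherwise. */
static int split_allowed(unsigned long addr)
{
        return (addr & ~HPAGE_MASK) ? -22 : 0;
}

int main(void)
{
        printf("%d %d\n",
               split_allowed(6 * HPAGE_SIZE),          /* 0: aligned    */
               split_allowed(6 * HPAGE_SIZE + 4096));  /* -22: mid-page */
        return 0;
}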
3330 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma) in hugetlb_vm_op_pagesize() argument
3332 struct hstate *hstate = hstate_vma(vma); in hugetlb_vm_op_pagesize()
3364 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
3371 vma->vm_page_prot))); in make_huge_pte()
3374 vma->vm_page_prot)); in make_huge_pte()
3378 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
3383 static void set_huge_ptep_writable(struct vm_area_struct *vma, in set_huge_ptep_writable() argument
3389 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable()
3390 update_mmu_cache(vma, address, ptep); in set_huge_ptep_writable()
3420 struct vm_area_struct *vma) in copy_hugetlb_page_range() argument
3426 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range()
3431 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
3434 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, src, in copy_hugetlb_page_range()
3435 vma->vm_start, in copy_hugetlb_page_range()
3436 vma->vm_end); in copy_hugetlb_page_range()
3440 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
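The cow flag computed on source line 3431 singles out private writable mappings: within the VM_SHARED | VM_MAYWRITE pair, exactly VM_MAYWRITE must be set for copy-on-write handling to matter during the copy. A tiny userspace demo of that masked comparison (the flag values mirror the usual kernel definitions but are only illustrative here):

#include <stdbool.h>
#include <stdio.h>

#define VM_SHARED   0x00000008UL
#define VM_MAYWRITE 0x00000020UL

static bool needs_cow(unsigned long vm_flags)
{
        /* private + writable: only VM_MAYWRITE set within the pair */
        return (vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
}

int main(void)
{
        printf("%d %d %d\n",
               needs_cow(VM_MAYWRITE),              /* 1: private, writable  */
               needs_cow(VM_SHARED | VM_MAYWRITE),  /* 0: shared             */
               needs_cow(0));                       /* 0: read-only, private */
        return 0;
}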
3519 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
3523 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
3529 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range()
3533 WARN_ON(!is_vm_hugetlb_page(vma)); in __unmap_hugepage_range()
3542 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
3547 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start, in __unmap_hugepage_range()
3549 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in __unmap_hugepage_range()
3599 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); in __unmap_hugepage_range()
3619 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
3623 struct vm_area_struct *vma, unsigned long start, in __unmap_hugepage_range_final() argument
3626 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
3638 vma->vm_flags &= ~VM_MAYSHARE; in __unmap_hugepage_range_final()
3641 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, in unmap_hugepage_range() argument
3656 adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end); in unmap_hugepage_range()
3658 mm = vma->vm_mm; in unmap_hugepage_range()
3661 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); in unmap_hugepage_range()
3671 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, in unmap_ref_private() argument
3674 struct hstate *h = hstate_vma(vma); in unmap_ref_private()
3684 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
3685 vma->vm_pgoff; in unmap_ref_private()
3686 mapping = vma->vm_file->f_mapping; in unmap_ref_private()
3696 if (iter_vma == vma) in unmap_ref_private()
3727 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_cow() argument
3732 struct hstate *h = hstate_vma(vma); in hugetlb_cow()
3746 page_move_anon_rmap(old_page, vma); in hugetlb_cow()
3747 set_huge_ptep_writable(vma, haddr, ptep); in hugetlb_cow()
3760 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && in hugetlb_cow()
3771 new_page = alloc_huge_page(vma, haddr, outside_reserve); in hugetlb_cow()
3784 unmap_ref_private(mm, vma, old_page, haddr); in hugetlb_cow()
3806 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_cow()
3811 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
3815 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr, in hugetlb_cow()
3829 huge_ptep_clear_flush(vma, haddr, ptep); in hugetlb_cow()
3832 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
3834 hugepage_add_new_anon_rmap(new_page, vma, haddr); in hugetlb_cow()
3842 restore_reserve_on_error(h, vma, haddr, new_page); in hugetlb_cow()
3853 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_page() argument
3858 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
3859 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3869 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_present() argument
3875 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
3876 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3908 struct vm_area_struct *vma, in hugetlb_no_page() argument
3912 struct hstate *h = hstate_vma(vma); in hugetlb_no_page()
3927 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { in hugetlb_no_page()
3947 if (userfaultfd_missing(vma)) { in hugetlb_no_page()
3950 .vma = vma, in hugetlb_no_page()
3974 page = alloc_huge_page(vma, haddr, 0); in hugetlb_no_page()
4002 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
4012 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_no_page()
4037 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
4038 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_no_page()
4043 vma_end_reservation(h, vma, haddr); in hugetlb_no_page()
4057 hugepage_add_new_anon_rmap(page, vma, haddr); in hugetlb_no_page()
4060 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
4061 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
4065 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
4067 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); in hugetlb_no_page()
4088 restore_reserve_on_error(h, vma, haddr, page); in hugetlb_no_page()
4119 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_fault() argument
4129 struct hstate *h = hstate_vma(vma); in hugetlb_fault()
4138 migration_entry_wait_huge(vma, mm, ptep); in hugetlb_fault()
4149 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
4150 idx = vma_hugecache_offset(h, vma, haddr); in hugetlb_fault()
4162 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); in hugetlb_fault()
4187 if (vma_needs_reservation(h, vma, haddr) < 0) { in hugetlb_fault()
4192 vma_end_reservation(h, vma, haddr); in hugetlb_fault()
4194 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_fault()
4196 vma, haddr); in hugetlb_fault()
4221 ret = hugetlb_cow(mm, vma, address, ptep, in hugetlb_fault()
4228 if (huge_ptep_set_access_flags(vma, haddr, ptep, entry, in hugetlb_fault()
4230 update_mmu_cache(vma, haddr, ptep); in hugetlb_fault()
4387 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, in follow_hugetlb_page() argument
4395 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page()
4398 while (vaddr < vma->vm_end && remainder) { in follow_hugetlb_page()
4434 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
4471 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); in follow_hugetlb_page()
4518 vmas[i] = vma; in follow_hugetlb_page()
4524 if (vaddr < vma->vm_end && remainder && in follow_hugetlb_page()
4550 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument
4553 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, in hugetlb_change_protection() argument
4556 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
4560 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection()
4571 0, vma, mm, start, end); in hugetlb_change_protection()
4572 adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end); in hugetlb_change_protection()
4575 flush_cache_range(vma, range.start, range.end); in hugetlb_change_protection()
4578 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4614 old_pte = huge_ptep_modify_prot_start(vma, address, ptep); in hugetlb_change_protection()
4616 pte = arch_make_huge_pte(pte, vma, NULL, 0); in hugetlb_change_protection()
4617 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte); in hugetlb_change_protection()
4630 flush_hugetlb_tlb_range(vma, range.start, range.end); in hugetlb_change_protection()
4632 flush_hugetlb_tlb_range(vma, start, end); in hugetlb_change_protection()
4639 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4647 struct vm_area_struct *vma, in hugetlb_reserve_pages() argument
4676 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
4693 set_vma_resv_map(vma, resv_map); in hugetlb_reserve_pages()
4694 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); in hugetlb_reserve_pages()
4735 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
4755 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
4759 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_reserve_pages()
4804 struct vm_area_struct *vma, in page_table_shareable() argument
4813 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in page_table_shareable()
4828 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) in vma_shareable() argument
4836 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) in vma_shareable()
4846 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, in adjust_range_if_pmd_sharing_possible() argument
4851 if (!(vma->vm_flags & VM_MAYSHARE)) in adjust_range_if_pmd_sharing_possible()
4861 if (range_in_vma(vma, a_start, a_end)) { in adjust_range_if_pmd_sharing_possible()
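vma_shareable() and adjust_range_if_pmd_sharing_possible() both reason about the PUD_SIZE-aligned window around an address: PMD page-table sharing is only worth considering when the mapping is VM_MAYSHARE and that whole aligned window fits inside the vma. A userspace model of the range test, assuming x86-64's 1 GB PUD_SIZE and an illustrative toy vma type:

#include <stdbool.h>
#include <stdio.h>

#define PUD_SHIFT 30                    /* x86-64: 1 GB */
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

struct toy_vma { unsigned long vm_start, vm_end; bool may_share; };

/* Does the PUD-sized window around addr fall entirely inside this shared vma? */
static bool pud_window_in_vma(const struct toy_vma *vma, unsigned long addr)
{
        unsigned long base = addr & PUD_MASK;
        unsigned long end = base + PUD_SIZE;

        return vma->may_share && vma->vm_start <= base && end <= vma->vm_end;
}

int main(void)
{
        struct toy_vma vma = {
                .vm_start  = 0x40000000UL,                         /* PUD-aligned */
                .vm_end    = 0x40000000UL + PUD_SIZE + (1UL << 21),
                .may_share = true,
        };

        printf("%d %d\n",
               pud_window_in_vma(&vma, vma.vm_start + PUD_SIZE / 2),  /* 1 */
               pud_window_in_vma(&vma, vma.vm_end - 1));              /* 0: window spills past vm_end */
        return 0;
}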
4881 struct vm_area_struct *vma = find_vma(mm, addr); in huge_pmd_share() local
4882 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
4883 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
4884 vma->vm_pgoff; in huge_pmd_share()
4891 if (!vma_shareable(vma, addr)) in huge_pmd_share()
4896 if (svma == vma) in huge_pmd_share()
4899 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
4913 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); in huge_pmd_share()
4968 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, in adjust_range_if_pmd_sharing_possible() argument
5060 follow_huge_pd(struct vm_area_struct *vma, in follow_huge_pd() argument