Lines matching refs:vma (references to vma in the kernel's hugetlb code, mm/hugetlb.c). Each entry gives the source line number, the matching line, and the enclosing function; "argument" marks lines where vma is a function or macro parameter, "local" where it is a local variable.

216 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)  in subpool_vma()  argument
218 return subpool_inode(file_inode(vma->vm_file)); in subpool_vma()
622 struct vm_area_struct *vma, unsigned long address) in vma_hugecache_offset() argument
624 return ((address - vma->vm_start) >> huge_page_shift(h)) + in vma_hugecache_offset()
625 (vma->vm_pgoff >> huge_page_order(h)); in vma_hugecache_offset()
628 pgoff_t linear_hugepage_index(struct vm_area_struct *vma, in linear_hugepage_index() argument
631 return vma_hugecache_offset(hstate_vma(vma), vma, address); in linear_hugepage_index()
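Lines 624-625 above compute the page-cache index of the huge page backing a given address: the offset within the VMA is shifted down by the huge page shift, and vm_pgoff, which the kernel keeps in PAGE_SIZE units, is shifted down by the huge page order to convert it to hugepage units. A minimal userspace sketch of the same arithmetic, assuming 4 KiB base pages and 2 MiB huge pages (illustrative values, not taken from the listing):

#include <stdio.h>

#define PAGE_SHIFT       12UL		/* 4 KiB base pages (assumed) */
#define HUGE_PAGE_SHIFT  21UL		/* 2 MiB huge pages (assumed) */
#define HUGE_PAGE_ORDER  (HUGE_PAGE_SHIFT - PAGE_SHIFT)

/* Same arithmetic as vma_hugecache_offset(): index, in hugepage units,
 * of the page backing 'address' within the mapped file. */
static unsigned long hugecache_offset(unsigned long vm_start,
				      unsigned long vm_pgoff,
				      unsigned long address)
{
	return ((address - vm_start) >> HUGE_PAGE_SHIFT) +
	       (vm_pgoff >> HUGE_PAGE_ORDER);
}

int main(void)
{
	unsigned long vm_start = 0x40000000UL;	/* hypothetical mapping base */
	unsigned long vm_pgoff = 512;		/* file offset of 2 MiB, in 4 KiB pages */
	unsigned long addr = vm_start + 3 * (1UL << HUGE_PAGE_SHIFT);

	/* Expect 3 + 1 = 4 (zero-based index into the file). */
	printf("hugecache index = %lu\n",
	       hugecache_offset(vm_start, vm_pgoff, addr));
	return 0;
}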
639 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) in vma_kernel_pagesize() argument
643 if (!is_vm_hugetlb_page(vma)) in vma_kernel_pagesize()
646 hstate = hstate_vma(vma); in vma_kernel_pagesize()
659 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) in vma_mmu_pagesize() argument
661 return vma_kernel_pagesize(vma); in vma_mmu_pagesize()
693 static unsigned long get_vma_private_data(struct vm_area_struct *vma) in get_vma_private_data() argument
695 return (unsigned long)vma->vm_private_data; in get_vma_private_data()
698 static void set_vma_private_data(struct vm_area_struct *vma, in set_vma_private_data() argument
701 vma->vm_private_data = (void *)value; in set_vma_private_data()
753 static struct resv_map *vma_resv_map(struct vm_area_struct *vma) in vma_resv_map() argument
755 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in vma_resv_map()
756 if (vma->vm_flags & VM_MAYSHARE) { in vma_resv_map()
757 struct address_space *mapping = vma->vm_file->f_mapping; in vma_resv_map()
763 return (struct resv_map *)(get_vma_private_data(vma) & in vma_resv_map()
768 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) in set_vma_resv_map() argument
770 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_map()
771 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_map()
773 set_vma_private_data(vma, (get_vma_private_data(vma) & in set_vma_resv_map()
777 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) in set_vma_resv_flags() argument
779 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in set_vma_resv_flags()
780 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); in set_vma_resv_flags()
782 set_vma_private_data(vma, get_vma_private_data(vma) | flags); in set_vma_resv_flags()
785 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) in is_vma_resv_set() argument
787 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in is_vma_resv_set()
789 return (get_vma_private_data(vma) & flag) != 0; in is_vma_resv_set()
793 void reset_vma_resv_huge_pages(struct vm_area_struct *vma) in reset_vma_resv_huge_pages() argument
795 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); in reset_vma_resv_huge_pages()
796 if (!(vma->vm_flags & VM_MAYSHARE)) in reset_vma_resv_huge_pages()
797 vma->vm_private_data = (void *)0; in reset_vma_resv_huge_pages()
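For private mappings, lines 693-797 show that a resv_map pointer and a couple of status flags share the single vm_private_data word: the pointer is recovered by masking the flag bits off (line 763), a flag is set by OR-ing it in (line 782) and tested with an AND (line 789), and set_vma_resv_map preserves the flag bits while replacing the pointer (line 773). A small self-contained sketch of that packing scheme, assuming the flags occupy the low two bits of an aligned pointer (in the kernel these are HPAGE_RESV_OWNER and HPAGE_RESV_UNMAPPED):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

/* Assumed flag layout: the low two bits of a pointer-sized word.  The
 * resv_map allocation is word-aligned, so those bits are always zero
 * in the pointer itself. */
#define RESV_OWNER    (1UL << 0)
#define RESV_UNMAPPED (1UL << 1)
#define RESV_MASK     (RESV_OWNER | RESV_UNMAPPED)

struct resv_map { long adds_in_progress; };	/* stand-in for the real struct */

/* One word playing the role of vma->vm_private_data: pointer + flags. */
static uintptr_t private_data;

static void set_resv_map(struct resv_map *map)
{
	/* keep the existing flag bits, replace the pointer bits */
	private_data = (private_data & RESV_MASK) | (uintptr_t)map;
}

static struct resv_map *get_resv_map(void)
{
	return (struct resv_map *)(private_data & ~RESV_MASK);
}

static void set_resv_flag(unsigned long flag) { private_data |= flag; }
static int  is_resv_set(unsigned long flag)   { return (private_data & flag) != 0; }

int main(void)
{
	struct resv_map *map = malloc(sizeof(*map));

	set_resv_map(map);
	set_resv_flag(RESV_OWNER);

	printf("pointer recovered: %d, owner: %d, unmapped: %d\n",
	       get_resv_map() == map, is_resv_set(RESV_OWNER),
	       is_resv_set(RESV_UNMAPPED));
	free(map);
	return 0;
}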
801 static bool vma_has_reserves(struct vm_area_struct *vma, long chg) in vma_has_reserves() argument
803 if (vma->vm_flags & VM_NORESERVE) { in vma_has_reserves()
813 if (vma->vm_flags & VM_MAYSHARE && chg == 0) in vma_has_reserves()
820 if (vma->vm_flags & VM_MAYSHARE) { in vma_has_reserves()
838 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { in vma_has_reserves()
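The vma_has_reserves() fragments (lines 801-838) show the order of the checks that decide whether a fault may consume a reserved huge page: VM_NORESERVE mappings first, then shared (VM_MAYSHARE) mappings, then private mappings whose creator owns the reservation (HPAGE_RESV_OWNER). The listing does not show the branch bodies, so the return values below are an assumption; this is a simplified reading of the visible structure, not the full kernel function:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative flag bits; the real VM_* values are kernel-internal. */
#define VM_MAYSHARE   (1UL << 0)
#define VM_NORESERVE  (1UL << 1)

/* Simplified reading of the branches visible above.  'chg' is the charge
 * computed by the caller; chg == 0 means a reservation already exists for
 * this offset.  'resv_owner' stands for is_vma_resv_set(vma, HPAGE_RESV_OWNER). */
static bool vma_has_reserves_sketch(unsigned long vm_flags, bool resv_owner,
				    long chg)
{
	if (vm_flags & VM_NORESERVE) {
		/* mapping opted out of reservations: only an already
		 * existing shared reservation may be consumed */
		return (vm_flags & VM_MAYSHARE) && chg == 0;
	}

	if (vm_flags & VM_MAYSHARE)	/* shared mappings use the shared reserve */
		return chg == 0;

	if (resv_owner)			/* private: only the mmap() caller has reserves */
		return chg == 0;

	return false;
}

int main(void)
{
	printf("shared, reserved:  %d\n", vma_has_reserves_sketch(VM_MAYSHARE, false, 0));
	printf("private non-owner: %d\n", vma_has_reserves_sketch(0, false, 0));
	printf("noreserve private: %d\n", vma_has_reserves_sketch(VM_NORESERVE, true, 0));
	return 0;
}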
937 struct vm_area_struct *vma, in dequeue_huge_page_vma() argument
952 if (!vma_has_reserves(vma, chg) && in dequeue_huge_page_vma()
961 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); in dequeue_huge_page_vma()
963 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) { in dequeue_huge_page_vma()
1621 struct vm_area_struct *vma, unsigned long addr) in __alloc_buddy_huge_page_with_mpol() argument
1629 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask); in __alloc_buddy_huge_page_with_mpol()
1857 struct vm_area_struct *vma, unsigned long addr, in __vma_reservation_common() argument
1864 resv = vma_resv_map(vma); in __vma_reservation_common()
1868 idx = vma_hugecache_offset(h, vma, addr); in __vma_reservation_common()
1881 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
1892 if (vma->vm_flags & VM_MAYSHARE) in __vma_reservation_common()
1894 else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { in __vma_reservation_common()
1918 struct vm_area_struct *vma, unsigned long addr) in vma_needs_reservation() argument
1920 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV); in vma_needs_reservation()
1924 struct vm_area_struct *vma, unsigned long addr) in vma_commit_reservation() argument
1926 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV); in vma_commit_reservation()
1930 struct vm_area_struct *vma, unsigned long addr) in vma_end_reservation() argument
1932 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV); in vma_end_reservation()
1936 struct vm_area_struct *vma, unsigned long addr) in vma_add_reservation() argument
1938 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV); in vma_add_reservation()
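Lines 1918-1938 are thin wrappers that all funnel into __vma_reservation_common() with a mode selector (VMA_NEEDS_RESV, VMA_COMMIT_RESV, VMA_END_RESV, VMA_ADD_RESV). A minimal sketch of that dispatch pattern, with the common helper reduced to a stub (the real one looks up the vma's resv_map, converts the address to a hugepage index, and queries, commits or abandons a reservation depending on the mode):

#include <stdio.h>

/* Mode names mirror the fragments above; VMA_ADD_RESV (line 1938) is the
 * fourth mode, left out of the demo in main() for brevity. */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};

/* Stub standing in for __vma_reservation_common(). */
static long vma_reservation_common(unsigned long addr, enum vma_resv_mode mode)
{
	printf("reservation op %d at address %#lx\n", (int)mode, addr);
	return 0;
}

static long vma_needs_reservation(unsigned long addr)
{
	return vma_reservation_common(addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(unsigned long addr)
{
	return vma_reservation_common(addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(unsigned long addr)
{
	(void)vma_reservation_common(addr, VMA_END_RESV);
}

int main(void)
{
	unsigned long addr = 0x200000UL;	/* hypothetical faulting address */

	if (vma_needs_reservation(addr) >= 0)
		(void)vma_commit_reservation(addr);
	else
		vma_end_reservation(addr);
	return 0;
}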
1953 struct vm_area_struct *vma, unsigned long address, in restore_reserve_on_error() argument
1957 long rc = vma_needs_reservation(h, vma, address); in restore_reserve_on_error()
1973 rc = vma_add_reservation(h, vma, address); in restore_reserve_on_error()
1981 vma_end_reservation(h, vma, address); in restore_reserve_on_error()
1985 struct page *alloc_huge_page(struct vm_area_struct *vma, in alloc_huge_page() argument
1988 struct hugepage_subpool *spool = subpool_vma(vma); in alloc_huge_page()
1989 struct hstate *h = hstate_vma(vma); in alloc_huge_page()
2002 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); in alloc_huge_page()
2016 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg); in alloc_huge_page()
2045 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr); in alloc_huge_page()
2048 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { in alloc_huge_page()
2061 map_commit = vma_commit_reservation(h, vma, addr); in alloc_huge_page()
2084 vma_end_reservation(h, vma, addr); in alloc_huge_page()
2093 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, in alloc_huge_page_noerr() argument
2096 struct page *page = alloc_huge_page(vma, addr, avoid_reserve); in alloc_huge_page_noerr()
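Taken together, the alloc_huge_page() fragments (lines 1985-2084) show the allocation order: charge the reservation first (vma_needs_reservation, line 2002), try to dequeue an already-reserved page (line 2042), fall back to a fresh buddy allocation (line 2045), then either commit the charge (line 2061) or unwind it with vma_end_reservation on failure (line 2084). A schematic, self-contained sketch of that reserve/allocate/commit-or-rollback shape; every helper here is a stub named after the fragment it stands in for, none of them are kernel APIs:

#include <stdio.h>
#include <stddef.h>

struct page { int nid; };	/* stand-in for the kernel's struct page */

/* Stubs standing in for the fragments above. */
static long needs_reservation(unsigned long addr)  { return 0; }	/* vma_needs_reservation()  */
static void end_reservation(unsigned long addr)    { }			/* vma_end_reservation()    */
static void commit_reservation(unsigned long addr) { }			/* vma_commit_reservation() */
static struct page *dequeue_reserved(unsigned long addr) { return NULL; }	/* dequeue_huge_page_vma() */
static struct page *alloc_fresh(unsigned long addr)	/* __alloc_buddy_huge_page_with_mpol() */
{
	static struct page fresh;
	return &fresh;
}

static struct page *alloc_huge_page_sketch(unsigned long addr)
{
	struct page *page;
	long chg = needs_reservation(addr);	/* 1. charge the reservation */

	if (chg < 0)
		return NULL;

	page = dequeue_reserved(addr);		/* 2. prefer an already-reserved page */
	if (!page) {
		page = alloc_fresh(addr);	/* 3. otherwise allocate a fresh one */
		if (!page) {
			end_reservation(addr);	/* 4a. failure: unwind the charge */
			return NULL;
		}
	}

	commit_reservation(addr);		/* 4b. success: make the charge stick */
	return page;
}

int main(void)
{
	printf("got page: %s\n", alloc_huge_page_sketch(0x400000UL) ? "yes" : "no");
	return 0;
}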
3096 static void hugetlb_vm_op_open(struct vm_area_struct *vma) in hugetlb_vm_op_open() argument
3098 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_open()
3108 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_open()
3112 static void hugetlb_vm_op_close(struct vm_area_struct *vma) in hugetlb_vm_op_close() argument
3114 struct hstate *h = hstate_vma(vma); in hugetlb_vm_op_close()
3115 struct resv_map *resv = vma_resv_map(vma); in hugetlb_vm_op_close()
3116 struct hugepage_subpool *spool = subpool_vma(vma); in hugetlb_vm_op_close()
3120 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_vm_op_close()
3123 start = vma_hugecache_offset(h, vma, vma->vm_start); in hugetlb_vm_op_close()
3124 end = vma_hugecache_offset(h, vma, vma->vm_end); in hugetlb_vm_op_close()
3140 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) in hugetlb_vm_op_split() argument
3142 if (addr & ~(huge_page_mask(hstate_vma(vma)))) in hugetlb_vm_op_split()
3166 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, in make_huge_pte() argument
3173 vma->vm_page_prot))); in make_huge_pte()
3176 vma->vm_page_prot)); in make_huge_pte()
3180 entry = arch_make_huge_pte(entry, vma, page, writable); in make_huge_pte()
3185 static void set_huge_ptep_writable(struct vm_area_struct *vma, in set_huge_ptep_writable() argument
3191 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) in set_huge_ptep_writable()
3192 update_mmu_cache(vma, address, ptep); in set_huge_ptep_writable()
3222 struct vm_area_struct *vma) in copy_hugetlb_page_range() argument
3228 struct hstate *h = hstate_vma(vma); in copy_hugetlb_page_range()
3234 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; in copy_hugetlb_page_range()
3236 mmun_start = vma->vm_start; in copy_hugetlb_page_range()
3237 mmun_end = vma->vm_end; in copy_hugetlb_page_range()
3241 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) { in copy_hugetlb_page_range()
3315 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, in __unmap_hugepage_range() argument
3319 struct mm_struct *mm = vma->vm_mm; in __unmap_hugepage_range()
3325 struct hstate *h = hstate_vma(vma); in __unmap_hugepage_range()
3330 WARN_ON(!is_vm_hugetlb_page(vma)); in __unmap_hugepage_range()
3339 tlb_start_vma(tlb, vma); in __unmap_hugepage_range()
3385 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED); in __unmap_hugepage_range()
3405 tlb_end_vma(tlb, vma); in __unmap_hugepage_range()
3409 struct vm_area_struct *vma, unsigned long start, in __unmap_hugepage_range_final() argument
3412 __unmap_hugepage_range(tlb, vma, start, end, ref_page); in __unmap_hugepage_range_final()
3424 vma->vm_flags &= ~VM_MAYSHARE; in __unmap_hugepage_range_final()
3427 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, in unmap_hugepage_range() argument
3433 mm = vma->vm_mm; in unmap_hugepage_range()
3436 __unmap_hugepage_range(&tlb, vma, start, end, ref_page); in unmap_hugepage_range()
3446 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma, in unmap_ref_private() argument
3449 struct hstate *h = hstate_vma(vma); in unmap_ref_private()
3459 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + in unmap_ref_private()
3460 vma->vm_pgoff; in unmap_ref_private()
3461 mapping = vma->vm_file->f_mapping; in unmap_ref_private()
3471 if (iter_vma == vma) in unmap_ref_private()
3502 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_cow() argument
3507 struct hstate *h = hstate_vma(vma); in hugetlb_cow()
3520 page_move_anon_rmap(old_page, vma); in hugetlb_cow()
3521 set_huge_ptep_writable(vma, address, ptep); in hugetlb_cow()
3534 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && in hugetlb_cow()
3545 new_page = alloc_huge_page(vma, address, outside_reserve); in hugetlb_cow()
3558 unmap_ref_private(mm, vma, old_page, address); in hugetlb_cow()
3582 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_cow()
3587 copy_user_huge_page(new_page, old_page, address, vma, in hugetlb_cow()
3606 huge_ptep_clear_flush(vma, address, ptep); in hugetlb_cow()
3609 make_huge_pte(vma, new_page, 1)); in hugetlb_cow()
3611 hugepage_add_new_anon_rmap(new_page, vma, address); in hugetlb_cow()
3619 restore_reserve_on_error(h, vma, address, new_page); in hugetlb_cow()
3630 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_page() argument
3635 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_page()
3636 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_page()
3646 struct vm_area_struct *vma, unsigned long address) in hugetlbfs_pagecache_present() argument
3652 mapping = vma->vm_file->f_mapping; in hugetlbfs_pagecache_present()
3653 idx = vma_hugecache_offset(h, vma, address); in hugetlbfs_pagecache_present()
3684 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_no_page() argument
3688 struct hstate *h = hstate_vma(vma); in hugetlb_no_page()
3702 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) { in hugetlb_no_page()
3722 if (userfaultfd_missing(vma)) { in hugetlb_no_page()
3725 .vma = vma, in hugetlb_no_page()
3750 page = alloc_huge_page(vma, address, 0); in hugetlb_no_page()
3763 if (vma->vm_flags & VM_MAYSHARE) { in hugetlb_no_page()
3773 if (unlikely(anon_vma_prepare(vma))) { in hugetlb_no_page()
3798 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
3799 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_no_page()
3804 vma_end_reservation(h, vma, address); in hugetlb_no_page()
3818 hugepage_add_new_anon_rmap(page, vma, address); in hugetlb_no_page()
3821 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE) in hugetlb_no_page()
3822 && (vma->vm_flags & VM_SHARED))); in hugetlb_no_page()
3826 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { in hugetlb_no_page()
3828 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl); in hugetlb_no_page()
3849 restore_reserve_on_error(h, vma, address, page); in hugetlb_no_page()
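One detail worth pulling out of the hugetlb_no_page() fragments: the new PTE built at lines 3821-3822 is writable only when the mapping is both VM_WRITE and VM_SHARED, so a private writable mapping gets a read-only PTE first and the subsequent write fault goes through hugetlb_cow() (line 3828). A tiny sketch of that predicate (the flag bit values are illustrative, not the kernel's):

#include <stdio.h>
#include <stdbool.h>

/* Illustrative bits; the real VM_* flags are kernel-internal. */
#define VM_WRITE  (1UL << 0)
#define VM_SHARED (1UL << 1)

/* Mirrors the 'writable' argument passed to make_huge_pte() at line 3821:
 * only shared writable mappings get a writable PTE up front. */
static bool pte_writable_up_front(unsigned long vm_flags)
{
	return (vm_flags & VM_WRITE) && (vm_flags & VM_SHARED);
}

int main(void)
{
	printf("shared+write  -> writable pte: %d\n",
	       pte_writable_up_front(VM_WRITE | VM_SHARED));
	printf("private+write -> writable pte: %d (a later write fault takes the COW path)\n",
	       pte_writable_up_front(VM_WRITE));
	return 0;
}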
3880 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, in hugetlb_fault() argument
3890 struct hstate *h = hstate_vma(vma); in hugetlb_fault()
3900 migration_entry_wait_huge(vma, mm, ptep); in hugetlb_fault()
3911 mapping = vma->vm_file->f_mapping; in hugetlb_fault()
3912 idx = vma_hugecache_offset(h, vma, address); in hugetlb_fault()
3924 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags); in hugetlb_fault()
3949 if (vma_needs_reservation(h, vma, address) < 0) { in hugetlb_fault()
3954 vma_end_reservation(h, vma, address); in hugetlb_fault()
3956 if (!(vma->vm_flags & VM_MAYSHARE)) in hugetlb_fault()
3958 vma, address); in hugetlb_fault()
3983 ret = hugetlb_cow(mm, vma, address, ptep, in hugetlb_fault()
3990 if (huge_ptep_set_access_flags(vma, address, ptep, entry, in hugetlb_fault()
3992 update_mmu_cache(vma, address, ptep); in hugetlb_fault()
4149 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, in follow_hugetlb_page() argument
4157 struct hstate *h = hstate_vma(vma); in follow_hugetlb_page()
4160 while (vaddr < vma->vm_end && remainder) { in follow_hugetlb_page()
4196 !hugetlbfs_pagecache_present(h, vma, vaddr)) { in follow_hugetlb_page()
4233 ret = hugetlb_fault(mm, vma, vaddr, fault_flags); in follow_hugetlb_page()
4279 vmas[i] = vma; in follow_hugetlb_page()
4285 if (vaddr < vma->vm_end && remainder && in follow_hugetlb_page()
4311 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) argument
4314 unsigned long hugetlb_change_protection(struct vm_area_struct *vma, in hugetlb_change_protection() argument
4317 struct mm_struct *mm = vma->vm_mm; in hugetlb_change_protection()
4321 struct hstate *h = hstate_vma(vma); in hugetlb_change_protection()
4325 flush_cache_range(vma, address, end); in hugetlb_change_protection()
4328 i_mmap_lock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4363 pte = arch_make_huge_pte(pte, vma, NULL, 0); in hugetlb_change_protection()
4375 flush_hugetlb_tlb_range(vma, start, end); in hugetlb_change_protection()
4377 i_mmap_unlock_write(vma->vm_file->f_mapping); in hugetlb_change_protection()
4385 struct vm_area_struct *vma, in hugetlb_reserve_pages() argument
4414 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
4426 set_vma_resv_map(vma, resv_map); in hugetlb_reserve_pages()
4427 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); in hugetlb_reserve_pages()
4468 if (!vma || vma->vm_flags & VM_MAYSHARE) { in hugetlb_reserve_pages()
4488 if (!vma || vma->vm_flags & VM_MAYSHARE) in hugetlb_reserve_pages()
4492 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) in hugetlb_reserve_pages()
4533 struct vm_area_struct *vma, in page_table_shareable() argument
4542 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK; in page_table_shareable()
4557 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr) in vma_shareable() argument
4565 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end)) in vma_shareable()
4575 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, in adjust_range_if_pmd_sharing_possible() argument
4580 if (!(vma->vm_flags & VM_MAYSHARE)) in adjust_range_if_pmd_sharing_possible()
4590 if (range_in_vma(vma, a_start, a_end)) { in adjust_range_if_pmd_sharing_possible()
4610 struct vm_area_struct *vma = find_vma(mm, addr); in huge_pmd_share() local
4611 struct address_space *mapping = vma->vm_file->f_mapping; in huge_pmd_share()
4612 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + in huge_pmd_share()
4613 vma->vm_pgoff; in huge_pmd_share()
4620 if (!vma_shareable(vma, addr)) in huge_pmd_share()
4625 if (svma == vma) in huge_pmd_share()
4628 saddr = page_table_shareable(svma, vma, addr, idx); in huge_pmd_share()
4642 ptl = huge_pte_lock(hstate_vma(vma), mm, spte); in huge_pmd_share()
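The huge_pmd_share() path (lines 4610-4642) only shares a PMD page when vma_shareable() agrees, and lines 4557-4565 show the condition: the mapping must be VM_MAYSHARE and the whole PUD-sized window around the faulting address must lie inside it. A small sketch of just that alignment check, assuming x86-64 geometry where a PUD entry covers 1 GiB (an assumption, not stated in the listing):

#include <stdio.h>
#include <stdbool.h>

#define PUD_SHIFT 30UL			/* 1 GiB per PUD entry (x86-64, assumed) */
#define PUD_SIZE  (1UL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

/* Mirrors the vma_shareable() condition: the whole PUD-aligned window
 * around 'addr' must lie inside the (shared) mapping before the PMD
 * page table may be shared with other mappings of the same file. */
static bool pud_window_in_vma(unsigned long vm_start, unsigned long vm_end,
			      unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end  = base + PUD_SIZE;

	return base >= vm_start && end <= vm_end;
}

int main(void)
{
	/* hypothetical mapping: starts 2 MiB past a 1 GiB boundary */
	unsigned long start = (3UL << 30) + (2UL << 20);
	unsigned long end   = 8UL << 30;

	printf("middle of a fully covered GiB: %d\n",
	       pud_window_in_vma(start, end, (5UL << 30) + 0x1000));
	printf("inside the first, partial GiB: %d\n",
	       pud_window_in_vma(start, end, (3UL << 30) + (4UL << 20)));
	return 0;
}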
4697 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, in adjust_range_if_pmd_sharing_possible() argument
4789 follow_huge_pd(struct vm_area_struct *vma, in follow_huge_pd() argument