Lines matching refs:vma in mm/khugepaged.c (Linux kernel THP collapse code). Each entry gives the source line number, the matching line, and the enclosing function as reported by the cross-referencer.
347 int hugepage_madvise(struct vm_area_struct *vma, in hugepage_madvise() argument
358 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
369 khugepaged_enter_vma_merge(vma, *vm_flags)) in hugepage_madvise()
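Editorial note: source lines 347-369 are the madvise(MADV_HUGEPAGE / MADV_NOHUGEPAGE) entry point. It flips the per-VMA hint flags (each hint clears its opposite) and, when enabling, registers the mm with khugepaged via khugepaged_enter_vma_merge(); the mm_has_pgste() test skips THP on s390 mms that use page-table storage extensions. A minimal userspace sketch of the flag toggling; the bit values and MADV_*_DEMO constants below are stand-ins, not the kernel's:

    #include <stdio.h>

    #define VM_HUGEPAGE   0x01UL   /* stand-in bit, not the kernel value */
    #define VM_NOHUGEPAGE 0x02UL   /* stand-in bit */

    enum { MADV_HUGEPAGE_DEMO, MADV_NOHUGEPAGE_DEMO };

    static void toggle_thp_hint(unsigned long *vm_flags, int advice)
    {
        switch (advice) {
        case MADV_HUGEPAGE_DEMO:
            /* Enabling one hint clears the opposite one first. */
            *vm_flags &= ~VM_NOHUGEPAGE;
            *vm_flags |= VM_HUGEPAGE;
            /* The kernel would now call khugepaged_enter_vma_merge(). */
            break;
        case MADV_NOHUGEPAGE_DEMO:
            *vm_flags &= ~VM_HUGEPAGE;
            *vm_flags |= VM_NOHUGEPAGE;
            break;
        }
    }

    int main(void)
    {
        unsigned long flags = VM_NOHUGEPAGE;
        toggle_thp_hint(&flags, MADV_HUGEPAGE_DEMO);
        printf("flags after MADV_HUGEPAGE: %#lx\n", flags); /* 0x1 */
        return 0;
    }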
442 static bool hugepage_vma_check(struct vm_area_struct *vma, in hugepage_vma_check() argument
445 if (!transhuge_vma_enabled(vma, vm_flags)) in hugepage_vma_check()
448 if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - in hugepage_vma_check()
449 vma->vm_pgoff, HPAGE_PMD_NR)) in hugepage_vma_check()
453 if (shmem_file(vma->vm_file)) in hugepage_vma_check()
454 return shmem_huge_enabled(vma); in hugepage_vma_check()
461 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file && in hugepage_vma_check()
463 struct inode *inode = vma->vm_file->f_inode; in hugepage_vma_check()
469 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_check()
471 if (vma_is_temporary_stack(vma)) in hugepage_vma_check()
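Editorial note: source lines 442-471 enumerate the eligibility tests a VMA must pass before khugepaged will touch it. A hedged userspace model of the decision order follows; struct and field names are stand-ins, and the CONFIG_READ_ONLY_THP_FOR_FS branch from lines 461-463 is omitted for brevity:

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SHIFT   12
    #define HPAGE_PMD_NR 512    /* 2 MiB huge page / 4 KiB base page */

    struct file;                /* opaque, only tested for NULL */

    struct vma_model {
        unsigned long vm_start, vm_pgoff;
        struct file *vm_file;
        void *anon_vma, *vm_ops;
        bool thp_enabled;       /* transhuge_vma_enabled() stand-in */
        bool is_shmem, shmem_huge, is_temp_stack;
    };

    static bool vma_suitable(const struct vma_model *v)
    {
        if (!v->thp_enabled)
            return false;
        /* File offset and virtual address must agree modulo the huge
         * page size, or a PMD mapping can never line up. */
        if (v->vm_file &&
            ((v->vm_start >> PAGE_SHIFT) - v->vm_pgoff) % HPAGE_PMD_NR)
            return false;
        if (v->is_shmem)
            return v->shmem_huge;   /* shmem_huge_enabled() stand-in */
        /* Anonymous case: needs an anon_vma and no special vm_ops. */
        if (!v->anon_vma || v->vm_ops)
            return false;
        return !v->is_temp_stack;
    }

    int main(void)
    {
        struct vma_model anon = { .thp_enabled = true, .anon_vma = (void *)1 };
        return vma_suitable(&anon) ? 0 : 1;    /* exits 0: eligible */
    }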
509 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
519 if (!hugepage_vma_check(vma, vm_flags)) in khugepaged_enter_vma_merge()
522 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
523 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
525 return khugepaged_enter(vma, vm_flags); in khugepaged_enter_vma_merge()
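Editorial note: the hstart/hend arithmetic at source lines 522-523 rounds the VMA start up and the end down to a PMD boundary; a collapse is only possible when a whole huge page fits between the two. A standalone demo of that arithmetic, assuming the common x86-64 2 MiB HPAGE_PMD_SIZE (the addresses are invented):

    #include <stdio.h>

    #define HPAGE_PMD_SIZE (2UL << 20)             /* 2 MiB */
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    int main(void)
    {
        unsigned long vm_start = 0x7f1234567000UL;
        unsigned long vm_end   = 0x7f1234a00000UL;

        /* Round start up, end down, exactly as lines 522-523 do. */
        unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        unsigned long hend   = vm_end & HPAGE_PMD_MASK;

        printf("hstart=%#lx hend=%#lx eligible=%d\n",
               hstart, hend, hstart < hend);
        return 0;
    }

With these inputs hstart becomes 0x7f1234600000 and hend 0x7f1234a00000, so two aligned 2 MiB ranges fit and the VMA is worth registering.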
601 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, in __collapse_huge_page_isolate() argument
616 if (!userfaultfd_armed(vma) && in __collapse_huge_page_isolate()
632 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
719 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
744 struct vm_area_struct *vma, in __collapse_huge_page_copy() argument
757 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
767 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
772 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
785 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
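Editorial note: source lines 601-785 cover the two helpers of a collapse. __collapse_huge_page_isolate() pins each base page behind the PTEs, and __collapse_huge_page_copy() copies them into the freshly allocated huge page, clearing the old PTEs as it goes; a pte_none() slot is zero-filled instead (line 757 shows the matching MM_ANONPAGES accounting). A userspace model of the copy step; all names and the NULL-as-pte_none convention are stand-ins:

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE    4096
    #define HPAGE_PMD_NR 512

    /* src_pages[i] models the page behind PTE i; NULL plays pte_none(). */
    static void collapse_copy(char *huge_dst, char *src_pages[HPAGE_PMD_NR])
    {
        for (int i = 0; i < HPAGE_PMD_NR; i++) {
            char *dst = huge_dst + (size_t)i * PAGE_SIZE;
            if (!src_pages[i]) {
                /* pte_none(): the kernel zero-fills this slot. */
                memset(dst, 0, PAGE_SIZE);
            } else {
                /* copy_user_highpage() equivalent... */
                memcpy(dst, src_pages[i], PAGE_SIZE);
                /* ...then release the old page and clear the PTE. */
                free(src_pages[i]);
                src_pages[i] = NULL;
            }
        }
    }

    int main(void)
    {
        static char huge[HPAGE_PMD_NR * PAGE_SIZE];
        static char *src[HPAGE_PMD_NR];    /* all "pte_none" here */
        collapse_copy(huge, src);
        return huge[0];                    /* 0: slot was zero-filled */
    }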
978 struct vm_area_struct *vma; in hugepage_vma_revalidate() local
984 *vmap = vma = find_vma(mm, address); in hugepage_vma_revalidate()
985 if (!vma) in hugepage_vma_revalidate()
988 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
989 hend = vma->vm_end & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
992 if (!hugepage_vma_check(vma, vma->vm_flags)) in hugepage_vma_revalidate()
995 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_revalidate()
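Editorial note: hugepage_vma_revalidate() (source lines 978-995) exists because collapse drops and re-takes mmap_lock; after a retake, earlier find_vma()/hugepage_vma_check() results are stale and every check must be redone. A sketch of that pattern with stub lookups; the numeric return codes only mirror the kernel's SCAN_* reasons:

    #include <stdbool.h>
    #include <stdio.h>

    #define HPAGE_PMD_SIZE (2UL << 20)
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    struct vma_model {
        unsigned long vm_start, vm_end;
        void *anon_vma, *vm_ops;
    };

    /* Stand-ins for find_vma() and hugepage_vma_check(). */
    static struct vma_model demo_vma = {
        0x7f0000000000UL, 0x7f0000400000UL, (void *)1, NULL
    };
    static struct vma_model *find_vma_model(unsigned long addr)
    {
        (void)addr;
        return &demo_vma;
    }
    static bool vma_check_model(const struct vma_model *v)
    {
        (void)v;
        return true;
    }

    /* 0 = ok; nonzero mirrors the SCAN_* failure reasons. */
    static int revalidate(unsigned long address, struct vma_model **vmap)
    {
        struct vma_model *vma = *vmap = find_vma_model(address);

        if (!vma)
            return 1;    /* SCAN_VMA_NULL */
        unsigned long hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        unsigned long hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
            return 2;    /* SCAN_ADDRESS_RANGE */
        if (!vma_check_model(vma))
            return 3;    /* SCAN_VMA_CHECK */
        if (!vma->anon_vma || vma->vm_ops)
            return 4;    /* anonymous-only path */
        return 0;
    }

    int main(void)
    {
        struct vma_model *vma;
        printf("revalidate: %d\n", revalidate(0x7f0000200000UL, &vma));
        return 0;
    }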
1009 struct vm_area_struct *vma, in __collapse_huge_page_swapin() argument
1019 .vma = vma, in __collapse_huge_page_swapin()
1021 .pgoff = linear_page_index(vma, haddr), in __collapse_huge_page_swapin()
1038 if (hugepage_vma_revalidate(mm, haddr, &vma)) { in __collapse_huge_page_swapin()
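Editorial note: __collapse_huge_page_swapin() (source lines 1009-1038) builds a struct vm_fault and pulls any swapped-out PTEs in the range back in before collapse. Swap IO can drop mmap_lock, which is why line 1038 revalidates the VMA afterwards. A toy model of that control flow; every helper here is a labeled stand-in:

    #include <stdbool.h>

    #define HPAGE_PMD_NR 512

    enum slot { SLOT_PRESENT, SLOT_SWAPPED, SLOT_NONE };

    /* Stand-in for the do_swap_page() call made through struct vm_fault;
     * returns true when the fault had to drop mmap_lock for IO. */
    static bool fault_in(enum slot *s)
    {
        *s = SLOT_PRESENT;
        return true;
    }

    static bool revalidate_model(void)  /* hugepage_vma_revalidate() stand-in */
    {
        return true;
    }

    static bool swapin_range(enum slot slots[HPAGE_PMD_NR])
    {
        for (int i = 0; i < HPAGE_PMD_NR; i++) {
            if (slots[i] != SLOT_SWAPPED)
                continue;
            if (fault_in(&slots[i]) && !revalidate_model())
                return false;   /* VMA changed under us: abort */
        }
        return true;
    }

    int main(void)
    {
        static enum slot slots[HPAGE_PMD_NR]; /* all SLOT_PRESENT (0) */
        slots[7] = SLOT_SWAPPED;
        return swapin_range(slots) ? 0 : 1;
    }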
1075 struct vm_area_struct *vma; in collapse_huge_page() local
1104 result = hugepage_vma_revalidate(mm, address, &vma); in collapse_huge_page()
1122 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address, in collapse_huge_page()
1135 result = hugepage_vma_revalidate(mm, address, &vma); in collapse_huge_page()
1142 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
1160 _pmd = pmdp_collapse_flush(vma, address, pmd); in collapse_huge_page()
1166 isolated = __collapse_huge_page_isolate(vma, address, pte, in collapse_huge_page()
1181 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1190 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1192 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl, in collapse_huge_page()
1204 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
1205 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); in collapse_huge_page()
1209 page_add_new_anon_rmap(new_page, vma, address, true); in collapse_huge_page()
1210 lru_cache_add_inactive_or_unevictable(new_page, vma); in collapse_huge_page()
1213 update_mmu_cache_pmd(vma, address, pmd); in collapse_huge_page()
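Editorial note: source lines 1075-1213 are the heart of collapse_huge_page(). Condensed below into an ordered checklist (runnable only as a printout); every entry names a kernel call visible in the listing above, but the ordering is a reading aid under my reading of the excerpt, not the implementation:

    #include <stdio.h>

    static int n;
    static void step(const char *what)
    {
        printf("%2d. %s\n", ++n, what);
    }

    int main(void)
    {
        step("allocate the huge page with mmap_lock released");
        step("mmap_read_lock(); hugepage_vma_revalidate()");
        step("__collapse_huge_page_swapin() - may drop the lock, revalidate");
        step("mmap_write_lock(); hugepage_vma_revalidate() again");
        step("anon_vma_lock_write(vma->anon_vma)");
        step("pmdp_collapse_flush() - detach the PTE table, flush TLBs");
        step("__collapse_huge_page_isolate() under the PTE lock");
        step("on failure: restore the old PMD, unlock anon_vma, bail out");
        step("anon_vma_unlock_write(); __collapse_huge_page_copy()");
        step("mk_huge_pmd() + maybe_pmd_mkwrite(pmd_mkdirty())");
        step("page_add_new_anon_rmap(); lru_cache_add_inactive_or_unevictable()");
        step("set the PMD; update_mmu_cache_pmd(); unlock");
        return 0;
    }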
1230 struct vm_area_struct *vma, in khugepaged_scan_pmd() argument
1275 if (!userfaultfd_armed(vma) && in khugepaged_scan_pmd()
1299 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
1361 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
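Editorial note: khugepaged_scan_pmd() (source lines 1230-1361) decides whether a PMD range is worth collapsing by counting empty, swapped, and referenced PTEs against the khugepaged_max_ptes_* sysfs tunables. A simplified model of that accounting (the real referenced test is more involved); 511 and 64 in the usage are the usual defaults, hard-coded here rather than read from sysfs:

    #include <stdbool.h>

    struct scan_counts {
        int none;        /* pte_none() slots */
        int swapped;     /* swap entries */
        int referenced;  /* young PTEs / mmu_notifier_test_young() hits */
    };

    static bool worth_collapsing(const struct scan_counts *c,
                                 int max_ptes_none, int max_ptes_swap)
    {
        if (c->none > max_ptes_none)    /* would fault in too much new memory */
            return false;
        if (c->swapped > max_ptes_swap) /* too much swap IO to pull back */
            return false;
        return c->referenced > 0;       /* must be live, not cold, memory */
    }

    int main(void)
    {
        struct scan_counts c = { .none = 12, .swapped = 3, .referenced = 40 };
        return worth_collapsing(&c, 511, 64) ? 0 : 1;  /* exits 0 */
    }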
1443 struct vm_area_struct *vma = find_vma(mm, haddr); in collapse_pte_mapped_thp() local
1452 if (!vma || !vma->vm_file || in collapse_pte_mapped_thp()
1453 !range_in_vma(vma, haddr, haddr + HPAGE_PMD_SIZE)) in collapse_pte_mapped_thp()
1462 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE)) in collapse_pte_mapped_thp()
1465 hpage = find_lock_page(vma->vm_file->f_mapping, in collapse_pte_mapped_thp()
1466 linear_page_index(vma, haddr)); in collapse_pte_mapped_thp()
1482 i_mmap_lock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1505 page = vm_normal_page(vma, addr, *pte); in collapse_pte_mapped_thp()
1523 page = vm_normal_page(vma, addr, *pte); in collapse_pte_mapped_thp()
1532 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count); in collapse_pte_mapped_thp()
1537 if (vma->anon_vma) in collapse_pte_mapped_thp()
1538 anon_vma_lock_write(vma->anon_vma); in collapse_pte_mapped_thp()
1543 _pmd = pmdp_collapse_flush(vma, haddr, pmd); in collapse_pte_mapped_thp()
1549 if (vma->anon_vma) in collapse_pte_mapped_thp()
1550 anon_vma_unlock_write(vma->anon_vma); in collapse_pte_mapped_thp()
1551 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
1560 i_mmap_unlock_write(vma->vm_file->f_mapping); in collapse_pte_mapped_thp()
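Editorial note: collapse_pte_mapped_thp() (source lines 1443-1560) handles a file range whose page cache already holds a compound page but which this mm still maps with PTEs. Before the PTE table can be unhooked, every present PTE must map subpage i of that same huge page. The core consistency check, modeled with a plain array standing in for the PTE walk:

    #include <stdbool.h>

    #define HPAGE_PMD_NR 512

    /* slot[i]: which subpage of the huge page PTE i maps, or -1 for none. */
    static bool ptes_match_hugepage(const int slot[HPAGE_PMD_NR])
    {
        for (int i = 0; i < HPAGE_PMD_NR; i++) {
            if (slot[i] == -1)
                continue;       /* pte_none() is tolerated */
            if (slot[i] != i)
                return false;   /* wrong page or wrong offset: abort */
        }
        return true;
    }

    int main(void)
    {
        int slot[HPAGE_PMD_NR];
        for (int i = 0; i < HPAGE_PMD_NR; i++)
            slot[i] = i;
        slot[3] = -1;   /* a hole is fine; a mismatch would not be */
        return ptes_match_hugepage(slot) ? 0 : 1;
    }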
1588 struct vm_area_struct *vma; in retract_page_tables() local
1594 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
1612 if (vma->anon_vma) in retract_page_tables()
1614 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in retract_page_tables()
1617 if (vma->vm_end < addr + HPAGE_PMD_SIZE) in retract_page_tables()
1619 mm = vma->vm_mm; in retract_page_tables()
1640 _pmd = pmdp_collapse_flush(vma, addr, pmd); in retract_page_tables()
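Editorial note: retract_page_tables() (source lines 1588-1640) walks every mapping of the file through the i_mmap interval tree and drops PTE tables made redundant by a file collapse, skipping VMAs with an anon_vma (private COWed pages could hide behind the PTEs) and those whose mapping of pgoff is misaligned or too short. The per-VMA filter, modeled with stub types; the alignment test is in the surrounding source, even though only the vma-matching lines appear above:

    #include <stdbool.h>

    #define PAGE_SHIFT     12
    #define HPAGE_PMD_SIZE (2UL << 20)
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

    struct vma_model {
        unsigned long vm_start, vm_end, vm_pgoff;
        void *anon_vma;
    };

    static bool can_retract(const struct vma_model *v, unsigned long pgoff,
                            unsigned long *addr)
    {
        if (v->anon_vma)    /* private COWed pages may hide in the PTEs */
            return false;
        *addr = v->vm_start + ((pgoff - v->vm_pgoff) << PAGE_SHIFT);
        if (*addr & ~HPAGE_PMD_MASK)    /* pgoff lands unaligned here */
            return false;
        if (v->vm_end < *addr + HPAGE_PMD_SIZE) /* must cover a full PMD */
            return false;
        return true;
    }

    int main(void)
    {
        struct vma_model v = { 0x7f0000000000UL, 0x7f0000800000UL, 0, NULL };
        unsigned long addr;
        return can_retract(&v, 512, &addr) ? 0 : 1; /* pgoff 512 = 2 MiB in */
    }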
2125 struct vm_area_struct *vma; in khugepaged_scan_mm_slot() local
2147 vma = NULL; in khugepaged_scan_mm_slot()
2151 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
2154 for (; vma; vma = vma->vm_next) { in khugepaged_scan_mm_slot()
2162 if (!hugepage_vma_check(vma, vma->vm_flags)) { in khugepaged_scan_mm_slot()
2167 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2168 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
2176 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma)) in khugepaged_scan_mm_slot()
2188 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) { in khugepaged_scan_mm_slot()
2189 struct file *file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
2190 pgoff_t pgoff = linear_page_index(vma, in khugepaged_scan_mm_slot()
2198 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
2222 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
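Editorial note: khugepaged_scan_mm_slot() (source lines 2125-2222) is the outer loop: resume at khugepaged_scan.address, walk the VMA list via vm_next, clamp each VMA to its PMD-aligned [hstart, hend) window, and dispatch to khugepaged_scan_pmd() or the file scan until the per-pass page budget is spent. A runnable toy of the loop shape; the budget, addresses, and scan_address name are invented stand-ins:

    #include <stdio.h>

    #define HPAGE_PMD_SIZE (2UL << 20)
    #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
    #define HPAGE_PMD_NR   512

    struct vma_model {
        unsigned long vm_start, vm_end;
        struct vma_model *vm_next;
    };

    static unsigned long scan_address;  /* khugepaged_scan.address stand-in */

    static int scan_pass(struct vma_model *vma, int pages_budget)
    {
        int progress = 0;

        for (; vma && progress < pages_budget; vma = vma->vm_next) {
            unsigned long hstart =
                (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
            unsigned long hend = vma->vm_end & HPAGE_PMD_MASK;
            if (hstart >= hend)
                continue;               /* too small for one huge page */
            if (scan_address < hstart)
                scan_address = hstart;  /* clamp resume point into range */
            while (scan_address < hend && progress < pages_budget) {
                /* khugepaged_scan_pmd() or the file scan would run here */
                scan_address += HPAGE_PMD_SIZE;
                progress += HPAGE_PMD_NR;
            }
        }
        return progress;
    }

    int main(void)
    {
        struct vma_model b = { 0x7f0001000000UL, 0x7f0001800000UL, NULL };
        struct vma_model a = { 0x7f0000000000UL, 0x7f0000400000UL, &b };
        printf("scanned %d pages, stopped at %#lx\n",
               scan_pass(&a, 2048), scan_address);
        return 0;
    }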