Lines matching refs:vma in mm/khugepaged.c (identifier cross-reference: source line number, matched text, enclosing function, and whether vma is an argument or a local there)

306 int hugepage_madvise(struct vm_area_struct *vma,  in hugepage_madvise()  argument
317 if (mm_has_pgste(vma->vm_mm)) in hugepage_madvise()
328 khugepaged_enter_vma_merge(vma, *vm_flags)) in hugepage_madvise()
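These three hits are the MADV_HUGEPAGE arm of hugepage_madvise(). A sketch of the whole function, reconstructed from the matched lines and the usual shape of this code around v4.10-v4.14 (the s390 guard and the MADV_NOHUGEPAGE arm are filled in from context and may differ in the exact revision):

    int hugepage_madvise(struct vm_area_struct *vma,
                         unsigned long *vm_flags, int advice)
    {
        switch (advice) {
        case MADV_HUGEPAGE:
    #ifdef CONFIG_S390
            /* s390 KVM guests use pgstes and cannot take THP here */
            if (mm_has_pgste(vma->vm_mm))
                return 0;
    #endif
            *vm_flags &= ~VM_NOHUGEPAGE;
            *vm_flags |= VM_HUGEPAGE;
            /* register with khugepaged now instead of waiting for a fault */
            if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
                khugepaged_enter_vma_merge(vma, *vm_flags))
                return -ENOMEM;
            break;
        case MADV_NOHUGEPAGE:
            *vm_flags &= ~VM_HUGEPAGE;
            *vm_flags |= VM_NOHUGEPAGE;
            break;
        }
        return 0;
    }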
433 int khugepaged_enter_vma_merge(struct vm_area_struct *vma, in khugepaged_enter_vma_merge() argument
437 if (!vma->anon_vma) in khugepaged_enter_vma_merge()
443 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED)) in khugepaged_enter_vma_merge()
446 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
447 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_enter_vma_merge()
449 return khugepaged_enter(vma, vm_flags); in khugepaged_enter_vma_merge()
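The hits above cover every vma use in khugepaged_enter_vma_merge(), so the function reassembles almost verbatim; only the comments are paraphrased:

    int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
                                   unsigned long vm_flags)
    {
        unsigned long hstart, hend;

        /* not faulted in yet; the fault path will register the mm later */
        if (!vma->anon_vma)
            return 0;
        /* khugepaged does not handle file or special mappings via this path */
        if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
            return 0;
        /* round inward to the PMD-aligned subrange of the vma */
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart < hend)
            return khugepaged_enter(vma, vm_flags);
        return 0;
    }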
501 static int __collapse_huge_page_isolate(struct vm_area_struct *vma, in __collapse_huge_page_isolate() argument
515 if (!userfaultfd_armed(vma) && in __collapse_huge_page_isolate()
527 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
594 mmu_notifier_test_young(vma->vm_mm, address)) in __collapse_huge_page_isolate()
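__collapse_huge_page_isolate() walks the 512 ptes under the pmd and pins the backing pages before the copy. A condensed sketch around the matched lines (page locking, LRU isolation, the writability check, and the SCAN_* result bookkeeping of the real function are elided):

    static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                                            unsigned long address, pte_t *pte)
    {
        pte_t *_pte;
        int none_or_zero = 0, referenced = 0;

        for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
             _pte++, address += PAGE_SIZE) {
            pte_t pteval = *_pte;
            struct page *page;

            if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                /* tolerate a few holes unless userfaultfd must see them */
                if (!userfaultfd_armed(vma) &&
                    ++none_or_zero <= khugepaged_max_ptes_none)
                    continue;
                goto out;
            }
            page = vm_normal_page(vma, address, pteval);
            /* ... lock the page and isolate it from the LRU ... */
            if (pte_young(pteval) || page_is_young(page) ||
                PageReferenced(page) ||
                mmu_notifier_test_young(vma->vm_mm, address))
                referenced++;
        }
        return 1;    /* every page isolated and the range looks worth it */
    out:
        release_pte_pages(pte, _pte);    /* undo any partial isolation */
        return 0;
    }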
616 struct vm_area_struct *vma, in __collapse_huge_page_copy() argument
628 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); in __collapse_huge_page_copy()
638 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
643 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
656 pte_clear(vma->vm_mm, address, _pte); in __collapse_huge_page_copy()
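__collapse_huge_page_copy() then moves the data: an empty or zero-page pte becomes a cleared sub-page of the new huge page, everything else is copied and the old page torn down. This one reconstructs nearly in full from the matched lines:

    static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                                          struct vm_area_struct *vma,
                                          unsigned long address, spinlock_t *ptl)
    {
        pte_t *_pte;

        for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++) {
            pte_t pteval = *_pte;
            struct page *src_page;

            if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                clear_user_highpage(page, address);
                add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
                if (is_zero_pfn(pte_pfn(pteval))) {
                    spin_lock(ptl);
                    pte_clear(vma->vm_mm, address, _pte);
                    spin_unlock(ptl);
                }
            } else {
                src_page = pte_page(pteval);
                copy_user_highpage(page, src_page, address, vma);
                release_pte_page(src_page);
                /* ptl keeps page_remove_rmap()'s per-cpu stats safe */
                spin_lock(ptl);
                pte_clear(vma->vm_mm, address, _pte);
                page_remove_rmap(src_page, false);
                spin_unlock(ptl);
                free_page_and_swap_cache(src_page);
            }
            address += PAGE_SIZE;
            page++;
        }
    }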
822 static bool hugepage_vma_check(struct vm_area_struct *vma) in hugepage_vma_check() argument
824 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) || in hugepage_vma_check()
825 (vma->vm_flags & VM_NOHUGEPAGE) || in hugepage_vma_check()
826 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)) in hugepage_vma_check()
828 if (shmem_file(vma->vm_file)) { in hugepage_vma_check()
831 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff, in hugepage_vma_check()
834 if (!vma->anon_vma || vma->vm_ops) in hugepage_vma_check()
836 if (is_vma_temporary_stack(vma)) in hugepage_vma_check()
838 return !(vma->vm_flags & VM_NO_KHUGEPAGED); in hugepage_vma_check()
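The hits in hugepage_vma_check() cover the whole eligibility predicate, so it can be pieced back together; only the CONFIG_TRANSPARENT_HUGE_PAGECACHE guard on the shmem branch is filled in from context:

    static bool hugepage_vma_check(struct vm_area_struct *vma)
    {
        /* not madvised (with THP not set to "always"), or explicitly forbidden */
        if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
            (vma->vm_flags & VM_NOHUGEPAGE) ||
            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
            return false;
        if (shmem_file(vma->vm_file)) {
            if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
                return false;
            /* shmem: file offset and virtual start must align the same way */
            return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
                              HPAGE_PMD_NR);
        }
        /* otherwise anonymous memory only, and never a temporary execve stack */
        if (!vma->anon_vma || vma->vm_ops)
            return false;
        if (is_vma_temporary_stack(vma))
            return false;
        return !(vma->vm_flags & VM_NO_KHUGEPAGED);
    }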
851 struct vm_area_struct *vma; in hugepage_vma_revalidate() local
857 *vmap = vma = find_vma(mm, address); in hugepage_vma_revalidate()
858 if (!vma) in hugepage_vma_revalidate()
861 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
862 hend = vma->vm_end & HPAGE_PMD_MASK; in hugepage_vma_revalidate()
865 if (!hugepage_vma_check(vma)) in hugepage_vma_revalidate()
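hugepage_vma_revalidate() reruns these checks after mmap_sem has been dropped and retaken, since the vma may have changed or vanished in between. A reconstruction (the SCAN_* values are khugepaged's internal status enum):

    static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
                                       struct vm_area_struct **vmap)
    {
        struct vm_area_struct *vma;
        unsigned long hstart, hend;

        if (unlikely(khugepaged_test_exit(mm)))
            return SCAN_ANY_PROCESS;

        *vmap = vma = find_vma(mm, address);
        if (!vma)
            return SCAN_VMA_NULL;

        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
            return SCAN_ADDRESS_RANGE;
        if (!hugepage_vma_check(vma))
            return SCAN_VMA_CHECK;
        return 0;
    }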
879 struct vm_area_struct *vma, in __collapse_huge_page_swapin() argument
885 .vma = vma, in __collapse_huge_page_swapin()
889 .pgoff = linear_page_index(vma, address), in __collapse_huge_page_swapin()
909 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) { in __collapse_huge_page_swapin()
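__collapse_huge_page_swapin() faults swapped-out ptes back in before a collapse is attempted; do_swap_page() can drop mmap_sem, which is why vmf.vma must be revalidated mid-loop. A sketch close to the era's code (trace calls and the referenced-pages threshold early exit are omitted):

    static bool __collapse_huge_page_swapin(struct mm_struct *mm,
                                            struct vm_area_struct *vma,
                                            unsigned long address, pmd_t *pmd,
                                            int referenced)
    {
        int ret;
        struct vm_fault vmf = {
            .vma = vma,
            .address = address,
            .flags = FAULT_FLAG_ALLOW_RETRY,
            .pmd = pmd,
            .pgoff = linear_page_index(vma, address),
        };

        vmf.pte = pte_offset_map(pmd, address);
        for (; vmf.address < address + HPAGE_PMD_NR * PAGE_SIZE;
             vmf.pte++, vmf.address += PAGE_SIZE) {
            vmf.orig_pte = *vmf.pte;
            if (!is_swap_pte(vmf.orig_pte))
                continue;
            ret = do_swap_page(&vmf);
            /* do_swap_page() returns VM_FAULT_RETRY with mmap_sem released */
            if (ret & VM_FAULT_RETRY) {
                down_read(&mm->mmap_sem);
                if (hugepage_vma_revalidate(mm, address, &vmf.vma))
                    return false;    /* vma gone: stop swapping in */
                if (mm_find_pmd(mm, address) != pmd)
                    return false;    /* pmd changed underneath us */
            }
            if (ret & VM_FAULT_ERROR)
                return false;
            /* do_swap_page() unmapped the pte; map it again for the loop */
            vmf.pte = pte_offset_map(pmd, vmf.address);
        }
        vmf.pte--;
        pte_unmap(vmf.pte);
        return true;
    }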
945 struct vm_area_struct *vma; in collapse_huge_page() local
976 result = hugepage_vma_revalidate(mm, address, &vma); in collapse_huge_page()
996 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) { in collapse_huge_page()
1012 result = hugepage_vma_revalidate(mm, address, &vma); in collapse_huge_page()
1019 anon_vma_lock_write(vma->anon_vma); in collapse_huge_page()
1034 _pmd = pmdp_collapse_flush(vma, address, pmd); in collapse_huge_page()
1039 isolated = __collapse_huge_page_isolate(vma, address, pte); in collapse_huge_page()
1053 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1062 anon_vma_unlock_write(vma->anon_vma); in collapse_huge_page()
1064 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl); in collapse_huge_page()
1069 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot); in collapse_huge_page()
1070 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma); in collapse_huge_page()
1081 page_add_new_anon_rmap(new_page, vma, address, true); in collapse_huge_page()
1083 lru_cache_add_active_or_unevictable(new_page, vma); in collapse_huge_page()
1086 update_mmu_cache_pmd(vma, address, pmd); in collapse_huge_page()
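collapse_huge_page() is where these helpers meet. The full function is long; the skeleton below keeps only the ordering of the matched steps, with huge-page allocation, error unwinding, mem-cgroup charging, and the mmu-notifier calls elided, so read it as a flow sketch rather than compilable code:

    static void collapse_huge_page(struct mm_struct *mm, unsigned long address,
                                   struct page **hpage, int node, int referenced)
    {
        struct vm_area_struct *vma;
        struct page *new_page;    /* huge page allocated earlier, via *hpage */
        pmd_t *pmd, _pmd;
        pte_t *pte;
        spinlock_t *pte_ptl;
        int isolated, result;

        /* 1: revalidate under mmap_sem and swap in any swapped-out ptes */
        down_read(&mm->mmap_sem);
        result = hugepage_vma_revalidate(mm, address, &vma);
        /* ... locate pmd; bail out if result != 0 ... */
        if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
            up_read(&mm->mmap_sem);
            return;
        }
        up_read(&mm->mmap_sem);

        /* 2: retake mmap_sem for write and revalidate once more */
        down_write(&mm->mmap_sem);
        result = hugepage_vma_revalidate(mm, address, &vma);
        anon_vma_lock_write(vma->anon_vma);

        /* 3: clear the pmd so concurrent faults block on the anon_vma lock */
        _pmd = pmdp_collapse_flush(vma, address, pmd);

        /* 4: isolate the small pages; on failure the old pmd is restored */
        pte = pte_offset_map(pmd, address);
        isolated = __collapse_huge_page_isolate(vma, address, pte);
        anon_vma_unlock_write(vma->anon_vma);

        /* 5: copy the data and install a single huge pmd in its place */
        __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
        _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
        page_add_new_anon_rmap(new_page, vma, address, true);
        lru_cache_add_active_or_unevictable(new_page, vma);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache_pmd(vma, address, pmd);
        up_write(&mm->mmap_sem);
    }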
1104 struct vm_area_struct *vma, in khugepaged_scan_pmd() argument
1139 if (!userfaultfd_armed(vma) && in khugepaged_scan_pmd()
1154 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
1202 mmu_notifier_test_young(vma->vm_mm, address)) in khugepaged_scan_pmd()
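khugepaged_scan_pmd() is the read-only dry run of the same pte walk: it counts holes and young pages to decide whether a collapse is worth attempting. A condensed sketch (NUMA node bookkeeping, refcount and page-state checks, and the collapse call itself are elided):

    static int khugepaged_scan_pmd(struct mm_struct *mm,
                                   struct vm_area_struct *vma,
                                   unsigned long address, struct page **hpage)
    {
        pmd_t *pmd;
        pte_t *pte, *_pte;
        spinlock_t *ptl;
        unsigned long _address;
        int none_or_zero = 0, referenced = 0, ret = 0;

        pmd = mm_find_pmd(mm, address);
        if (!pmd)
            return 0;    /* no pte table here */

        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
        for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
             _pte++, _address += PAGE_SIZE) {
            pte_t pteval = *_pte;
            struct page *page;

            if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
                if (!userfaultfd_armed(vma) &&
                    ++none_or_zero <= khugepaged_max_ptes_none)
                    continue;
                break;    /* too many holes: collapsing would waste memory */
            }
            page = vm_normal_page(vma, _address, pteval);
            /* ... node and page-state checks ... */
            if (pte_young(pteval) || page_is_young(page) ||
                PageReferenced(page) ||
                mmu_notifier_test_young(vma->vm_mm, address))
                referenced++;
        }
        pte_unmap_unlock(pte, ptl);
        /* the real code calls collapse_huge_page() when the scan succeeds */
        return ret;
    }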
1254 struct vm_area_struct *vma; in retract_page_tables() local
1259 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) { in retract_page_tables()
1261 if (vma->anon_vma) in retract_page_tables()
1263 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT); in retract_page_tables()
1266 if (vma->vm_end < addr + HPAGE_PMD_SIZE) in retract_page_tables()
1268 pmd = mm_find_pmd(vma->vm_mm, addr); in retract_page_tables()
1277 if (down_write_trylock(&vma->vm_mm->mmap_sem)) { in retract_page_tables()
1278 spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); in retract_page_tables()
1280 _pmd = pmdp_collapse_flush(vma, addr, pmd); in retract_page_tables()
1282 up_write(&vma->vm_mm->mmap_sem); in retract_page_tables()
1283 atomic_long_dec(&vma->vm_mm->nr_ptes); in retract_page_tables()
1284 pte_free(vma->vm_mm, pmd_pgtable(_pmd)); in retract_page_tables()
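retract_page_tables() handles the shmem side: once a file range is backed by a huge page, any PMD-sized, pte-mapped view of it in some process can have its now-empty page table dropped. The matched lines cover nearly the whole loop body, so the reconstruction below is close to the era's code; only the comments are paraphrased:

    static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
    {
        struct vm_area_struct *vma;
        unsigned long addr;
        pmd_t *pmd, _pmd;

        i_mmap_lock_write(mapping);
        vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
            /* an anon_vma means COWed private pages may hide in this range */
            if (vma->anon_vma)
                continue;
            addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
            if (addr & ~HPAGE_PMD_MASK)
                continue;
            if (vma->vm_end < addr + HPAGE_PMD_SIZE)
                continue;
            pmd = mm_find_pmd(vma->vm_mm, addr);
            if (!pmd)
                continue;
            /*
             * Exclusive mmap_sem is needed to retract a page table; if the
             * trylock fails, a pte-mapped THP is simply left behind.
             */
            if (down_write_trylock(&vma->vm_mm->mmap_sem)) {
                spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);

                /* the page table under this pmd is known to be empty */
                _pmd = pmdp_collapse_flush(vma, addr, pmd);
                spin_unlock(ptl);
                up_write(&vma->vm_mm->mmap_sem);
                atomic_long_dec(&vma->vm_mm->nr_ptes);
                pte_free(vma->vm_mm, pmd_pgtable(_pmd));
            }
        }
        i_mmap_unlock_write(mapping);
    }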
1689 struct vm_area_struct *vma; in khugepaged_scan_mm_slot() local
1710 vma = NULL; in khugepaged_scan_mm_slot()
1714 vma = find_vma(mm, khugepaged_scan.address); in khugepaged_scan_mm_slot()
1717 for (; vma; vma = vma->vm_next) { in khugepaged_scan_mm_slot()
1725 if (!hugepage_vma_check(vma)) { in khugepaged_scan_mm_slot()
1730 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
1731 hend = vma->vm_end & HPAGE_PMD_MASK; in khugepaged_scan_mm_slot()
1749 if (shmem_file(vma->vm_file)) { in khugepaged_scan_mm_slot()
1751 pgoff_t pgoff = linear_page_index(vma, in khugepaged_scan_mm_slot()
1753 if (!shmem_huge_enabled(vma)) in khugepaged_scan_mm_slot()
1755 file = get_file(vma->vm_file); in khugepaged_scan_mm_slot()
1762 ret = khugepaged_scan_pmd(mm, vma, in khugepaged_scan_mm_slot()
1786 if (khugepaged_test_exit(mm) || !vma) { in khugepaged_scan_mm_slot()
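Finally, khugepaged_scan_mm_slot() drives everything: it resumes from the saved cursor, walks the mm's vma list, and dispatches each PMD-sized window to the shmem or anonymous scanner. A condensed excerpt of the walk (progress accounting, the scan-budget check, and the mm_slot release at the tail are elided, and some goto-based skips are flattened into continue/break):

    down_read(&mm->mmap_sem);
    if (unlikely(khugepaged_test_exit(mm)))
        vma = NULL;
    else
        vma = find_vma(mm, khugepaged_scan.address);

    for (; vma; vma = vma->vm_next) {
        unsigned long hstart, hend;

        if (!hugepage_vma_check(vma))
            continue;
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (hstart >= hend || khugepaged_scan.address > hend)
            continue;
        if (khugepaged_scan.address < hstart)
            khugepaged_scan.address = hstart;

        while (khugepaged_scan.address < hend) {
            int ret;

            if (shmem_file(vma->vm_file)) {
                struct file *file;
                pgoff_t pgoff = linear_page_index(vma,
                                khugepaged_scan.address);

                if (!shmem_huge_enabled(vma))
                    break;
                file = get_file(vma->vm_file);
                up_read(&mm->mmap_sem);
                ret = 1;    /* mmap_sem was dropped for the shmem scan */
                khugepaged_scan_shmem(mm, file->f_mapping, pgoff, hpage);
                fput(file);
            } else {
                ret = khugepaged_scan_pmd(mm, vma,
                                khugepaged_scan.address, hpage);
            }
            khugepaged_scan.address += HPAGE_PMD_SIZE;
            /* ... budget check; nonzero ret means mmap_sem was released ... */
        }
    }
    /* tail of the function: the mm is gone or fully scanned */
    if (khugepaged_test_exit(mm) || !vma) {
        /* ... advance khugepaged_scan.mm_slot and free this slot ... */
    }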