Lines Matching refs:vma (mm/huge_memory.c)
470 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma) in maybe_pmd_mkwrite() argument
472 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pmd_mkwrite()
553 struct vm_area_struct *vma = vmf->vma; in __do_huge_pmd_anonymous_page() local
561 if (mem_cgroup_try_charge(page, vma->vm_mm, gfp | __GFP_NORETRY, &memcg, in __do_huge_pmd_anonymous_page()
568 pgtable = pte_alloc_one(vma->vm_mm, haddr); in __do_huge_pmd_anonymous_page()
582 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
588 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
593 if (userfaultfd_missing(vma)) { in __do_huge_pmd_anonymous_page()
599 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
605 entry = mk_huge_pmd(page, vma->vm_page_prot); in __do_huge_pmd_anonymous_page()
606 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in __do_huge_pmd_anonymous_page()
607 page_add_new_anon_rmap(page, vma, haddr, true); in __do_huge_pmd_anonymous_page()
609 lru_cache_add_active_or_unevictable(page, vma); in __do_huge_pmd_anonymous_page()
610 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
611 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
612 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
613 atomic_long_inc(&vma->vm_mm->nr_ptes); in __do_huge_pmd_anonymous_page()
623 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
639 static inline gfp_t alloc_hugepage_direct_gfpmask(struct vm_area_struct *vma) in alloc_hugepage_direct_gfpmask() argument
641 const bool vma_madvised = !!(vma->vm_flags & VM_HUGEPAGE); in alloc_hugepage_direct_gfpmask()
658 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, in set_huge_zero_page() argument
664 entry = mk_pmd(zero_page, vma->vm_page_prot); in set_huge_zero_page()
675 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_anonymous_page() local
680 if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) in do_huge_pmd_anonymous_page()
682 if (unlikely(anon_vma_prepare(vma))) in do_huge_pmd_anonymous_page()
684 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) in do_huge_pmd_anonymous_page()
687 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
693 pgtable = pte_alloc_one(vma->vm_mm, haddr); in do_huge_pmd_anonymous_page()
696 zero_page = mm_get_huge_zero_page(vma->vm_mm); in do_huge_pmd_anonymous_page()
698 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
702 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
706 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
709 } else if (userfaultfd_missing(vma)) { in do_huge_pmd_anonymous_page()
714 set_huge_zero_page(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
722 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
725 gfp = alloc_hugepage_direct_gfpmask(vma); in do_huge_pmd_anonymous_page()
726 page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_anonymous_page()
735 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pmd() argument
739 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
749 entry = maybe_pmd_mkwrite(entry, vma); in insert_pfn_pmd()
758 update_mmu_cache_pmd(vma, addr, pmd); in insert_pfn_pmd()
762 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_pmd() argument
765 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pmd()
772 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_pmd()
773 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pmd()
775 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pmd()
778 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pmd()
782 pgtable = pte_alloc_one(vma->vm_mm, addr); in vmf_insert_pfn_pmd()
787 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pmd()
789 insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write, pgtable); in vmf_insert_pfn_pmd()
795 static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma) in maybe_pud_mkwrite() argument
797 if (likely(vma->vm_flags & VM_WRITE)) in maybe_pud_mkwrite()
802 static void insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in insert_pfn_pud() argument
805 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
815 entry = maybe_pud_mkwrite(entry, vma); in insert_pfn_pud()
818 update_mmu_cache_pud(vma, addr, pud); in insert_pfn_pud()
822 int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr, in vmf_insert_pfn_pud() argument
825 pgprot_t pgprot = vma->vm_page_prot; in vmf_insert_pfn_pud()
831 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))); in vmf_insert_pfn_pud()
832 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) == in vmf_insert_pfn_pud()
834 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags)); in vmf_insert_pfn_pud()
837 if (addr < vma->vm_start || addr >= vma->vm_end) in vmf_insert_pfn_pud()
840 track_pfn_insert(vma, &pgprot, pfn); in vmf_insert_pfn_pud()
842 insert_pfn_pud(vma, addr, pud, pfn, pgprot, write); in vmf_insert_pfn_pud()
848 static void touch_pmd(struct vm_area_struct *vma, unsigned long addr, in touch_pmd() argument
856 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK, in touch_pmd()
858 update_mmu_cache_pmd(vma, addr, pmd); in touch_pmd()
861 struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pmd() argument
865 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
886 touch_pmd(vma, addr, pmd, flags); in follow_devmap_pmd()
908 struct vm_area_struct *vma) in copy_huge_pmd() argument
917 if (!vma_is_anonymous(vma)) in copy_huge_pmd()
969 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd, in copy_huge_pmd()
996 static void touch_pud(struct vm_area_struct *vma, unsigned long addr, in touch_pud() argument
1004 if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK, in touch_pud()
1006 update_mmu_cache_pud(vma, addr, pud); in touch_pud()
1009 struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, in follow_devmap_pud() argument
1013 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pud()
1028 touch_pud(vma, addr, pud, flags); in follow_devmap_pud()
1050 struct vm_area_struct *vma) in copy_huge_pud() argument
1091 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1099 if (pudp_set_access_flags(vmf->vma, haddr, vmf->pud, entry, write)) in huge_pud_set_accessed()
1100 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud); in huge_pud_set_accessed()
1113 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1121 if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write)) in huge_pmd_set_accessed()
1122 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd); in huge_pmd_set_accessed()
1131 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page_fallback() local
1149 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma, in do_huge_pmd_wp_page_fallback()
1152 mem_cgroup_try_charge(pages[i], vma->vm_mm, in do_huge_pmd_wp_page_fallback()
1172 haddr + PAGE_SIZE * i, vma); in do_huge_pmd_wp_page_fallback()
1179 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1181 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1186 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1189 pgtable = pgtable_trans_huge_withdraw(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page_fallback()
1190 pmd_populate(vma->vm_mm, &_pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1194 entry = mk_pte(pages[i], vma->vm_page_prot); in do_huge_pmd_wp_page_fallback()
1195 entry = maybe_mkwrite(pte_mkdirty(entry), vma); in do_huge_pmd_wp_page_fallback()
1198 page_add_new_anon_rmap(pages[i], vmf->vma, haddr, false); in do_huge_pmd_wp_page_fallback()
1200 lru_cache_add_active_or_unevictable(pages[i], vma); in do_huge_pmd_wp_page_fallback()
1203 set_pte_at(vma->vm_mm, haddr, vmf->pte, entry); in do_huge_pmd_wp_page_fallback()
1209 pmd_populate(vma->vm_mm, vmf->pmd, pgtable); in do_huge_pmd_wp_page_fallback()
1213 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1223 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in do_huge_pmd_wp_page_fallback()
1236 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_wp_page() local
1245 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1246 VM_BUG_ON_VMA(!vma->anon_vma, vma); in do_huge_pmd_wp_page()
1274 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1275 if (pmdp_set_access_flags(vma, haddr, vmf->pmd, entry, 1)) in do_huge_pmd_wp_page()
1276 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1285 if (transparent_hugepage_enabled(vma) && in do_huge_pmd_wp_page()
1287 huge_gfp = alloc_hugepage_direct_gfpmask(vma); in do_huge_pmd_wp_page()
1288 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER); in do_huge_pmd_wp_page()
1296 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1301 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1310 if (unlikely(mem_cgroup_try_charge(new_page, vma->vm_mm, in do_huge_pmd_wp_page()
1313 split_huge_pmd(vma, vmf->pmd, vmf->address); in do_huge_pmd_wp_page()
1326 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1331 mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1343 entry = mk_huge_pmd(new_page, vma->vm_page_prot); in do_huge_pmd_wp_page()
1344 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in do_huge_pmd_wp_page()
1345 pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd); in do_huge_pmd_wp_page()
1346 page_add_new_anon_rmap(new_page, vma, haddr, true); in do_huge_pmd_wp_page()
1348 lru_cache_add_active_or_unevictable(new_page, vma); in do_huge_pmd_wp_page()
1349 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in do_huge_pmd_wp_page()
1350 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_wp_page()
1352 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in do_huge_pmd_wp_page()
1362 mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); in do_huge_pmd_wp_page()
1380 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, in follow_trans_huge_pmd() argument
1385 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1404 touch_pmd(vma, addr, pmd, flags); in follow_trans_huge_pmd()
1405 if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) { in follow_trans_huge_pmd()
1451 struct vm_area_struct *vma = vmf->vma; in do_huge_pmd_numa_page() local
1462 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1500 target_nid = mpol_misplaced(page, vma, haddr); in do_huge_pmd_numa_page()
1553 if (mm_tlb_flush_pending(vma->vm_mm)) in do_huge_pmd_numa_page()
1554 flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE); in do_huge_pmd_numa_page()
1562 migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, in do_huge_pmd_numa_page()
1574 pmd = pmd_modify(pmd, vma->vm_page_prot); in do_huge_pmd_numa_page()
1578 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1579 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd); in do_huge_pmd_numa_page()
1599 bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in madvise_free_huge_pmd() argument
1610 ptl = pmd_trans_huge_lock(pmd, vma); in madvise_free_huge_pmd()
1653 pmdp_invalidate(vma, addr, pmd); in madvise_free_huge_pmd()
1678 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pmd() argument
1686 ptl = __pmd_trans_huge_lock(pmd, vma); in zap_huge_pmd()
1698 if (vma_is_dax(vma)) { in zap_huge_pmd()
1746 struct vm_area_struct *vma) in pmd_move_must_withdraw() argument
1754 return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); in pmd_move_must_withdraw()
1769 bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, in move_huge_pmd() argument
1775 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1796 old_ptl = __pmd_trans_huge_lock(old_pmd, vma); in move_huge_pmd()
1806 if (pmd_move_must_withdraw(new_ptl, old_ptl, vma)) { in move_huge_pmd()
1814 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); in move_huge_pmd()
1829 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in change_huge_pmd() argument
1832 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1838 ptl = __pmd_trans_huge_lock(pmd, vma); in change_huge_pmd()
1899 pmdp_invalidate(vma, addr, pmd); in change_huge_pmd()
1915 BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry)); in change_huge_pmd()
1927 spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) in __pmd_trans_huge_lock() argument
1930 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1944 spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) in __pud_trans_huge_lock() argument
1948 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
1956 int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, in zap_huge_pud() argument
1962 ptl = __pud_trans_huge_lock(pud, vma); in zap_huge_pud()
1974 if (vma_is_dax(vma)) { in zap_huge_pud()
1984 static void __split_huge_pud_locked(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud_locked() argument
1988 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pud_locked()
1989 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PUD_SIZE, vma); in __split_huge_pud_locked()
1994 pudp_huge_clear_flush_notify(vma, haddr, pud); in __split_huge_pud_locked()
1997 void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, in __split_huge_pud() argument
2001 struct mm_struct *mm = vma->vm_mm; in __split_huge_pud()
2008 __split_huge_pud_locked(vma, pud, haddr); in __split_huge_pud()
2016 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, in __split_huge_zero_page_pmd() argument
2019 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2025 pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_zero_page_pmd()
2032 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); in __split_huge_zero_page_pmd()
2043 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd_locked() argument
2046 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
2055 VM_BUG_ON_VMA(vma->vm_start > haddr, vma); in __split_huge_pmd_locked()
2056 VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma); in __split_huge_pmd_locked()
2062 if (!vma_is_anonymous(vma)) { in __split_huge_pmd_locked()
2063 _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd); in __split_huge_pmd_locked()
2070 if (vma_is_dax(vma)) in __split_huge_pmd_locked()
2082 return __split_huge_zero_page_pmd(vma, haddr, pmd); in __split_huge_pmd_locked()
2102 pmdp_huge_split_prepare(vma, haddr, pmd); in __split_huge_pmd_locked()
2120 entry = mk_pte(page + i, READ_ONCE(vma->vm_page_prot)); in __split_huge_pmd_locked()
2121 entry = maybe_mkwrite(entry, vma); in __split_huge_pmd_locked()
2179 pmdp_invalidate(vma, haddr, pmd); in __split_huge_pmd_locked()
2190 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, in __split_huge_pmd() argument
2194 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd()
2214 __split_huge_pmd_locked(vma, pmd, haddr, freeze); in __split_huge_pmd()
2220 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, in split_huge_pmd_address() argument
2228 pgd = pgd_offset(vma->vm_mm, address); in split_huge_pmd_address()
2242 __split_huge_pmd(vma, pmd, address, freeze, page); in split_huge_pmd_address()
2245 void vma_adjust_trans_huge(struct vm_area_struct *vma, in vma_adjust_trans_huge() argument
2256 (start & HPAGE_PMD_MASK) >= vma->vm_start && in vma_adjust_trans_huge()
2257 (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in vma_adjust_trans_huge()
2258 split_huge_pmd_address(vma, start, false, NULL); in vma_adjust_trans_huge()
2266 (end & HPAGE_PMD_MASK) >= vma->vm_start && in vma_adjust_trans_huge()
2267 (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end) in vma_adjust_trans_huge()
2268 split_huge_pmd_address(vma, end, false, NULL); in vma_adjust_trans_huge()
2276 struct vm_area_struct *next = vma->vm_next; in vma_adjust_trans_huge()
2842 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry() local
2843 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
2852 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); in set_pmd_migration_entry()
2854 pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
2868 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd() local
2869 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
2880 pmde = pmd_mkold(mk_huge_pmd(new, vma->vm_page_prot)); in remove_migration_pmd()
2884 pmde = maybe_pmd_mkwrite(pmde, vma); in remove_migration_pmd()
2886 flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); in remove_migration_pmd()
2887 page_add_anon_rmap(new, vma, mmun_start, true); in remove_migration_pmd()
2889 if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new)) in remove_migration_pmd()
2891 update_mmu_cache_pmd(vma, address, pvmw->pmd); in remove_migration_pmd()
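
The two write-enable helpers near the top of this listing (lines 470-472 and 795-797) appear only through their VM_WRITE checks; for reference, here is a minimal sketch of how they read in this version of mm/huge_memory.c. The pmd_mkwrite()/pud_mkwrite() calls and the return statements are filled in as an assumption and are not part of the matched lines above.

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	/* Mark the huge pmd writable only if the VMA permits writes. */
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);		/* assumed completion of line 472 */
	return pmd;
}

static pud_t maybe_pud_mkwrite(pud_t pud, struct vm_area_struct *vma)
{
	/* Same check at the pud (huge PUD) level. */
	if (likely(vma->vm_flags & VM_WRITE))
		pud = pud_mkwrite(pud);		/* assumed completion of line 797 */
	return pud;
}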