Lines matching refs: vm_mm (all hits in mm/huge_memory.c)
591 if (mem_cgroup_charge(page, vma->vm_mm, gfp)) { in __do_huge_pmd_anonymous_page()
599 pgtable = pte_alloc_one(vma->vm_mm); in __do_huge_pmd_anonymous_page()
613 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in __do_huge_pmd_anonymous_page()
619 ret = check_stable_address_space(vma->vm_mm); in __do_huge_pmd_anonymous_page()
629 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
639 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable); in __do_huge_pmd_anonymous_page()
640 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry); in __do_huge_pmd_anonymous_page()
641 add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR); in __do_huge_pmd_anonymous_page()
642 mm_inc_nr_ptes(vma->vm_mm); in __do_huge_pmd_anonymous_page()
645 count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC); in __do_huge_pmd_anonymous_page()
653 pte_free(vma->vm_mm, pgtable); in __do_huge_pmd_anonymous_page()
725 !mm_forbids_zeropage(vma->vm_mm) && in do_huge_pmd_anonymous_page()
730 pgtable = pte_alloc_one(vma->vm_mm); in do_huge_pmd_anonymous_page()
733 zero_page = mm_get_huge_zero_page(vma->vm_mm); in do_huge_pmd_anonymous_page()
735 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
739 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_anonymous_page()
742 ret = check_stable_address_space(vma->vm_mm); in do_huge_pmd_anonymous_page()
745 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
748 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
752 set_huge_zero_page(pgtable, vma->vm_mm, vma, in do_huge_pmd_anonymous_page()
758 pte_free(vma->vm_mm, pgtable); in do_huge_pmd_anonymous_page()
776 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pmd()
854 pgtable = pte_alloc_one(vma->vm_mm); in vmf_insert_pfn_pmd_prot()
877 struct mm_struct *mm = vma->vm_mm; in insert_pfn_pud()
968 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pmd()
1140 struct mm_struct *mm = vma->vm_mm; in follow_devmap_pud()
1234 vmf->ptl = pud_lock(vmf->vma->vm_mm, vmf->pud); in huge_pud_set_accessed()
1256 vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd); in huge_pmd_set_accessed()
1277 vmf->ptl = pmd_lockptr(vma->vm_mm, vmf->pmd); in do_huge_pmd_wp_page()
1345 struct mm_struct *mm = vma->vm_mm; in follow_trans_huge_pmd()
1424 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd); in do_huge_pmd_numa_page()
1513 if (mm_tlb_flush_pending(vma->vm_mm)) { in do_huge_pmd_numa_page()
1524 mmu_notifier_invalidate_range(vma->vm_mm, haddr, in do_huge_pmd_numa_page()
1534 migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma, in do_huge_pmd_numa_page()
1550 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, pmd); in do_huge_pmd_numa_page()
1746 struct mm_struct *mm = vma->vm_mm; in move_huge_pmd()
1798 struct mm_struct *mm = vma->vm_mm; in change_huge_pmd()
1902 ptl = pmd_lock(vma->vm_mm, pmd); in __pmd_trans_huge_lock()
1920 ptl = pud_lock(vma->vm_mm, pud); in __pud_trans_huge_lock()
1973 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pud()
1977 ptl = pud_lock(vma->vm_mm, pud); in __split_huge_pud()
1995 struct mm_struct *mm = vma->vm_mm; in __split_huge_zero_page_pmd()
2031 struct mm_struct *mm = vma->vm_mm; in __split_huge_pmd_locked()
2230 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm, in __split_huge_pmd()
2234 ptl = pmd_lock(vma->vm_mm, pmd); in __split_huge_pmd()
2309 pgd = pgd_offset(vma->vm_mm, address); in split_huge_pmd_address()
2974 struct mm_struct *mm = vma->vm_mm; in set_pmd_migration_entry()
2999 struct mm_struct *mm = vma->vm_mm; in remove_migration_pmd()
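Every hit above reaches the owning address space through vma->vm_mm, most often to take the PMD/PUD page-table lock or to update mm-wide counters under it. Below is a minimal sketch of that recurring idiom, modeled on the set_pmd_at/add_mm_counter/mm_inc_nr_ptes sequence at lines 639-642 of __do_huge_pmd_anonymous_page(); it assumes v5.8-era kernel APIs, and the function name example_install_huge_pmd() is hypothetical, not a function from the listing.

    /*
     * Illustrative sketch only: fetch the mm_struct via vma->vm_mm, take
     * the PMD-level page-table lock, then install a huge PMD and account
     * for it while the lock is held.
     */
    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    static void example_install_huge_pmd(struct vm_area_struct *vma,
                                         unsigned long haddr, pmd_t *pmd,
                                         pmd_t entry, pgtable_t pgtable)
    {
            struct mm_struct *mm = vma->vm_mm;      /* owning address space */
            spinlock_t *ptl;

            ptl = pmd_lock(mm, pmd);                /* PMD split lock */
            pgtable_trans_huge_deposit(mm, pmd, pgtable); /* stash PTE table
                                                             for a later split */
            set_pmd_at(mm, haddr, pmd, entry);      /* publish the huge mapping */
            add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR); /* RSS grows by
                                                               HPAGE_PMD_NR base pages */
            mm_inc_nr_ptes(mm);                     /* account the deposited table */
            spin_unlock(ptl);
    }

The same shape appears throughout the listing: callers that only read or modify an existing entry (e.g. __pmd_trans_huge_lock(), __split_huge_pmd()) still start from pmd_lock(vma->vm_mm, pmd), since the per-mm (or per-PMD, with split page-table locks) spinlock is what serializes huge-PMD installation, split, and zap.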