Lines Matching refs:vma

204 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,  in remove_migration_pte()  argument
209 .vma = vma, in remove_migration_pte()
223 linear_page_index(vma, pvmw.address); in remove_migration_pte()
235 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); in remove_migration_pte()
244 pte = maybe_mkwrite(pte, vma); in remove_migration_pte()
256 pte = arch_make_huge_pte(pte, vma, new, 0); in remove_migration_pte()
257 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
259 hugepage_add_anon_rmap(new, vma, pvmw.address); in remove_migration_pte()
265 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
268 page_add_anon_rmap(new, vma, pvmw.address, false); in remove_migration_pte()
272 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) in remove_migration_pte()
279 update_mmu_cache(vma, pvmw.address, pvmw.pte); in remove_migration_pte()
347 void migration_entry_wait_huge(struct vm_area_struct *vma, in migration_entry_wait_huge() argument
350 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); in migration_entry_wait_huge()
1528 struct vm_area_struct *vma; in add_page_for_migration() local
1535 vma = find_vma(mm, addr); in add_page_for_migration()
1536 if (!vma || addr < vma->vm_start || !vma_migratable(vma)) in add_page_for_migration()
1541 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1701 struct vm_area_struct *vma; in do_pages_stat_array() local
1705 vma = find_vma(mm, addr); in do_pages_stat_array()
1706 if (!vma || addr < vma->vm_start) in do_pages_stat_array()
1710 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1948 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1961 (vma->vm_flags & VM_EXEC)) in migrate_misplaced_page()
2004 struct vm_area_struct *vma, in migrate_misplaced_transhuge_page() argument
2038 flush_cache_range(vma, start, start + HPAGE_PMD_SIZE); in migrate_misplaced_transhuge_page()
2065 entry = mk_huge_pmd(new_page, vma->vm_page_prot); in migrate_misplaced_transhuge_page()
2066 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in migrate_misplaced_transhuge_page()
2076 page_add_anon_rmap(new_page, vma, start, true); in migrate_misplaced_transhuge_page()
2089 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
2119 entry = pmd_modify(entry, vma->vm_page_prot); in migrate_misplaced_transhuge_page()
2121 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
2173 struct vm_area_struct *vma = walk->vma; in migrate_vma_collect_pmd() local
2174 struct mm_struct *mm = vma->vm_mm; in migrate_vma_collect_pmd()
2195 split_huge_pmd(vma, pmdp, addr); in migrate_vma_collect_pmd()
2263 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2326 flush_tlb_range(walk->vma, start, end); in migrate_vma_collect_pmd()
2349 migrate->vma->vm_mm, migrate->start, migrate->end); in migrate_vma_collect()
2352 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, in migrate_vma_collect()
2511 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2648 if (!args->vma || is_vm_hugetlb_page(args->vma) || in migrate_vma_setup()
2649 (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma)) in migrate_vma_setup()
2653 if (args->start < args->vma->vm_start || in migrate_vma_setup()
2654 args->start >= args->vma->vm_end) in migrate_vma_setup()
2656 if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end) in migrate_vma_setup()
2688 struct vm_area_struct *vma = migrate->vma; in migrate_vma_insert_page() local
2689 struct mm_struct *mm = vma->vm_mm; in migrate_vma_insert_page()
2701 if (!vma_is_anonymous(vma)) in migrate_vma_insert_page()
2735 if (unlikely(anon_vma_prepare(vma))) in migrate_vma_insert_page()
2737 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) in migrate_vma_insert_page()
2751 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2755 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2756 if (vma->vm_flags & VM_WRITE) in migrate_vma_insert_page()
2781 if (userfaultfd_missing(vma)) { in migrate_vma_insert_page()
2788 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2791 lru_cache_add_active_or_unevictable(page, vma); in migrate_vma_insert_page()
2795 flush_cache_page(vma, addr, pte_pfn(*ptep)); in migrate_vma_insert_page()
2796 ptep_clear_flush_notify(vma, addr, ptep); in migrate_vma_insert_page()
2798 update_mmu_cache(vma, addr, ptep); in migrate_vma_insert_page()
2802 update_mmu_cache(vma, addr, ptep); in migrate_vma_insert_page()
2850 migrate->vma->vm_mm, in migrate_vma_pages()