
Lines matching references to vma

Each entry below is a line from the Linux kernel's memory-migration code (mm/migrate.c) that references a vm_area_struct pointer (vma): the leading number is the line in that file, followed by the matched code, the function it appears in, and whether vma is used there as an argument, a local, or a structure member.

202 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,  in remove_migration_pte()  argument
207 .vma = vma, in remove_migration_pte()
221 linear_page_index(vma, pvmw.address); in remove_migration_pte()
233 pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot))); in remove_migration_pte()
242 pte = maybe_mkwrite(pte, vma); in remove_migration_pte()
256 pte = arch_make_huge_pte(pte, vma, new, 0); in remove_migration_pte()
257 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
259 hugepage_add_anon_rmap(new, vma, pvmw.address); in remove_migration_pte()
265 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
268 page_add_anon_rmap(new, vma, pvmw.address, false); in remove_migration_pte()
272 if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new)) in remove_migration_pte()
279 update_mmu_cache(vma, pvmw.address, pvmw.pte); in remove_migration_pte()
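
The references above are from remove_migration_pte(), which runs once a page has finished migrating: it visits every mapping of the old page and replaces the migration swap entry with a present PTE for the new page. Below is a condensed, hedged sketch of the anonymous, non-hugetlb branch implied by those lines; the function name is illustrative, and error handling, THP and device-page cases are omitted, so treat it as a reading aid rather than the exact kernel source.

    /*
     * Sketch: rebuild a present PTE for the new page at the position found
     * by page_vma_mapped_walk() (anonymous, non-huge case only).
     */
    static void sketch_restore_pte(struct page *new, struct vm_area_struct *vma,
                                   struct page_vma_mapped_walk *pvmw,
                                   swp_entry_t entry)
    {
            pte_t pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));

            if (is_write_migration_entry(entry))
                    pte = maybe_mkwrite(pte, vma);  /* restore write permission */

            set_pte_at(vma->vm_mm, pvmw->address, pvmw->pte, pte);
            page_add_anon_rmap(new, vma, pvmw->address, false);

            /* Re-mlock the page if the VMA is mlocked and the page is not THP. */
            if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
                    mlock_vma_page(new);

            /* Let the architecture refresh any cached translation. */
            update_mmu_cache(vma, pvmw->address, pvmw->pte);
    }
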
350 void migration_entry_wait_huge(struct vm_area_struct *vma, in migration_entry_wait_huge() argument
353 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte); in migration_entry_wait_huge()
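
migration_entry_wait_huge() is the hugetlb flavour of the wait helper: it takes the huge-PTE lock for the vma's hstate and sleeps until the migration entry disappears. A hedged sketch of the kind of fault-path caller it serves (the locals are illustrative; compare the hugetlb fault handler):

    /* If the huge PTE holds a migration entry, wait and let the fault retry. */
    pte_t entry = huge_ptep_get(ptep);

    if (unlikely(is_hugetlb_entry_migration(entry))) {
            migration_entry_wait_huge(vma, mm, ptep);
            return 0;       /* the fault is retried once migration completes */
    }
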
1532 struct vm_area_struct *vma; in do_move_page_to_node_array() local
1538 vma = find_vma(mm, pp->addr); in do_move_page_to_node_array()
1539 if (!vma || pp->addr < vma->vm_start || !vma_migratable(vma)) in do_move_page_to_node_array()
1546 page = follow_page(vma, pp->addr, follflags); in do_move_page_to_node_array()
1709 struct vm_area_struct *vma; in do_pages_stat_array() local
1713 vma = find_vma(mm, addr); in do_pages_stat_array()
1714 if (!vma || addr < vma->vm_start) in do_pages_stat_array()
1718 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
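
The two groups above, do_move_page_to_node_array() and do_pages_stat_array(), back the move_pages(2) system call: for each user address they look up the VMA with find_vma(), check it is migratable, and use follow_page() either to migrate the page to the requested node or to report the node it currently sits on. A hedged user-space example of that interface through libnuma's wrapper (link with -lnuma; error handling kept minimal):

    /*
     * move_pages(2) either migrates the given pages to the requested nodes
     * (nodes != NULL) or just reports where each page currently is
     * (nodes == NULL), exercising the two kernel paths listed above.
     */
    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            long page_size = sysconf(_SC_PAGESIZE);
            void *buf;

            if (posix_memalign(&buf, page_size, page_size))
                    return 1;
            memset(buf, 0, page_size);      /* fault the page in first */

            void *pages[1] = { buf };
            int nodes[1]   = { 0 };         /* ask for node 0 */
            int status[1]  = { -1 };

            /* Move: the kernel walks find_vma()/follow_page() per address. */
            if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
                    perror("move_pages");
            printf("after move: status = %d\n", status[0]);

            /* Query only: nodes == NULL returns the current node per page. */
            if (move_pages(0, 1, pages, NULL, status, 0) < 0)
                    perror("move_pages (query)");
            printf("page is on node %d\n", status[0]);

            free(buf);
            return 0;
    }
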
1967 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1980 (vma->vm_flags & VM_EXEC)) in migrate_misplaced_page()
2024 struct vm_area_struct *vma, in migrate_misplaced_transhuge_page() argument
2094 entry = mk_huge_pmd(new_page, vma->vm_page_prot); in migrate_misplaced_transhuge_page()
2095 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); in migrate_misplaced_transhuge_page()
2104 flush_cache_range(vma, mmun_start, mmun_end); in migrate_misplaced_transhuge_page()
2105 page_add_anon_rmap(new_page, vma, mmun_start, true); in migrate_misplaced_transhuge_page()
2106 pmdp_huge_clear_flush_notify(vma, mmun_start, pmd); in migrate_misplaced_transhuge_page()
2108 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
2140 entry = pmd_modify(entry, vma->vm_page_prot); in migrate_misplaced_transhuge_page()
2142 update_mmu_cache_pmd(vma, address, &entry); in migrate_misplaced_transhuge_page()
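
migrate_misplaced_page() and migrate_misplaced_transhuge_page() are the automatic NUMA-balancing entry points: after checks such as the VM_EXEC test visible at line 1980, they try to move a page (or a transparent huge page, rebuilding the PMD with mk_huge_pmd()/pmd_modify() as the lines above show) to the node that just faulted on it. A hedged sketch of how a NUMA hinting fault might invoke the base-page variant (the wrapper name is illustrative; compare do_numa_page()):

    /* Hedged sketch of a NUMA hinting-fault caller. */
    static bool sketch_numa_fault_migrate(struct page *page,
                                          struct vm_area_struct *vma,
                                          int target_nid)
    {
            /* Non-zero return means the page now lives on target_nid. */
            return migrate_misplaced_page(page, vma, target_nid) != 0;
    }
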
2157 struct vm_area_struct *vma; member
2204 struct vm_area_struct *vma = walk->vma; in migrate_vma_collect_pmd() local
2205 struct mm_struct *mm = vma->vm_mm; in migrate_vma_collect_pmd()
2226 split_huge_pmd(vma, pmdp, addr); in migrate_vma_collect_pmd()
2296 page = _vm_normal_page(migrate->vma, addr, pte, true); in migrate_vma_collect_pmd()
2359 flush_tlb_range(walk->vma, start, end); in migrate_vma_collect_pmd()
2381 mm_walk.vma = migrate->vma; in migrate_vma_collect()
2382 mm_walk.mm = migrate->vma->vm_mm; in migrate_vma_collect()
2557 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
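
Lines 2157 through 2557 belong to the migrate_vma (HMM) machinery: a private struct migrate_vma carries the target vma (line 2157), migrate_vma_collect_pmd() walks its page tables and splits any huge PMD it meets (line 2226), and migrate_vma_prepare() can roll an entry back with remove_migration_pte() if the page turns out not to be migratable (line 2557). A hedged sketch of how the collect stage wires the vma into a page-table walk, assuming the pre-5.4 struct mm_walk / walk_page_range() interface (MMU-notifier calls and the pte_hole callback omitted):

    /* Hedged sketch: migrate->vma supplies both the VMA and, via vm_mm, the mm. */
    static void sketch_collect(struct migrate_vma *migrate)
    {
            struct mm_walk mm_walk = {
                    .pmd_entry = migrate_vma_collect_pmd,   /* per-PMD callback */
                    .vma       = migrate->vma,
                    .mm        = migrate->vma->vm_mm,
                    .private   = migrate,
            };

            walk_page_range(migrate->start, migrate->end, &mm_walk);
    }
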
2630 struct vm_area_struct *vma = migrate->vma; in migrate_vma_insert_page() local
2631 struct mm_struct *mm = vma->vm_mm; in migrate_vma_insert_page()
2643 if (!vma_is_anonymous(vma)) in migrate_vma_insert_page()
2677 if (unlikely(anon_vma_prepare(vma))) in migrate_vma_insert_page()
2679 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) in migrate_vma_insert_page()
2693 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2696 entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in migrate_vma_insert_page()
2697 if (vma->vm_flags & VM_WRITE) in migrate_vma_insert_page()
2702 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2703 if (vma->vm_flags & VM_WRITE) in migrate_vma_insert_page()
2728 if (userfaultfd_missing(vma)) { in migrate_vma_insert_page()
2735 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2738 lru_cache_add_active_or_unevictable(page, vma); in migrate_vma_insert_page()
2742 flush_cache_page(vma, addr, pte_pfn(*ptep)); in migrate_vma_insert_page()
2743 ptep_clear_flush_notify(vma, addr, ptep); in migrate_vma_insert_page()
2745 update_mmu_cache(vma, addr, ptep); in migrate_vma_insert_page()
2749 update_mmu_cache(vma, addr, ptep); in migrate_vma_insert_page()
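
migrate_vma_insert_page() covers the case where the destination slot was an unpopulated hole: it only accepts anonymous VMAs (line 2643), charges the new page to the memcg, and then installs a fresh PTE, either a device-private swap entry or an ordinary (possibly writable) mapping, before hooking up rmap, LRU and userfaultfd handling. A hedged condensation of the PTE-construction step visible in lines 2693 through 2749 (device-public, userfaultfd and error paths omitted; the surrounding locals are as in the listing):

    pte_t entry;

    if (is_device_private_page(page)) {
            /* Device-private memory is mapped through a special swap entry. */
            swp_entry_t swp_entry;

            swp_entry = make_device_private_entry(page,
                                                  vma->vm_flags & VM_WRITE);
            entry = swp_entry_to_pte(swp_entry);
    } else {
            /* Ordinary page: present PTE, writable only if the VMA allows it. */
            entry = mk_pte(page, vma->vm_page_prot);
            if (vma->vm_flags & VM_WRITE)
                    entry = pte_mkwrite(pte_mkdirty(entry));
    }

    page_add_new_anon_rmap(page, vma, addr, false);
    lru_cache_add_active_or_unevictable(page, vma);
    set_pte_at(mm, addr, ptep, entry);
    update_mmu_cache(vma, addr, ptep);
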
2772 struct vm_area_struct *vma = migrate->vma; in migrate_vma_pages() local
2773 struct mm_struct *mm = vma->vm_mm; in migrate_vma_pages()
2944 struct vm_area_struct *vma, in migrate_vma() argument
2956 if (!vma || is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_SPECIAL)) in migrate_vma()
2958 if (start < vma->vm_start || start >= vma->vm_end) in migrate_vma()
2960 if (end <= vma->vm_start || end > vma->vm_end) in migrate_vma()
2972 migrate.vma = vma; in migrate_vma()
2997 ops->alloc_and_copy(vma, src, dst, start, end, private); in migrate_vma()
3002 ops->finalize_and_map(vma, src, dst, start, end, private); in migrate_vma()
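
Finally, migrate_vma() (lines 2944 through 3002) is the entry point a device driver calls: it checks that the range lies inside a single, non-special, non-hugetlb VMA and then drives the collect/prepare/pages stages, calling back into the driver's alloc_and_copy() and finalize_and_map() hooks. A hedged sketch of a driver-side caller, assuming the callback-based API of this era; the my_*() helpers and the fixed-size arrays are illustrative assumptions, not kernel API.

    /*
     * Hedged driver-side sketch of the callback-based migrate_vma() API shown
     * above. src[]/dst[] carry one encoded pfn (migrate_pfn() plus
     * MIGRATE_PFN_* flags) per page in [start, end).
     */
    static void my_alloc_and_copy(struct vm_area_struct *vma,
                                  const unsigned long *src, unsigned long *dst,
                                  unsigned long start, unsigned long end,
                                  void *private)
    {
            unsigned long i, npages = (end - start) >> PAGE_SHIFT;

            for (i = 0; i < npages; i++) {
                    struct page *dpage, *spage;

                    if (!(src[i] & MIGRATE_PFN_MIGRATE))
                            continue;               /* core mm says: cannot move */

                    dpage = my_alloc_device_page(private);  /* hypothetical helper */
                    if (!dpage)
                            continue;               /* dst[i] stays 0: skip page */

                    spage = migrate_pfn_to_page(src[i]);    /* NULL for a hole */
                    if (spage)
                            my_copy_to_device(dpage, spage); /* hypothetical helper */

                    lock_page(dpage);               /* destination handed back locked */
                    dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
            }
    }

    static void my_finalize_and_map(struct vm_area_struct *vma,
                                    const unsigned long *src, const unsigned long *dst,
                                    unsigned long start, unsigned long end,
                                    void *private)
    {
            /* Point device page tables at whatever actually migrated. */
    }

    static const struct migrate_vma_ops my_migrate_ops = {
            .alloc_and_copy   = my_alloc_and_copy,
            .finalize_and_map = my_finalize_and_map,
    };

    /* Migrate one range that lies inside a single VMA (hedged sketch). */
    static int my_migrate_range(struct vm_area_struct *vma,
                                unsigned long start, unsigned long end,
                                void *drvdata)
    {
            unsigned long src[64] = { 0 }, dst[64] = { 0 }; /* assumes <= 64 pages */

            return migrate_vma(&my_migrate_ops, vma, start, end, src, dst, drvdata);
    }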