Searched refs:pvmw (Results 1 – 6 of 6) sorted by relevance
/mm/
page_vma_mapped.c

    in not_found():
          10  static inline bool not_found(struct page_vma_mapped_walk *pvmw)
          12          page_vma_mapped_walk_done(pvmw);
    in map_pte():
          16  static bool map_pte(struct page_vma_mapped_walk *pvmw)
          18          pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
          19          if (!(pvmw->flags & PVMW_SYNC)) {
          20                  if (pvmw->flags & PVMW_MIGRATION) {
          21                          if (!is_swap_pte(*pvmw->pte))
          39          if (is_swap_pte(*pvmw->pte)) {
          43                  entry = pte_to_swp_entry(*pvmw->pte);
          46          } else if (!pte_present(*pvmw->pte))
    [all …]
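page_vma_mapped.c implements the walker itself; the remaining five results are its callers. For orientation, here is a minimal sketch of the loop shape those callers share, assuming a kernel of this vintage (struct page_vma_mapped_walk still has a .page member); check_one_mapping() and its role as an rmap_walk() callback are illustrative, not taken from the results:

    /*
     * Minimal sketch of the canonical page_vma_mapped_walk() loop.
     * check_one_mapping() is a hypothetical rmap_walk() callback;
     * only the loop shape comes from the hits above.
     */
    #include <linux/mm.h>
    #include <linux/rmap.h>

    static bool check_one_mapping(struct page *page, struct vm_area_struct *vma,
                                  unsigned long address, void *arg)
    {
            struct page_vma_mapped_walk pvmw = {
                    .page = page,
                    .vma = vma,
                    .address = address,
            };

            /* Reports each place the page is mapped in this VMA, with the
             * matching page table lock held when the call returns true. */
            while (page_vma_mapped_walk(&pvmw)) {
                    if (pvmw.pte) {
                            /* PTE-level mapping at pvmw.address */
                    } else {
                            /* pvmw.pmd set, pvmw.pte NULL: PMD-mapped THP */
                    }
            }
            return true;    /* tell rmap_walk() to keep going */
    }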
rmap.c

    in page_referenced_one():
         758  struct page_vma_mapped_walk pvmw = {
         765  while (page_vma_mapped_walk(&pvmw)) {
         766          address = pvmw.address;
         769                  page_vma_mapped_walk_done(&pvmw);
         774          if (pvmw.pte) {
         776                                  pvmw.pte)) {
         790                                  pvmw.pmd))
    in page_mkclean_one():
         887  struct page_vma_mapped_walk pvmw = {
         905  while (page_vma_mapped_walk(&pvmw)) {
         908          address = pvmw.address;
    [all …]
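The hit at rmap.c:769 shows the one rule for leaving the loop early: page_vma_mapped_walk() returns each match with the page table lock held, so a caller that bails out of the loop must call page_vma_mapped_walk_done() first. A sketch of that bail-out, modeled on the VM_LOCKED check in page_referenced_one():

    /* Early exit: drop the walker's PTE/PMD lock before leaving
     * the loop, as page_referenced_one() does for mlocked VMAs. */
    while (page_vma_mapped_walk(&pvmw)) {
            if (vma->vm_flags & VM_LOCKED) {
                    page_vma_mapped_walk_done(&pvmw);
                    return false;   /* stop the surrounding rmap walk */
            }
            /* ... per-mapping work ... */
    }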
page_idle.c

    in page_idle_clear_pte_refs_one():
          58  struct page_vma_mapped_walk pvmw = {
          65  while (page_vma_mapped_walk(&pvmw)) {
          66          addr = pvmw.address;
          67          if (pvmw.pte) {
          72                  if (ptep_clear_young_notify(vma, addr, pvmw.pte))
          75                  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
ksm.c

    in write_protect_page():
        1036  struct page_vma_mapped_walk pvmw = {
        1044  pvmw.address = page_address_in_vma(page, vma);
        1045  if (pvmw.address == -EFAULT)
        1051                          pvmw.address,
        1052                          pvmw.address + PAGE_SIZE);
        1055  if (!page_vma_mapped_walk(&pvmw))
        1057  if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
        1060  if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) ||
        1061      (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) ||
        1066  flush_cache_page(vma, pvmw.address, page_to_pfn(page));
    [all …]
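write_protect_page() is the one caller here that does not loop: it pins the address down first with page_address_in_vma(), so a single page_vma_mapped_walk() call either finds that one mapping or fails. A hedged sketch of the one-shot pattern, with the real function's cleanup paths simplified to plain returns:

    /* One-shot lookup, as in write_protect_page(): exactly one
     * mapping is expected at a precomputed address. */
    struct page_vma_mapped_walk pvmw = {
            .page = page,
            .vma = vma,
    };

    pvmw.address = page_address_in_vma(page, vma);
    if (pvmw.address == -EFAULT)
            return;                 /* page is not mapped in this VMA */

    if (!page_vma_mapped_walk(&pvmw))
            return;                 /* mapping went away; nothing locked */
    if (!WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) {
            /* ... test and modify *pvmw.pte under the PTE lock ... */
    }
    page_vma_mapped_walk_done(&pvmw);       /* release the hit's lock */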
migrate.c

    in remove_migration_pte():
         207  struct page_vma_mapped_walk pvmw = {
         218  while (page_vma_mapped_walk(&pvmw)) {
         222          new = page - pvmw.page->index +
         223                          linear_page_index(vma, pvmw.address);
         227          if (!pvmw.pte) {
         229                  remove_migration_pmd(&pvmw, new);
         236          if (pte_swp_soft_dirty(*pvmw.pte))
         242          entry = pte_to_swp_entry(*pvmw.pte);
         257          set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
         259          hugepage_add_anon_rmap(new, vma, pvmw.address);
    [all …]
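remove_migration_pte() runs the walker in a different mode: PVMW_MIGRATION makes it stop at migration (swap-format) entries rather than present PTEs, and PVMW_SYNC makes it take the PTE lock rather than using the unlocked skip visible in map_pte() above. A sketch of that setup; the old/new naming follows the snippet and the loop body is condensed:

    /* Walk the migration entries that still point at the old page. */
    struct page_vma_mapped_walk pvmw = {
            .page = old,
            .vma = vma,
            .flags = PVMW_SYNC | PVMW_MIGRATION,
    };

    while (page_vma_mapped_walk(&pvmw)) {
            swp_entry_t entry;

            if (!pvmw.pte) {
                    /* PMD-mapped THP: hand the hit to remove_migration_pmd() */
                    remove_migration_pmd(&pvmw, new);
                    continue;
            }
            entry = pte_to_swp_entry(*pvmw.pte);
            /* ... rebuild a present PTE for the new page from entry ... */
    }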
huge_memory.c

    in set_pmd_migration_entry():
        3029  void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        3032          struct vm_area_struct *vma = pvmw->vma;
        3034          unsigned long address = pvmw->address;
        3039          if (!(pvmw->pmd && !pvmw->pte))
        3043          pmdval = *pvmw->pmd;
        3044          pmdp_invalidate(vma, address, pvmw->pmd);
        3051          set_pmd_at(mm, address, pvmw->pmd, pmdswp);
    in remove_migration_pmd():
        3056  void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
        3058          struct vm_area_struct *vma = pvmw->vma;
        3060          unsigned long address = pvmw->address;
    [all …]
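Unlike the other callers, these two helpers receive a pvmw that somebody else's loop is already driving, so each one starts by checking which level the walker stopped at (the hit at 3039). A minimal sketch of that guard; handle_pmd_level_hit() is a hypothetical name:

    /* A helper handed a live walk state first confirms the hit is a
     * PMD-mapped THP: pmd set, pte not set (cf. line 3039 above). */
    static void handle_pmd_level_hit(struct page_vma_mapped_walk *pvmw)
    {
            if (!(pvmw->pmd && !pvmw->pte))
                    return;         /* not a PMD-level hit */
            /* ... operate on *pvmw->pmd under the walker's lock ... */
    }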