Lines matching refs: pmd

All of the hits below are in fs/proc/task_mmu.c, in the page-table walkers behind /proc/&lt;pid&gt;/smaps, clear_refs, pagemap and numa_maps. The leading numbers are the source lines of the matches; the trailing "argument"/"local" tags are the indexer's classification of the pmd symbol on that line.
606 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, in smaps_pmd_entry() argument
615 if (pmd_present(*pmd)) { in smaps_pmd_entry()
617 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); in smaps_pmd_entry()
618 } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) { in smaps_pmd_entry()
619 swp_entry_t entry = pmd_to_swp_entry(*pmd); in smaps_pmd_entry()
637 smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), in smaps_pmd_entry()
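Lines 606-637 are the THP accounting path of smaps: a present PMD is resolved to its huge page with follow_trans_huge_pmd() (FOLL_DUMP makes it fail on the huge zero page rather than count it), while a non-present swap PMD can only be a migration entry here. A minimal sketch of that shape, with the anon/shmem/file classification between the matched lines compressed into a comment and the final smaps_account() argument (mlock state in some kernel versions) taken as an assumption:

static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
                            struct mm_walk *walk)
{
        struct mem_size_stats *mss = walk->private;
        struct vm_area_struct *vma = walk->vma;
        bool locked = !!(vma->vm_flags & VM_LOCKED);    /* assumption, see above */
        struct page *page = NULL;

        if (pmd_present(*pmd)) {
                /* FOLL_DUMP: returns an ERR_PTR for the huge zero page */
                page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP);
        } else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
                swp_entry_t entry = pmd_to_swp_entry(*pmd);

                if (is_migration_entry(entry))
                        page = migration_entry_to_page(entry);
        }
        if (IS_ERR_OR_NULL(page))
                return;
        /* ... bump mss->anonymous_thp / shmem_thp / file_thp here ... */
        smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
                      locked);
}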
641 static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, in smaps_pmd_entry() argument
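Line 641 is the second definition of smaps_pmd_entry(): by all appearances the empty stub compiled in when CONFIG_TRANSPARENT_HUGEPAGE is off, which lets smaps_pte_range() below call it unconditionally.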
647 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, in smaps_pte_range() argument
654 ptl = pmd_trans_huge_lock(pmd, vma); in smaps_pte_range()
656 smaps_pmd_entry(pmd, addr, walk); in smaps_pte_range()
661 if (pmd_trans_unstable(pmd)) in smaps_pte_range()
668 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
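Lines 647-668 show the locking pattern that every walker in this file repeats: try pmd_trans_huge_lock() first and handle the whole range as one huge PMD under the returned spinlock; otherwise fence off a concurrent split with pmd_trans_unstable() and drop to a per-PTE walk under pte_offset_map_lock(). A hedged reconstruction of the skeleton, with the per-PTE accounting body elided:

static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                           struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        pte_t *pte;
        spinlock_t *ptl;

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                /* the whole [addr, end) range is one huge mapping */
                smaps_pmd_entry(pmd, addr, walk);
                spin_unlock(ptl);
                return 0;
        }

        if (pmd_trans_unstable(pmd))
                return 0;       /* PMD is mid-split; nothing stable to read */

        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE)
                smaps_pte_entry(pte, addr, walk);       /* base-page accounting */
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
        return 0;
}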
1173 pmd_t old, pmd = *pmdp; in clear_soft_dirty_pmd() local
1175 if (pmd_present(pmd)) { in clear_soft_dirty_pmd()
1179 pmd = pmd_mkdirty(pmd); in clear_soft_dirty_pmd()
1181 pmd = pmd_mkyoung(pmd); in clear_soft_dirty_pmd()
1183 pmd = pmd_wrprotect(pmd); in clear_soft_dirty_pmd()
1184 pmd = pmd_clear_soft_dirty(pmd); in clear_soft_dirty_pmd()
1186 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1187 } else if (is_migration_entry(pmd_to_swp_entry(pmd))) { in clear_soft_dirty_pmd()
1188 pmd = pmd_swp_clear_soft_dirty(pmd); in clear_soft_dirty_pmd()
1189 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
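Lines 1173-1189 clear the soft-dirty bit on a huge PMD for the clear_refs interface. A present entry is write-protected and its soft-dirty bit cleared so the next store faults and re-dirties it; the pmd_mkdirty()/pmd_mkyoung() steps at 1179/1181 fold the hardware dirty/young state back in after the entry has been invalidated (a pmdp_invalidate() sits between the matched lines, per the usual change_huge_pmd() dance). A migration entry only carries the swap-side bit, cleared with pmd_swp_clear_soft_dirty(). Filled in, the function plausibly reads:

static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t *pmdp)
{
        pmd_t old, pmd = *pmdp;

        if (pmd_present(pmd)) {
                /* invalidate first so hardware cannot set bits under us */
                old = pmdp_invalidate(vma, addr, pmdp);
                if (pmd_dirty(old))
                        pmd = pmd_mkdirty(pmd);
                if (pmd_young(old))
                        pmd = pmd_mkyoung(pmd);

                pmd = pmd_wrprotect(pmd);
                pmd = pmd_clear_soft_dirty(pmd);

                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
        } else if (is_migration_entry(pmd_to_swp_entry(pmd))) {
                pmd = pmd_swp_clear_soft_dirty(pmd);
                set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
        }
}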
1199 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, in clear_refs_pte_range() argument
1208 ptl = pmd_trans_huge_lock(pmd, vma); in clear_refs_pte_range()
1211 clear_soft_dirty_pmd(vma, addr, pmd); in clear_refs_pte_range()
1215 if (!pmd_present(*pmd)) in clear_refs_pte_range()
1218 page = pmd_page(*pmd); in clear_refs_pte_range()
1221 pmdp_test_and_clear_young(vma, addr, pmd); in clear_refs_pte_range()
1229 if (pmd_trans_unstable(pmd)) in clear_refs_pte_range()
1232 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
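Lines 1199-1232 are the clear_refs walker. Under pmd_trans_huge_lock() it either hands the soft-dirty case to clear_soft_dirty_pmd() or, for a present PMD, drops the "recently used" state: ClearPageReferenced() on the page plus pmdp_test_and_clear_young() on the entry. The tail is the same PTE fallback as lines 661-668. The huge-PMD branch, roughly (cp naming the walker's private clear_refs state, an assumption):

        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
                        clear_soft_dirty_pmd(vma, addr, pmd);
                        goto out;
                }
                if (!pmd_present(*pmd))
                        goto out;
                page = pmd_page(*pmd);
                /* clear both the software and the hardware referenced bits */
                ClearPageReferenced(page);
                pmdp_test_and_clear_young(vma, addr, pmd);
out:
                spin_unlock(ptl);
                return 0;
        }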
1503 pmd_t pmd = *pmdp; in pagemap_pmd_range() local
1509 if (pmd_present(pmd)) { in pagemap_pmd_range()
1510 page = pmd_page(pmd); in pagemap_pmd_range()
1513 if (pmd_soft_dirty(pmd)) in pagemap_pmd_range()
1516 frame = pmd_pfn(pmd) + in pagemap_pmd_range()
1520 else if (is_swap_pmd(pmd)) { in pagemap_pmd_range()
1521 swp_entry_t entry = pmd_to_swp_entry(pmd); in pagemap_pmd_range()
1531 if (pmd_swp_soft_dirty(pmd)) in pagemap_pmd_range()
1533 VM_BUG_ON(!is_pmd_migration_entry(pmd)); in pagemap_pmd_range()
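Lines 1503-1533 translate a huge PMD into pagemap entries. A present PMD sets PM_PRESENT (and PM_SOFT_DIRTY from pmd_soft_dirty()), with the reported frame being pmd_pfn() offset by the position of addr inside the huge page; a swap PMD is packed as a type/offset pair with PM_SWAP, and the VM_BUG_ON at 1533 documents that the only non-present swap PMD allowed here is a THP migration entry. A sketch of the two branches, with pm->show_pfn gating frame disclosure as elsewhere in this file:

        if (pmd_present(pmd)) {
                page = pmd_page(pmd);

                flags |= PM_PRESENT;
                if (pmd_soft_dirty(pmd))
                        flags |= PM_SOFT_DIRTY;
                if (pm->show_pfn)
                        frame = pmd_pfn(pmd) +
                                ((addr & ~PMD_MASK) >> PAGE_SHIFT);
        } else if (is_swap_pmd(pmd)) {
                swp_entry_t entry = pmd_to_swp_entry(pmd);

                if (pm->show_pfn)
                        frame = swp_type(entry) |
                                ((swp_offset(entry) +
                                  ((addr & ~PMD_MASK) >> PAGE_SHIFT))
                                 << MAX_SWAPFILES_SHIFT);
                flags |= PM_SWAP;
                if (pmd_swp_soft_dirty(pmd))
                        flags |= PM_SOFT_DIRTY;
                VM_BUG_ON(!is_pmd_migration_entry(pmd));
                page = migration_entry_to_page(entry);
        }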
1850 static struct page *can_gather_numa_stats_pmd(pmd_t pmd, in can_gather_numa_stats_pmd() argument
1857 if (!pmd_present(pmd)) in can_gather_numa_stats_pmd()
1860 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
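Lines 1850-1860 are the huge-page twin of can_gather_numa_stats(): no stats unless the PMD is present, and vm_normal_page_pmd() filters out special mappings (including the huge zero page) before the page is accepted. Presumably completed along these lines, mirroring the PTE variant:

static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
                                              struct vm_area_struct *vma,
                                              unsigned long addr)
{
        struct page *page;
        int nid;

        if (!pmd_present(pmd))
                return NULL;

        page = vm_normal_page_pmd(vma, addr, pmd);
        if (!page)
                return NULL;

        if (PageReserved(page))
                return NULL;

        nid = page_to_nid(page);
        if (!node_isset(nid, node_states[N_MEMORY]))
                return NULL;

        return page;
}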
1875 static int gather_pte_stats(pmd_t *pmd, unsigned long addr, in gather_pte_stats() argument
1885 ptl = pmd_trans_huge_lock(pmd, vma); in gather_pte_stats()
1889 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
1891 gather_stats(page, md, pmd_dirty(*pmd), in gather_pte_stats()
1897 if (pmd_trans_unstable(pmd)) in gather_pte_stats()
1900 orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); in gather_pte_stats()
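Lines 1875-1900 close the set with the numa_maps walker, which follows the skeleton sketched after line 668: when pmd_trans_huge_lock() succeeds, a single can_gather_numa_stats_pmd()/gather_stats() pair accounts the whole huge page (the argument truncated after pmd_dirty(*pmd) at 1891 is the page count, presumably HPAGE_PMD_SIZE/PAGE_SIZE); otherwise pmd_trans_unstable() is checked and the PTEs are walked one by one from pte_offset_map_lock(), each feeding gather_stats() with a count of 1.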