/mm/ |
D | huge_memory.c |
    487   pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)  in maybe_pmd_mkwrite() argument
    490   pmd = pmd_mkwrite(pmd);  in maybe_pmd_mkwrite()
    491   return pmd;  in maybe_pmd_mkwrite()
    609   vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
    610   if (unlikely(!pmd_none(*vmf->pmd))) {  in __do_huge_pmd_anonymous_page()
    637   pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  in __do_huge_pmd_anonymous_page()
    638   set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);  in __do_huge_pmd_anonymous_page()
    695   struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,  in set_huge_zero_page() argument
    699   if (!pmd_none(*pmd))  in set_huge_zero_page()
    704   pgtable_trans_huge_deposit(mm, pmd, pgtable);  in set_huge_zero_page()
    [all …]
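
The maybe_pmd_mkwrite() hits at 487-491 reduce to a three-line helper, and the __do_huge_pmd_anonymous_page() hits show its result being installed under the PMD lock. A minimal sketch of the helper, assuming the usual THP definitions from <linux/mm.h>; the real body is at the lines cited:

    pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
    {
            /* Mark the huge PMD writable only if the VMA itself is writable;
             * callers then set_pmd_at() the entry while holding pmd_lock(),
             * as at hits 609-638 above. */
            if (likely(vma->vm_flags & VM_WRITE))
                    pmd = pmd_mkwrite(pmd);
            return pmd;
    }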
|
D | memory.c |
    226   static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  in free_pte_range() argument
    229   pgtable_t token = pmd_pgtable(*pmd);  in free_pte_range()
    230   pmd_clear(pmd);  in free_pte_range()
    239   pmd_t *pmd;  in free_pmd_range() local
    244   pmd = pmd_offset(pud, addr);  in free_pmd_range()
    247   if (pmd_none_or_clear_bad(pmd))  in free_pmd_range()
    249   free_pte_range(tlb, pmd, addr);  in free_pmd_range()
    250   } while (pmd++, addr = next, addr != end);  in free_pmd_range()
    263   pmd = pmd_offset(pud, start);  in free_pmd_range()
    265   pmd_free_tlb(tlb, pmd, start);  in free_pmd_range()
    [all …]
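
The free_pmd_range() fragments follow the kernel's canonical PMD-level walk: step entry by entry, clamp each step with pmd_addr_end(), and skip empty or corrupt slots. A condensed sketch of the loop, assuming kernel context; the trailing free of the PMD page itself (hits 263-265) is omitted:

    pmd_t *pmd = pmd_offset(pud, addr);
    unsigned long next;

    do {
            next = pmd_addr_end(addr, end);  /* end of this PMD's span, or 'end' */
            if (pmd_none_or_clear_bad(pmd))
                    continue;                /* empty, or corrupt and now cleared */
            free_pte_range(tlb, pmd, addr);  /* tear down the PTE page below it */
    } while (pmd++, addr = next, addr != end);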
|
D | mprotect.c |
    38    static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  in change_pte_range() argument
    53    if (pmd_trans_unstable(pmd))  in change_pte_range()
    61    pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
    168   static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)  in pmd_none_or_clear_bad_unless_trans_huge() argument
    170   pmd_t pmdval = pmd_read_atomic(pmd);  in pmd_none_or_clear_bad_unless_trans_huge()
    182   pmd_clear_bad(pmd);  in pmd_none_or_clear_bad_unless_trans_huge()
    193   pmd_t *pmd;  in change_pmd_range() local
    201   pmd = pmd_offset(pud, addr);  in change_pmd_range()
    215   if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&  in change_pmd_range()
    216   pmd_none_or_clear_bad_unless_trans_huge(pmd))  in change_pmd_range()
    [all …]
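
The helper at hits 168-182 classifies a possibly-racing PMD without taking the page-table lock; change_pmd_range() uses it at 215-216 so a THP that is concurrently splitting is neither skipped as bad nor walked as a PTE table. A sketch close to the cited code, assuming CONFIG_TRANSPARENT_HUGEPAGE:

    static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
    {
            pmd_t pmdval = pmd_read_atomic(pmd);  /* atomic snapshot, no lock */

            barrier();              /* pairs with the THP split protocol */
            if (pmd_none(pmdval))
                    return 1;       /* nothing mapped: caller skips the range */
            if (pmd_trans_huge(pmdval))
                    return 0;       /* huge: must not be walked as a PTE table */
            if (unlikely(pmd_bad(pmdval))) {
                    pmd_clear_bad(pmd);
                    return 1;       /* corrupt entry reported and cleared */
            }
            return 0;
    }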
|
D | gup.c |
    183   unsigned long address, pmd_t *pmd, unsigned int flags,  in follow_page_pte() argument
    203   if (unlikely(pmd_bad(*pmd)))  in follow_page_pte()
    206   ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  in follow_page_pte()
    223   migration_entry_wait(mm, pmd, address);  in follow_page_pte()
    333   pmd_t *pmd, pmdval;  in follow_pmd_mask() local
    338   pmd = pmd_offset(pudp, address);  in follow_pmd_mask()
    343   pmdval = READ_ONCE(*pmd);  in follow_pmd_mask()
    367   pmd_migration_entry_wait(mm, pmd);  in follow_pmd_mask()
    368   pmdval = READ_ONCE(*pmd);  in follow_pmd_mask()
    378   ptl = pmd_lock(mm, pmd);  in follow_pmd_mask()
    [all …]
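
follow_pmd_mask() never trusts the live PMD twice: it takes one READ_ONCE() snapshot, and after waiting on a migration entry (hit 367) it re-reads before deciding anything. A sketch of that snapshot-and-retry shape; no_page_table() is gup.c's internal error helper, and the surrounding declarations are assumed:

    pmd_t pmdval;

    retry:
            pmdval = READ_ONCE(*pmd);       /* one snapshot; a THP may change under us */
            if (pmd_none(pmdval))
                    return no_page_table(vma, flags);
            if (is_pmd_migration_entry(pmdval)) {
                    pmd_migration_entry_wait(mm, pmd);
                    goto retry;             /* may now be present: fresh snapshot */
            }
            ptl = pmd_lock(mm, pmd);        /* entry is stable until unlock */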
|
D | sparse-vmemmap.c |
    143   pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)  in vmemmap_pte_populate() argument
    145   pte_t *pte = pte_offset_kernel(pmd, addr);  in vmemmap_pte_populate()
    170   pmd_t *pmd = pmd_offset(pud, addr);  in vmemmap_pmd_populate() local
    171   if (pmd_none(*pmd)) {  in vmemmap_pmd_populate()
    175   pmd_populate_kernel(&init_mm, pmd, p);  in vmemmap_pmd_populate()
    177   return pmd;  in vmemmap_pmd_populate()
    223   pmd_t *pmd;  in vmemmap_populate_basepages() local
    236   pmd = vmemmap_pmd_populate(pud, addr, node);  in vmemmap_populate_basepages()
    237   if (!pmd)  in vmemmap_populate_basepages()
    239   pte = vmemmap_pte_populate(pmd, addr, node);  in vmemmap_populate_basepages()
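
vmemmap_pmd_populate() is the allocate-if-empty idiom that vmemmap_populate_basepages() repeats at every level: look up the slot, and only if it is none, allocate a zeroed node-local page and hook it in. A sketch close to the cited code; vmemmap_alloc_block_zero() is assumed from the same file:

    pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)
    {
            pmd_t *pmd = pmd_offset(pud, addr);

            if (pmd_none(*pmd)) {
                    void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);

                    if (!p)
                            return NULL;    /* allocation failure propagates up */
                    pmd_populate_kernel(&init_mm, pmd, p);
            }
            return pmd;                     /* existing or freshly populated */
    }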
|
D | page_vma_mapped.c |
    18    pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);  in map_pte()
    50    pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);  in map_pte()
    156   if (pvmw->pmd && !pvmw->pte)  in page_vma_mapped_walk()
    205   pvmw->pmd = pmd_offset(pud, pvmw->address);  in page_vma_mapped_walk()
    211   pmde = READ_ONCE(*pvmw->pmd);  in page_vma_mapped_walk()
    214   pvmw->ptl = pmd_lock(mm, pvmw->pmd);  in page_vma_mapped_walk()
    215   pmde = *pvmw->pmd;  in page_vma_mapped_walk()
    246   spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);  in page_vma_mapped_walk()
    275   pvmw->ptl = pte_lockptr(mm, pvmw->pmd);  in page_vma_mapped_walk()
    281   pvmw->ptl = pte_lockptr(mm, pvmw->pmd);  in page_vma_mapped_walk()
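
Callers do not poke pvmw->pte or pvmw->pmd by hand; they loop on page_vma_mapped_walk(), which yields each mapping of the page with the right lock already held (see page_referenced_one() and page_mkclean_one() under the rmap.c entry below). A sketch of the calling convention; handle_huge() and handle_pte() are hypothetical stand-ins for the caller's work:

    struct page_vma_mapped_walk pvmw = {
            .page = page,
            .vma = vma,
            .address = address,
    };

    while (page_vma_mapped_walk(&pvmw)) {
            /* Either pvmw.pmd (huge mapping) or pvmw.pte is valid here, and
             * pvmw.ptl is held; the walker unlocks when it advances or ends. */
            if (pvmw.pmd && !pvmw.pte)
                    handle_huge(&pvmw);     /* hypothetical */
            else
                    handle_pte(&pvmw);      /* hypothetical */
    }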
|
D | pgtable-generic.c |
    39    void pmd_clear_bad(pmd_t *pmd)  in pmd_clear_bad() argument
    41    pmd_ERROR(*pmd);  in pmd_clear_bad()
    42    pmd_clear(pmd);  in pmd_clear_bad()
    127   pmd_t pmd;  in pmdp_huge_clear_flush() local
    131   pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_huge_clear_flush()
    133   return pmd;  in pmdp_huge_clear_flush()
    202   pmd_t pmd;  in pmdp_collapse_flush() local
    206   pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_collapse_flush()
    210   return pmd;  in pmdp_collapse_flush()
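
pmdp_huge_clear_flush() and pmdp_collapse_flush() share one shape: atomically fetch-and-clear the huge entry, then flush the whole PMD-sized range before the old translation can be observed again. A sketch of the first, close to the generic implementation with the VM_BUG_ON() sanity checks trimmed:

    pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                                pmd_t *pmdp)
    {
            pmd_t pmd;

            /* address must be HPAGE_PMD_MASK-aligned in the real code */
            pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
            flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
            return pmd;     /* old entry, for the caller to reuse or free */
    }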
|
D | madvise.c |
    183   static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,  in swapin_walk_pmd_entry() argument
    190   if (pmd_none_or_trans_huge_or_clear_bad(pmd))  in swapin_walk_pmd_entry()
    199   orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
    300   static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,  in madvise_cold_or_pageout_pte_range() argument
    318   if (pmd_trans_huge(*pmd)) {  in madvise_cold_or_pageout_pte_range()
    323   ptl = pmd_trans_huge_lock(pmd, vma);  in madvise_cold_or_pageout_pte_range()
    327   orig_pmd = *pmd;  in madvise_cold_or_pageout_pte_range()
    358   pmdp_invalidate(vma, addr, pmd);  in madvise_cold_or_pageout_pte_range()
    361   set_pmd_at(mm, addr, pmd, orig_pmd);  in madvise_cold_or_pageout_pte_range()
    362   tlb_remove_pmd_tlb_entry(tlb, pmd, addr);  in madvise_cold_or_pageout_pte_range()
    [all …]
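
madvise_cold_or_pageout_pte_range() gates its THP path the standard way: an unlocked pmd_trans_huge() hint, then pmd_trans_huge_lock(), which returns the held lock only if the entry is still huge. A sketch of that gate; the work between invalidate and reinstall (hits 358-362) is elided:

    if (pmd_trans_huge(*pmd)) {             /* cheap unlocked hint; may race */
            spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

            if (ptl) {
                    pmd_t orig_pmd = *pmd;  /* stable while ptl is held */

                    /* ... pmdp_invalidate(), adjust flags, set_pmd_at() ... */
                    spin_unlock(ptl);
                    return 0;
            }
            /* lost the race with a split: fall through to the PTE loop */
    }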
|
D | khugepaged.c |
    910   unsigned long address, pmd_t *pmd,  in __collapse_huge_page_swapin() argument
    919   .pmd = pmd,  in __collapse_huge_page_swapin()
    928   vmf.pte = pte_offset_map(pmd, address);  in __collapse_huge_page_swapin()
    946   if (mm_find_pmd(mm, address) != pmd) {  in __collapse_huge_page_swapin()
    956   vmf.pte = pte_offset_map(pmd, vmf.address);  in __collapse_huge_page_swapin()
    969   pmd_t *pmd, _pmd;  in collapse_huge_page() local
    1011  pmd = mm_find_pmd(mm, address);  in collapse_huge_page()
    1012  if (!pmd) {  in collapse_huge_page()
    1024  if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {  in collapse_huge_page()
    1041  if (mm_find_pmd(mm, address) != pmd)  in collapse_huge_page()
    [all …]
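
Because __collapse_huge_page_swapin() can drop mmap_sem while faulting, both it (hit 946) and collapse_huge_page() (hit 1041) revalidate with mm_find_pmd() before trusting the saved pointer again. The shape, sketched:

    /* swap-in may have dropped and retaken mmap_sem: revalidate */
    if (mm_find_pmd(mm, address) != pmd) {
            /* the table changed under us (zapped, split, or already
             * collapsed): abandon this collapse attempt */
            return false;
    }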
|
D | pagewalk.c |
    7     static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  in walk_pte_range() argument
    14    pte = pte_offset_map(pmd, addr);  in walk_pte_range()
    32    pmd_t *pmd;  in walk_pmd_range() local
    37    pmd = pmd_offset(pud, addr);  in walk_pmd_range()
    41    if (pmd_none(*pmd)) {  in walk_pmd_range()
    53    err = ops->pmd_entry(pmd, addr, next, walk);  in walk_pmd_range()
    64    split_huge_pmd(walk->vma, pmd, addr);  in walk_pmd_range()
    65    if (pmd_trans_unstable(pmd))  in walk_pmd_range()
    67    err = walk_pte_range(pmd, addr, next, walk);  in walk_pmd_range()
    70    } while (pmd++, addr = next, addr != end);  in walk_pmd_range()
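
walk_pmd_range() offers each entry to the ops->pmd_entry callback first; only if the caller also wants PTEs does it split a possible THP and descend, retrying when the split races. A condensed sketch of hits 53-67, assuming an again: label at the top of the do/while loop as in the cited file:

    if (ops->pmd_entry) {
            err = ops->pmd_entry(pmd, addr, next, walk);
            if (err)
                    break;
    }
    if (!ops->pte_entry)
            continue;                       /* handled (or ignored) at PMD level */

    split_huge_pmd(walk->vma, pmd, addr);
    if (pmd_trans_unstable(pmd))
            goto again;                     /* split raced with us: redo this entry */
    err = walk_pte_range(pmd, addr, next, walk);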
|
D | hmm.c |
    395   static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)  in pmd_to_hmm_pfn_flags() argument
    397   if (pmd_protnone(pmd))  in pmd_to_hmm_pfn_flags()
    399   return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |  in pmd_to_hmm_pfn_flags()
    406   unsigned long end, uint64_t *pfns, pmd_t pmd)  in hmm_vma_handle_pmd() argument
    415   cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);  in hmm_vma_handle_pmd()
    419   if (pmd_protnone(pmd) || fault || write_fault)  in hmm_vma_handle_pmd()
    422   pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);  in hmm_vma_handle_pmd()
    424   if (pmd_devmap(pmd)) {  in hmm_vma_handle_pmd()
    442   unsigned long end, uint64_t *pfns, pmd_t pmd);
    562   pmd_t pmd;  in hmm_vma_walk_pmd() local
    [all …]
|
D | mremap.c |
    38    pmd_t *pmd;  in get_old_pmd() local
    52    pmd = pmd_offset(pud, addr);  in get_old_pmd()
    53    if (pmd_none(*pmd))  in get_old_pmd()
    56    return pmd;  in get_old_pmd()
    65    pmd_t *pmd;  in alloc_new_pmd() local
    75    pmd = pmd_alloc(mm, pud, addr);  in alloc_new_pmd()
    76    if (!pmd)  in alloc_new_pmd()
    79    VM_BUG_ON(pmd_trans_huge(*pmd));  in alloc_new_pmd()
    81    return pmd;  in alloc_new_pmd()
    201   pmd_t pmd;  in move_normal_pmd() local
    [all …]
|
D | mincore.c |
    122   static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  in mincore_pte_range() argument
    131   ptl = pmd_trans_huge_lock(pmd, vma);  in mincore_pte_range()
    138   if (pmd_trans_unstable(pmd)) {  in mincore_pte_range()
    143   ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in mincore_pte_range()
|
D | rmap.c |
    725   pmd_t *pmd = NULL;  in mm_find_pmd() local
    740   pmd = pmd_offset(pud, address);  in mm_find_pmd()
    746   pmde = *pmd;  in mm_find_pmd()
    749   pmd = NULL;  in mm_find_pmd()
    751   return pmd;  in mm_find_pmd()
    799   pvmw.pmd))  in page_referenced_one()
    936   pmd_t *pmd = pvmw.pmd;  in page_mkclean_one() local
    939   if (!pmd_dirty(*pmd) && !pmd_write(*pmd))  in page_mkclean_one()
    943   entry = pmdp_invalidate(vma, address, pmd);  in page_mkclean_one()
    946   set_pmd_at(vma->vm_mm, address, pmd, entry);  in page_mkclean_one()
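
mm_find_pmd() (hits 725-751) is the lockless lookup that ksm.c and khugepaged.c in this listing also rely on: descend pgd to p4d to pud to pmd and return the slot only if it holds a present, non-huge entry. A condensed sketch, assuming the five-level helpers from <asm/pgtable.h>:

    pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd = NULL;
            pmd_t pmde;

            if (!pgd_present(*pgd))
                    goto out;
            p4d = p4d_offset(pgd, address);
            if (!p4d_present(*p4d))
                    goto out;
            pud = pud_offset(p4d, address);
            if (!pud_present(*pud))
                    goto out;

            pmd = pmd_offset(pud, address);
            pmde = *pmd;            /* local copy: test present and !huge together */
            barrier();
            if (!pmd_present(pmde) || pmd_trans_huge(pmde))
                    pmd = NULL;     /* caller wants a PTE table, not a THP */
    out:
            return pmd;
    }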
|
D | vmalloc.c |
    64    static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)  in vunmap_pte_range() argument
    68    pte = pte_offset_kernel(pmd, addr);  in vunmap_pte_range()
    77    pmd_t *pmd;  in vunmap_pmd_range() local
    80    pmd = pmd_offset(pud, addr);  in vunmap_pmd_range()
    83    if (pmd_clear_huge(pmd))  in vunmap_pmd_range()
    85    if (pmd_none_or_clear_bad(pmd))  in vunmap_pmd_range()
    87    vunmap_pte_range(pmd, addr, next);  in vunmap_pmd_range()
    90    } while (pmd++, addr = next, addr != end);  in vunmap_pmd_range()
    140   static int vmap_pte_range(pmd_t *pmd, unsigned long addr,  in vmap_pte_range() argument
    150   pte = pte_alloc_kernel(pmd, addr);  in vmap_pte_range()
    [all …]
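
vunmap_pmd_range() is the teardown twin of the walk under memory.c above: the same do/while idiom, plus a pmd_clear_huge() fast path for huge kernel mappings. A condensed sketch of hits 77-90:

    pmd_t *pmd = pmd_offset(pud, addr);
    unsigned long next;

    do {
            next = pmd_addr_end(addr, end);
            if (pmd_clear_huge(pmd))
                    continue;               /* huge mapping cleared wholesale */
            if (pmd_none_or_clear_bad(pmd))
                    continue;               /* nothing mapped here */
            vunmap_pte_range(pmd, addr, next);
    } while (pmd++, addr = next, addr != end);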
|
D | migrate.c |
    340   void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,  in migration_entry_wait() argument
    343   spinlock_t *ptl = pte_lockptr(mm, pmd);  in migration_entry_wait()
    344   pte_t *ptep = pte_offset_map(pmd, address);  in migration_entry_wait()
    356   void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)  in pmd_migration_entry_wait() argument
    361   ptl = pmd_lock(mm, pmd);  in pmd_migration_entry_wait()
    362   if (!is_pmd_migration_entry(*pmd))  in pmd_migration_entry_wait()
    364   page = migration_entry_to_page(pmd_to_swp_entry(*pmd));  in pmd_migration_entry_wait()
    1966  bool pmd_trans_migrating(pmd_t pmd)  in pmd_trans_migrating() argument
    1968  struct page *page = pmd_page(pmd);  in pmd_trans_migrating()
    2034  pmd_t *pmd, pmd_t entry,  in migrate_misplaced_transhuge_page() argument
    [all …]
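
pmd_migration_entry_wait() (hits 356-364) pins the page named by the migration entry, drops the PMD lock, and sleeps until migration unlocks the page. A sketch close to the cited code; put_and_wait_on_page_locked() is assumed from this kernel's page-lock API:

    void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
    {
            spinlock_t *ptl = pmd_lock(mm, pmd);
            struct page *page;

            if (!is_pmd_migration_entry(*pmd))
                    goto unlock;            /* raced: migration already done */
            page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
            if (!get_page_unless_zero(page))
                    goto unlock;            /* page already freed */
            spin_unlock(ptl);
            put_and_wait_on_page_locked(page);  /* sleep until migration unlocks */
            return;
    unlock:
            spin_unlock(ptl);
    }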
|
D | swapfile.c |
    1868  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,  in unuse_pte() argument
    1888  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in unuse_pte()
    1924  static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  in unuse_pte_range() argument
    1938  pte = pte_offset_map(pmd, addr);  in unuse_pte_range()
    1959  vmf.pmd = pmd;  in unuse_pte_range()
    1971  ret = unuse_pte(vma, pmd, addr, entry, page);  in unuse_pte_range()
    1987  pte = pte_offset_map(pmd, addr);  in unuse_pte_range()
    2001  pmd_t *pmd;  in unuse_pmd_range() local
    2005  pmd = pmd_offset(pud, addr);  in unuse_pmd_range()
    2009  if (pmd_none_or_trans_huge_or_clear_bad(pmd))  in unuse_pmd_range()
    [all …]
|
D | mempolicy.c |
    441   static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,  in queue_pages_pmd() argument
    449   if (unlikely(is_pmd_migration_entry(*pmd))) {  in queue_pages_pmd()
    453   page = pmd_page(*pmd);  in queue_pages_pmd()
    456   __split_huge_pmd(walk->vma, pmd, addr, false, NULL);  in queue_pages_pmd()
    490   static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,  in queue_pages_pte_range() argument
    502   ptl = pmd_trans_huge_lock(pmd, vma);  in queue_pages_pte_range()
    504   ret = queue_pages_pmd(pmd, ptl, addr, end, walk);  in queue_pages_pte_range()
    510   if (pmd_trans_unstable(pmd))  in queue_pages_pte_range()
    513   mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in queue_pages_pte_range()
|
D | memory-failure.c |
    271   pmd_t *pmd;  in dev_pagemap_mapping_shift() local
    285   pmd = pmd_offset(pud, address);  in dev_pagemap_mapping_shift()
    286   if (!pmd_present(*pmd))  in dev_pagemap_mapping_shift()
    288   if (pmd_devmap(*pmd))  in dev_pagemap_mapping_shift()
    290   pte = pte_offset_map(pmd, address);  in dev_pagemap_mapping_shift()
|
D | memcontrol.c |
    5679  unsigned long addr, pmd_t pmd, union mc_target *target)  in get_mctgt_type_thp() argument
    5684  if (unlikely(is_swap_pmd(pmd))) {  in get_mctgt_type_thp()
    5686  !is_pmd_migration_entry(pmd));  in get_mctgt_type_thp()
    5689  page = pmd_page(pmd);  in get_mctgt_type_thp()
    5704  unsigned long addr, pmd_t pmd, union mc_target *target)  in get_mctgt_type_thp() argument
    5710  static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,  in mem_cgroup_count_precharge_pte_range() argument
    5718  ptl = pmd_trans_huge_lock(pmd, vma);  in mem_cgroup_count_precharge_pte_range()
    5725  if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)  in mem_cgroup_count_precharge_pte_range()
    5731  if (pmd_trans_unstable(pmd))  in mem_cgroup_count_precharge_pte_range()
    5733  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in mem_cgroup_count_precharge_pte_range()
    [all …]
|
D | page_idle.c |
    75    if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))  in page_idle_clear_pte_refs_one()
|
D | ksm.c |
    1124  pmd_t *pmd;  in replace_page() local
    1136  pmd = mm_find_pmd(mm, addr);  in replace_page()
    1137  if (!pmd)  in replace_page()
    1144  ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);  in replace_page()
|
D | internal.h |
    339   extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
|
/mm/kasan/ |
D | init.c |
    74    static inline bool kasan_pte_table(pmd_t pmd)  in kasan_pte_table() argument
    76    return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));  in kasan_pte_table()
    96    static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,  in zero_pte_populate() argument
    99    pte_t *pte = pte_offset_kernel(pmd, addr);  in zero_pte_populate()
    109   pte = pte_offset_kernel(pmd, addr);  in zero_pte_populate()
    116   pmd_t *pmd = pmd_offset(pud, addr);  in zero_pmd_populate() local
    123   pmd_populate_kernel(&init_mm, pmd,  in zero_pmd_populate()
    128   if (pmd_none(*pmd)) {  in zero_pmd_populate()
    138   pmd_populate_kernel(&init_mm, pmd, p);  in zero_pmd_populate()
    140   zero_pte_populate(pmd, addr, next);  in zero_pmd_populate()
    [all …]
|
D | common.c |
    644   pmd_t *pmd;  in shadow_mapped() local
    663   pmd = pmd_offset(pud, addr);  in shadow_mapped()
    664   if (pmd_none(*pmd))  in shadow_mapped()
    667   if (pmd_bad(*pmd))  in shadow_mapped()
    669   pte = pte_offset_kernel(pmd, addr);  in shadow_mapped()
|