/mm/
D | huge_memory.c |
    470  pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)  [in maybe_pmd_mkwrite(), argument]
    473  pmd = pmd_mkwrite(pmd);  [in maybe_pmd_mkwrite()]
    474  return pmd;  [in maybe_pmd_mkwrite()]
    582  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  [in __do_huge_pmd_anonymous_page()]
    583  if (unlikely(!pmd_none(*vmf->pmd))) {  [in __do_huge_pmd_anonymous_page()]
    610  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  [in __do_huge_pmd_anonymous_page()]
    611  set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);  [in __do_huge_pmd_anonymous_page()]
    658  struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,  [in set_huge_zero_page(), argument]
    662  if (!pmd_none(*pmd))  [in set_huge_zero_page()]
    667  pgtable_trans_huge_deposit(mm, pmd, pgtable);  [in set_huge_zero_page()]
    [all …]

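The __do_huge_pmd_anonymous_page() and set_huge_zero_page() hits above share one install pattern: take the pmd lock, re-check that the slot is still empty, deposit the preallocated page table, then publish the huge entry with set_pmd_at(). A minimal sketch of that pattern follows; the helper name is invented here, and the real functions additionally clean up what they allocated when they lose the race.

/* Sketch only: install_huge_pmd_sketch() is a made-up name, not kernel code. */
static void install_huge_pmd_sketch(struct vm_area_struct *vma,
                                    unsigned long haddr, pmd_t *pmd,
                                    pgtable_t pgtable, pmd_t entry)
{
        spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);    /* serialise against concurrent faults */

        if (pmd_none(*pmd)) {   /* nobody installed an entry while we allocated */
                pgtable_trans_huge_deposit(vma->vm_mm, pmd, pgtable);
                set_pmd_at(vma->vm_mm, haddr, pmd, entry);
        }
        spin_unlock(ptl);
}
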
D | gup.c |
     74  unsigned long address, pmd_t *pmd, unsigned int flags)  [in follow_page_pte(), argument]
     83  if (unlikely(pmd_bad(*pmd)))  [in follow_page_pte()]
     86  ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  [in follow_page_pte()]
    103  migration_entry_wait(mm, pmd, address);  [in follow_page_pte()]
    218  pmd_t *pmd;  [in follow_pmd_mask(), local]
    223  pmd = pmd_offset(pudp, address);  [in follow_pmd_mask()]
    224  if (pmd_none(*pmd))  [in follow_pmd_mask()]
    226  if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {  [in follow_pmd_mask()]
    227  page = follow_huge_pmd(mm, address, pmd, flags);  [in follow_pmd_mask()]
    232  if (is_hugepd(__hugepd(pmd_val(*pmd)))) {  [in follow_pmd_mask()]
    [all …]

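follow_page_pte() above shows the standard descent from a pmd to a locked pte: bail out on a corrupted entry, then map and lock the pte for the address. A hedged sketch of just that step; the helper name is invented, and the caller is expected to pte_unmap_unlock() the result.

/* Sketch only: not the GUP code itself, just the pmd-to-locked-pte step. */
static pte_t *lookup_pte_locked_sketch(struct mm_struct *mm, pmd_t *pmd,
                                       unsigned long address, spinlock_t **ptlp)
{
        if (unlikely(pmd_bad(*pmd)))
                return NULL;            /* corrupted entry: nothing sane to map */
        return pte_offset_map_lock(mm, pmd, address, ptlp);
}
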
D | memory.c |
    444  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,  [in free_pte_range(), argument]
    447  pgtable_t token = pmd_pgtable(*pmd);  [in free_pte_range()]
    448  pmd_clear(pmd);  [in free_pte_range()]
    457  pmd_t *pmd;  [in free_pmd_range(), local]
    462  pmd = pmd_offset(pud, addr);  [in free_pmd_range()]
    465  if (pmd_none_or_clear_bad(pmd))  [in free_pmd_range()]
    467  free_pte_range(tlb, pmd, addr);  [in free_pmd_range()]
    468  } while (pmd++, addr = next, addr != end);  [in free_pmd_range()]
    481  pmd = pmd_offset(pud, start);  [in free_pmd_range()]
    483  pmd_free_tlb(tlb, pmd, start);  [in free_pmd_range()]
    [all …]

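The free_pmd_range() hits show the canonical pmd-range loop: resolve the first pmd slot with pmd_offset(), clamp each step to the next pmd boundary, and skip slots that are empty or bad. A sketch of that loop shape only, not the freeing logic itself:

/* Sketch only: the shape of a pmd-range walk, with an invented name. */
static void pmd_range_walk_sketch(pud_t *pud, unsigned long addr, unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
                next = pmd_addr_end(addr, end);         /* end of this pmd's coverage, clamped to 'end' */
                if (pmd_none_or_clear_bad(pmd))
                        continue;                       /* empty or corrupt slot: nothing to do */
                /* ... operate on the pte page below *pmd for [addr, next) ... */
        } while (pmd++, addr = next, addr != end);
}
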
D | page_vma_mapped.c |
     18  pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);  [in map_pte()]
     50  pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);  [in map_pte()]
    147  if (pvmw->pmd && !pvmw->pte)  [in page_vma_mapped_walk()]
    176  pvmw->pmd = pmd_offset(pud, pvmw->address);  [in page_vma_mapped_walk()]
    182  pmde = READ_ONCE(*pvmw->pmd);  [in page_vma_mapped_walk()]
    184  pvmw->ptl = pmd_lock(mm, pvmw->pmd);  [in page_vma_mapped_walk()]
    185  if (likely(pmd_trans_huge(*pvmw->pmd))) {  [in page_vma_mapped_walk()]
    188  if (pmd_page(*pvmw->pmd) != page)  [in page_vma_mapped_walk()]
    191  } else if (!pmd_present(*pvmw->pmd)) {  [in page_vma_mapped_walk()]
    195  if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {  [in page_vma_mapped_walk()]
    [all …]

D | mprotect.c |
     37  static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  [in change_pte_range(), argument]
     53  if (pmd_trans_unstable(pmd))  [in change_pte_range()]
     61  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  [in change_pte_range()]
    155  static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)  [in pmd_none_or_clear_bad_unless_trans_huge(), argument]
    157  pmd_t pmdval = pmd_read_atomic(pmd);  [in pmd_none_or_clear_bad_unless_trans_huge()]
    169  pmd_clear_bad(pmd);  [in pmd_none_or_clear_bad_unless_trans_huge()]
    180  pmd_t *pmd;  [in change_pmd_range(), local]
    187  pmd = pmd_offset(pud, addr);  [in change_pmd_range()]
    201  if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&  [in change_pmd_range()]
    202  pmd_none_or_clear_bad_unless_trans_huge(pmd))  [in change_pmd_range()]
    [all …]

D | pgtable-generic.c |
     38  void pmd_clear_bad(pmd_t *pmd)  [in pmd_clear_bad(), argument]
     40  pmd_ERROR(*pmd);  [in pmd_clear_bad()]
     41  pmd_clear(pmd);  [in pmd_clear_bad()]
    126  pmd_t pmd;  [in pmdp_huge_clear_flush(), local]
    130  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  [in pmdp_huge_clear_flush()]
    132  return pmd;  [in pmdp_huge_clear_flush()]
    201  pmd_t pmd;  [in pmdp_collapse_flush(), local]
    205  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  [in pmdp_collapse_flush()]
    209  return pmd;  [in pmdp_collapse_flush()]

D | sparse-vmemmap.c |
    169  pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)  [in vmemmap_pte_populate(), argument]
    171  pte_t *pte = pte_offset_kernel(pmd, addr);  [in vmemmap_pte_populate()]
    185  pmd_t *pmd = pmd_offset(pud, addr);  [in vmemmap_pmd_populate(), local]
    186  if (pmd_none(*pmd)) {  [in vmemmap_pmd_populate()]
    190  pmd_populate_kernel(&init_mm, pmd, p);  [in vmemmap_pmd_populate()]
    192  return pmd;  [in vmemmap_pmd_populate()]
    238  pmd_t *pmd;  [in vmemmap_populate_basepages(), local]
    251  pmd = vmemmap_pmd_populate(pud, addr, node);  [in vmemmap_populate_basepages()]
    252  if (!pmd)  [in vmemmap_populate_basepages()]
    254  pte = vmemmap_pte_populate(pmd, addr, node);  [in vmemmap_populate_basepages()]

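vmemmap_pmd_populate() above is the populate-if-empty idiom used when building kernel mappings: install a new pte page only when the pmd slot is still clear. A sketch with an invented name, assuming the caller has already allocated the backing page:

/* Sketch only: modelled on vmemmap_pmd_populate(), not the function itself. */
static pmd_t *populate_pmd_sketch(pud_t *pud, unsigned long addr, void *pte_page)
{
        pmd_t *pmd = pmd_offset(pud, addr);

        if (pmd_none(*pmd))     /* nothing mapped at this slot yet */
                pmd_populate_kernel(&init_mm, pmd, pte_page);
        return pmd;
}
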
D | pagewalk.c |
      7  static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  [in walk_pte_range(), argument]
     13  pte = pte_offset_map(pmd, addr);  [in walk_pte_range()]
     31  pmd_t *pmd;  [in walk_pmd_range(), local]
     35  pmd = pmd_offset(pud, addr);  [in walk_pmd_range()]
     39  if (pmd_none(*pmd) || !walk->vma) {  [in walk_pmd_range()]
     51  err = walk->pmd_entry(pmd, addr, next, walk);  [in walk_pmd_range()]
     62  split_huge_pmd(walk->vma, pmd, addr);  [in walk_pmd_range()]
     63  if (pmd_trans_unstable(pmd))  [in walk_pmd_range()]
     65  err = walk_pte_range(pmd, addr, next, walk);  [in walk_pmd_range()]
     68  } while (pmd++, addr = next, addr != end);  [in walk_pmd_range()]

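walk_pmd_range() above either calls the caller's pmd_entry handler or, on the fallback path, splits a possible huge pmd and walks its ptes, skipping entries that are still in flux. A sketch of that fallback step only:

/* Sketch only: the fallback path of walk_pmd_range(), with a made-up name. */
static int walk_one_pmd_sketch(struct vm_area_struct *vma, pmd_t *pmd,
                               unsigned long addr)
{
        split_huge_pmd(vma, pmd, addr);         /* turn a THP mapping back into ptes */
        if (pmd_trans_unstable(pmd))
                return 0;                       /* raced with split/collapse: treat as empty */
        /* ... hand the ptes under *pmd to a pte-level handler ... */
        return 0;
}
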
D | khugepaged.c |
    880  unsigned long address, pmd_t *pmd,  [in __collapse_huge_page_swapin(), argument]
    888  .pmd = pmd,  [in __collapse_huge_page_swapin()]
    897  vmf.pte = pte_offset_map(pmd, address);  [in __collapse_huge_page_swapin()]
    915  if (mm_find_pmd(mm, address) != pmd) {  [in __collapse_huge_page_swapin()]
    925  vmf.pte = pte_offset_map(pmd, vmf.address);  [in __collapse_huge_page_swapin()]
    938  pmd_t *pmd, _pmd;  [in collapse_huge_page(), local]
    983  pmd = mm_find_pmd(mm, address);  [in collapse_huge_page()]
    984  if (!pmd) {  [in collapse_huge_page()]
    996  if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {  [in collapse_huge_page()]
   1016  if (mm_find_pmd(mm, address) != pmd)  [in collapse_huge_page()]
    [all …]

D | madvise.c |
    193  static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,  [in swapin_walk_pmd_entry(), argument]
    200  if (pmd_none_or_trans_huge_or_clear_bad(pmd))  [in swapin_walk_pmd_entry()]
    209  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  [in swapin_walk_pmd_entry()]
    310  static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,  [in madvise_free_pte_range(), argument]
    324  if (pmd_trans_huge(*pmd))  [in madvise_free_pte_range()]
    325  if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))  [in madvise_free_pte_range()]
    328  if (pmd_trans_unstable(pmd))  [in madvise_free_pte_range()]
    332  orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  [in madvise_free_pte_range()]
    378  pte_offset_map_lock(mm, pmd, addr, &ptl);  [in madvise_free_pte_range()]
    383  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  [in madvise_free_pte_range()]

D | mremap.c |
     38  pmd_t *pmd;  [in get_old_pmd(), local]
     52  pmd = pmd_offset(pud, addr);  [in get_old_pmd()]
     53  if (pmd_none(*pmd))  [in get_old_pmd()]
     56  return pmd;  [in get_old_pmd()]
     65  pmd_t *pmd;  [in alloc_new_pmd(), local]
     75  pmd = pmd_alloc(mm, pud, addr);  [in alloc_new_pmd()]
     76  if (!pmd)  [in alloc_new_pmd()]
     79  VM_BUG_ON(pmd_trans_huge(*pmd));  [in alloc_new_pmd()]
     81  return pmd;  [in alloc_new_pmd()]

D | vmalloc.c |
     60  static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)  [in vunmap_pte_range(), argument]
     64  pte = pte_offset_kernel(pmd, addr);  [in vunmap_pte_range()]
     73  pmd_t *pmd;  [in vunmap_pmd_range(), local]
     76  pmd = pmd_offset(pud, addr);  [in vunmap_pmd_range()]
     79  if (pmd_clear_huge(pmd))  [in vunmap_pmd_range()]
     81  if (pmd_none_or_clear_bad(pmd))  [in vunmap_pmd_range()]
     83  vunmap_pte_range(pmd, addr, next);  [in vunmap_pmd_range()]
     84  } while (pmd++, addr = next, addr != end);  [in vunmap_pmd_range()]
    134  static int vmap_pte_range(pmd_t *pmd, unsigned long addr,  [in vmap_pte_range(), argument]
    144  pte = pte_alloc_kernel(pmd, addr);  [in vmap_pte_range()]
    [all …]

D | mincore.c |
    114  static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,  [in mincore_pte_range(), argument]
    123  ptl = pmd_trans_huge_lock(pmd, vma);  [in mincore_pte_range()]
    130  if (pmd_trans_unstable(pmd)) {  [in mincore_pte_range()]
    135  ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  [in mincore_pte_range()]

D | rmap.c |
    715  pmd_t *pmd = NULL;  [in mm_find_pmd(), local]
    730  pmd = pmd_offset(pud, address);  [in mm_find_pmd()]
    736  pmde = *pmd;  [in mm_find_pmd()]
    739  pmd = NULL;  [in mm_find_pmd()]
    741  return pmd;  [in mm_find_pmd()]
    789  pvmw.pmd))  [in page_referenced_one()]
    923  pmd_t *pmd = pvmw.pmd;  [in page_mkclean_one(), local]
    926  if (!pmd_dirty(*pmd) && !pmd_write(*pmd))  [in page_mkclean_one()]
    930  entry = pmdp_huge_clear_flush(vma, address, pmd);  [in page_mkclean_one()]
    933  set_pmd_at(vma->vm_mm, address, pmd, entry);  [in page_mkclean_one()]

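mm_find_pmd() above resolves the pmd slot for an address and hands it back only if something usable is mapped there, reading the entry once into a local so all checks see a single snapshot. A rough sketch of that check; the real function's none/THP handling is more involved, and the helper name here is invented.

/* Sketch only: a simplified mm_find_pmd()-style check, assuming the pud is already resolved. */
static pmd_t *find_mapped_pmd_sketch(pud_t *pud, unsigned long address)
{
        pmd_t *pmd = pmd_offset(pud, address);
        pmd_t pmde = *pmd;              /* one snapshot; the entry can change under us */

        if (pmd_none(pmde) || !pmd_present(pmde))
                return NULL;            /* nothing usable mapped here */
        return pmd;
}
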
D | migrate.c |
    342  void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,  [in migration_entry_wait(), argument]
    345  spinlock_t *ptl = pte_lockptr(mm, pmd);  [in migration_entry_wait()]
    346  pte_t *ptep = pte_offset_map(pmd, address);  [in migration_entry_wait()]
    358  void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)  [in pmd_migration_entry_wait(), argument]
    363  ptl = pmd_lock(mm, pmd);  [in pmd_migration_entry_wait()]
    364  if (!is_pmd_migration_entry(*pmd))  [in pmd_migration_entry_wait()]
    366  page = migration_entry_to_page(pmd_to_swp_entry(*pmd));  [in pmd_migration_entry_wait()]
   1956  bool pmd_trans_migrating(pmd_t pmd)  [in pmd_trans_migrating(), argument]
   1958  struct page *page = pmd_page(pmd);  [in pmd_trans_migrating()]
   2025  pmd_t *pmd, pmd_t entry,  [in migrate_misplaced_transhuge_page(), argument]
    [all …]

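pmd_migration_entry_wait() above takes the pmd lock and only converts the entry to a page after confirming it really is a pmd migration entry. A stripped-down sketch of that lookup; the real function also pins the page and sleeps on its lock, and the helper name is made up.

/* Sketch only: the guarded lookup inside pmd_migration_entry_wait(). */
static struct page *pmd_migration_target_sketch(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);
        struct page *page = NULL;

        if (is_pmd_migration_entry(*pmd))       /* only then is the swp-entry conversion valid */
                page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
        spin_unlock(ptl);
        return page;
}
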
D | hmm.c |
    377  pmd_t pmd;  [in hmm_vma_walk_pmd(), local]
    388  pmd = pmd_read_atomic(pmdp);  [in hmm_vma_walk_pmd()]
    390  if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))  [in hmm_vma_walk_pmd()]
    392  if (pmd_protnone(pmd))  [in hmm_vma_walk_pmd()]
    395  if (write_fault && !pmd_write(pmd))  [in hmm_vma_walk_pmd()]
    398  pfn = pmd_pfn(pmd) + pte_index(addr);  [in hmm_vma_walk_pmd()]
    399  flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;  [in hmm_vma_walk_pmd()]

D | mempolicy.c |
    437  static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,  [in queue_pages_pmd(), argument]
    445  if (unlikely(is_pmd_migration_entry(*pmd))) {  [in queue_pages_pmd()]
    449  page = pmd_page(*pmd);  [in queue_pages_pmd()]
    452  __split_huge_pmd(walk->vma, pmd, addr, false, NULL);  [in queue_pages_pmd()]
    491  static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,  [in queue_pages_pte_range(), argument]
    502  ptl = pmd_trans_huge_lock(pmd, vma);  [in queue_pages_pte_range()]
    504  ret = queue_pages_pmd(pmd, ptl, addr, end, walk);  [in queue_pages_pte_range()]
    511  if (pmd_trans_unstable(pmd))  [in queue_pages_pte_range()]
    514  pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  [in queue_pages_pte_range()]
    538  pte = pte_offset_map_lock(walk->mm, pmd,  [in queue_pages_pte_range()]

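queue_pages_pte_range() above, like the memcontrol walker further down, uses the standard huge-or-ptes dispatch: pmd_trans_huge_lock() returns the held lock when one huge pmd maps the whole range, otherwise the walker falls back to individual ptes unless the pmd is mid-split. A sketch of the dispatch shape only, with an invented name:

/* Sketch only: the huge-pmd-or-ptes dispatch shared by several pmd_entry walkers. */
static int pmd_or_pte_walk_sketch(pmd_t *pmd, struct vm_area_struct *vma)
{
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, vma);

        if (ptl) {
                /* ... one huge pmd covers the range; handle it under 'ptl' ... */
                spin_unlock(ptl);
                return 0;
        }
        if (pmd_trans_unstable(pmd))
                return 0;               /* being split or collapsed: skip */
        /* ... otherwise pte_offset_map_lock() and walk the individual ptes ... */
        return 0;
}
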
D | swapfile.c |
   1766  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,  [in unuse_pte(), argument]
   1786  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  [in unuse_pte()]
   1822  static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,  [in unuse_pte_range(), argument]
   1839  pte = pte_offset_map(pmd, addr);  [in unuse_pte_range()]
   1847  ret = unuse_pte(vma, pmd, addr, entry, page);  [in unuse_pte_range()]
   1850  pte = pte_offset_map(pmd, addr);  [in unuse_pte_range()]
   1862  pmd_t *pmd;  [in unuse_pmd_range(), local]
   1866  pmd = pmd_offset(pud, addr);  [in unuse_pmd_range()]
   1870  if (pmd_none_or_trans_huge_or_clear_bad(pmd))  [in unuse_pmd_range()]
   1872  ret = unuse_pte_range(vma, pmd, addr, next, entry, page);  [in unuse_pmd_range()]
    [all …]

D | memcontrol.c |
   4760  unsigned long addr, pmd_t pmd, union mc_target *target)  [in get_mctgt_type_thp(), argument]
   4765  if (unlikely(is_swap_pmd(pmd))) {  [in get_mctgt_type_thp()]
   4767  !is_pmd_migration_entry(pmd));  [in get_mctgt_type_thp()]
   4770  page = pmd_page(pmd);  [in get_mctgt_type_thp()]
   4785  unsigned long addr, pmd_t pmd, union mc_target *target)  [in get_mctgt_type_thp(), argument]
   4791  static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,  [in mem_cgroup_count_precharge_pte_range(), argument]
   4799  ptl = pmd_trans_huge_lock(pmd, vma);  [in mem_cgroup_count_precharge_pte_range()]
   4806  if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)  [in mem_cgroup_count_precharge_pte_range()]
   4812  if (pmd_trans_unstable(pmd))  [in mem_cgroup_count_precharge_pte_range()]
   4814  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  [in mem_cgroup_count_precharge_pte_range()]
    [all …]

D | hugetlb.c |
   4749  pmd_t *pmd;  [in huge_pte_offset(), local]
   4765  pmd = pmd_offset(pud, addr);  [in huge_pte_offset()]
   4766  if (sz != PMD_SIZE && pmd_none(*pmd))  [in huge_pte_offset()]
   4769  if (pmd_huge(*pmd) || !pmd_present(*pmd))  [in huge_pte_offset()]
   4770  return (pte_t *)pmd;  [in huge_pte_offset()]
   4798  pmd_t *pmd, int flags)  [in follow_huge_pmd(), argument]
   4804  ptl = pmd_lockptr(mm, pmd);  [in follow_huge_pmd()]
   4810  if (!pmd_huge(*pmd))  [in follow_huge_pmd()]
   4812  pte = huge_ptep_get((pte_t *)pmd);  [in follow_huge_pmd()]
   4814  page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);  [in follow_huge_pmd()]
    [all …]

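follow_huge_pmd() above computes which subpage of the huge mapping backs a given address: start from the head page returned by pmd_page() and add the page index of the address within the PMD-sized region. A sketch of just that arithmetic, with an invented helper name:

/* Sketch only: the subpage computation from follow_huge_pmd(), nothing more. */
static struct page *huge_pmd_subpage_sketch(pmd_t *pmd, unsigned long address)
{
        if (!pmd_huge(*pmd))
                return NULL;
        /* head page plus the offset of 'address' within the PMD-sized mapping */
        return pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
}
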
D | page_idle.c |
     75  if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))  [in page_idle_clear_pte_refs_one()]

D | ksm.c |
   1100  pmd_t *pmd;  [in replace_page(), local]
   1113  pmd = mm_find_pmd(mm, addr);  [in replace_page()]
   1114  if (!pmd)  [in replace_page()]
   1121  ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);  [in replace_page()]

D | internal.h |
    330  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);

/mm/kasan/
D | kasan_init.c |
     50  static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,  [in zero_pte_populate(), argument]
     53  pte_t *pte = pte_offset_kernel(pmd, addr);  [in zero_pte_populate()]
     62  pte = pte_offset_kernel(pmd, addr);  [in zero_pte_populate()]
     69  pmd_t *pmd = pmd_offset(pud, addr);  [in zero_pmd_populate(), local]
     76  pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));  [in zero_pmd_populate()]
     80  if (pmd_none(*pmd)) {  [in zero_pmd_populate()]
     81  pmd_populate_kernel(&init_mm, pmd,  [in zero_pmd_populate()]
     84  zero_pte_populate(pmd, addr, next);  [in zero_pmd_populate()]
     85  } while (pmd++, addr = next, addr != end);  [in zero_pmd_populate()]
     97  pmd_t *pmd;  [in zero_pud_populate(), local]
    [all …]

D | kasan.c |
    746  pmd_t *pmd;  [in shadow_mapped(), local]
    765  pmd = pmd_offset(pud, addr);  [in shadow_mapped()]
    766  if (pmd_none(*pmd))  [in shadow_mapped()]
    769  if (pmd_bad(*pmd))  [in shadow_mapped()]
    771  pte = pte_offset_kernel(pmd, addr);  [in shadow_mapped()]