/mm/
huge_memory.c
  181  static inline bool is_huge_zero_pmd(pmd_t pmd)    in is_huge_zero_pmd() argument
  183  return is_huge_zero_page(pmd_page(pmd));    in is_huge_zero_pmd()
  698  pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)    in maybe_pmd_mkwrite() argument
  701  pmd = pmd_mkwrite(pmd);    in maybe_pmd_mkwrite()
  702  return pmd;    in maybe_pmd_mkwrite()
  715  unsigned long haddr, pmd_t *pmd,    in __do_huge_pmd_anonymous_page() argument
  741  ptl = pmd_lock(mm, pmd);    in __do_huge_pmd_anonymous_page()
  742  if (unlikely(!pmd_none(*pmd))) {    in __do_huge_pmd_anonymous_page()
  754  pgtable_trans_huge_deposit(mm, pmd, pgtable);    in __do_huge_pmd_anonymous_page()
  755  set_pmd_at(mm, haddr, pmd, entry);    in __do_huge_pmd_anonymous_page()
  [all …]
gup.c
  46   unsigned long address, pmd_t *pmd, unsigned int flags)    in follow_page_pte() argument
  54   if (unlikely(pmd_bad(*pmd)))    in follow_page_pte()
  57   ptep = pte_offset_map_lock(mm, pmd, address, &ptl);    in follow_page_pte()
  74   migration_entry_wait(mm, pmd, address);    in follow_page_pte()
  159  pmd_t *pmd;    in follow_page_mask() local
  188  pmd = pmd_offset(pud, address);    in follow_page_mask()
  189  if (pmd_none(*pmd))    in follow_page_mask()
  191  if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {    in follow_page_mask()
  192  page = follow_huge_pmd(mm, address, pmd, flags);    in follow_page_mask()
  197  if ((flags & FOLL_NUMA) && pmd_numa(*pmd))    in follow_page_mask()
  [all …]
memory.c
  390  static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,    in free_pte_range() argument
  393  pgtable_t token = pmd_pgtable(*pmd);    in free_pte_range()
  394  pmd_clear(pmd);    in free_pte_range()
  403  pmd_t *pmd;    in free_pmd_range() local
  408  pmd = pmd_offset(pud, addr);    in free_pmd_range()
  411  if (pmd_none_or_clear_bad(pmd))    in free_pmd_range()
  413  free_pte_range(tlb, pmd, addr);    in free_pmd_range()
  414  } while (pmd++, addr = next, addr != end);    in free_pmd_range()
  427  pmd = pmd_offset(pud, start);    in free_pmd_range()
  429  pmd_free_tlb(tlb, pmd, start);    in free_pmd_range()
  [all …]
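Note: the free_pmd_range() lines above follow the kernel's standard PMD range-walk idiom: pmd_offset() to find the first entry, pmd_addr_end() to clamp each step, pmd_none_or_clear_bad() to skip empty or corrupt entries, and pmd++ to advance. A minimal, hypothetical sketch of that loop, not taken from memory.c (example_pmd_range_walk is an invented name and the PTE-level work is elided):

static void example_pmd_range_walk(pud_t *pud, unsigned long addr,
				   unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);		/* first PMD covering addr */
	do {
		next = pmd_addr_end(addr, end);	/* end of this PMD's span, clamped to end */
		if (pmd_none_or_clear_bad(pmd))	/* skip empty or corrupt entries */
			continue;
		/* ... operate on the PTE level for [addr, next) ... */
	} while (pmd++, addr = next, addr != end);
}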
mprotect.c
  39   static pte_t *lock_pte_protection(struct vm_area_struct *vma, pmd_t *pmd,    in lock_pte_protection() argument
  47   return pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);    in lock_pte_protection()
  49   pmdl = pmd_lock(vma->vm_mm, pmd);    in lock_pte_protection()
  50   if (unlikely(pmd_trans_huge(*pmd) || pmd_none(*pmd))) {    in lock_pte_protection()
  55   pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, ptl);    in lock_pte_protection()
  60   static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in change_pte_range() argument
  69   pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);    in change_pte_range()
  137  pmd_t *pmd;    in change_pmd_range() local
  144  pmd = pmd_offset(pud, addr);    in change_pmd_range()
  149  if (!pmd_trans_huge(*pmd) && pmd_none_or_clear_bad(pmd))    in change_pmd_range()
  [all …]
sparse-vmemmap.c
  101  pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node)    in vmemmap_pte_populate() argument
  103  pte_t *pte = pte_offset_kernel(pmd, addr);    in vmemmap_pte_populate()
  117  pmd_t *pmd = pmd_offset(pud, addr);    in vmemmap_pmd_populate() local
  118  if (pmd_none(*pmd)) {    in vmemmap_pmd_populate()
  122  pmd_populate_kernel(&init_mm, pmd, p);    in vmemmap_pmd_populate()
  124  return pmd;    in vmemmap_pmd_populate()
  157  pmd_t *pmd;    in vmemmap_populate_basepages() local
  167  pmd = vmemmap_pmd_populate(pud, addr, node);    in vmemmap_populate_basepages()
  168  if (!pmd)    in vmemmap_populate_basepages()
  170  pte = vmemmap_pte_populate(pmd, addr, node);    in vmemmap_populate_basepages()
pagewalk.c
  6    static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,    in walk_pte_range() argument
  12   pte = pte_offset_map(pmd, addr);    in walk_pte_range()
  30   pmd_t *pmd;    in walk_pmd_range() local
  34   pmd = pmd_offset(pud, addr);    in walk_pmd_range()
  38   if (pmd_none(*pmd)) {    in walk_pmd_range()
  50   err = walk->pmd_entry(pmd, addr, next, walk);    in walk_pmd_range()
  61   split_huge_page_pmd_mm(walk->mm, addr, pmd);    in walk_pmd_range()
  62   if (pmd_none_or_trans_huge_or_clear_bad(pmd))    in walk_pmd_range()
  64   err = walk_pte_range(pmd, addr, next, walk);    in walk_pmd_range()
  67   } while (pmd++, addr = next, addr != end);    in walk_pmd_range()
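Note: walk_pmd_range() above is the part of the generic page-table walker that invokes a caller-supplied pmd_entry hook. A hypothetical caller, assuming the struct mm_walk / walk_page_range() interface of this kernel generation (count_pmds and count_present_pmds are invented names):

static int count_pmds(pmd_t *pmd, unsigned long addr,
		      unsigned long end, struct mm_walk *walk)
{
	unsigned long *count = walk->private;

	if (pmd_present(*pmd))
		(*count)++;
	return 0;			/* non-zero return would abort the walk */
}

static unsigned long count_present_pmds(struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	unsigned long count = 0;
	struct mm_walk walk = {
		.pmd_entry = count_pmds,
		.mm        = mm,
		.private   = &count,
	};

	down_read(&mm->mmap_sem);	/* the walker expects mmap_sem held */
	walk_page_range(start, end, &walk);
	up_read(&mm->mmap_sem);
	return count;
}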
pgtable-generic.c
  31   void pmd_clear_bad(pmd_t *pmd)    in pmd_clear_bad() argument
  33   pmd_ERROR(*pmd);    in pmd_clear_bad()
  34   pmd_clear(pmd);    in pmd_clear_bad()
  127  pmd_t pmd;    in pmdp_clear_flush() local
  129  pmd = pmdp_get_and_clear(vma->vm_mm, address, pmdp);    in pmdp_clear_flush()
  131  return pmd;    in pmdp_clear_flush()
  141  pmd_t pmd = pmd_mksplitting(*pmdp);    in pmdp_splitting_flush() local
  143  set_pmd_at(vma->vm_mm, address, pmdp, pmd);    in pmdp_splitting_flush()
mincore.c
  116  static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in mincore_pte_range() argument
  124  ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in mincore_pte_range()
  164  pmd_t *pmd;    in mincore_pmd_range() local
  166  pmd = pmd_offset(pud, addr);    in mincore_pmd_range()
  169  if (pmd_trans_huge(*pmd)) {    in mincore_pmd_range()
  170  if (mincore_huge_pmd(vma, pmd, addr, next, vec)) {    in mincore_pmd_range()
  176  if (pmd_none_or_trans_huge_or_clear_bad(pmd))    in mincore_pmd_range()
  179  mincore_pte_range(vma, pmd, addr, next, vec);    in mincore_pmd_range()
  181  } while (pmd++, addr = next, addr != end);    in mincore_pmd_range()
migrate.c
  111   pmd_t *pmd;    in remove_migration_pte() local
  121   pmd = mm_find_pmd(mm, addr);    in remove_migration_pte()
  122   if (!pmd)    in remove_migration_pte()
  125   ptep = pte_offset_map(pmd, addr);    in remove_migration_pte()
  132   ptl = pte_lockptr(mm, pmd);    in remove_migration_pte()
  268   void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,    in migration_entry_wait() argument
  271   spinlock_t *ptl = pte_lockptr(mm, pmd);    in migration_entry_wait()
  272   pte_t *ptep = pte_offset_map(pmd, address);    in migration_entry_wait()
  1693  bool pmd_trans_migrating(pmd_t pmd)    in pmd_trans_migrating() argument
  1695  struct page *page = pmd_page(pmd);    in pmd_trans_migrating()
  [all …]
rmap.c
  616  pmd_t *pmd = NULL;    in mm_find_pmd() local
  627  pmd = pmd_offset(pud, address);    in mm_find_pmd()
  633  pmde = ACCESS_ONCE(*pmd);    in mm_find_pmd()
  635  pmd = NULL;    in mm_find_pmd()
  637  return pmd;    in mm_find_pmd()
  652  pmd_t *pmd;    in __page_check_address() local
  666  pmd = mm_find_pmd(mm, address);    in __page_check_address()
  667  if (!pmd)    in __page_check_address()
  670  pte = pte_offset_map(pmd, address);    in __page_check_address()
  677  ptl = pte_lockptr(mm, pmd);    in __page_check_address()
  [all …]
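Note: mm_find_pmd() above is the lookup helper the rest of rmap.c builds on: it walks pgd/pud/pmd for a single address and returns NULL unless a usable PMD is found. __page_check_address() pairs it with pte_offset_map()/pte_lockptr() to pin down one PTE under the split PTE lock. A rough, hypothetical sketch of that pattern (find_pte_locked is an invented name):

static pte_t *find_pte_locked(struct mm_struct *mm, unsigned long address,
			      spinlock_t **ptlp)
{
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pmd = mm_find_pmd(mm, address);		/* NULL if nothing is mapped here */
	if (!pmd)
		return NULL;

	pte = pte_offset_map(pmd, address);
	ptl = pte_lockptr(mm, pmd);		/* split PTE lock for this table */
	spin_lock(ptl);
	if (!pte_present(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return NULL;
	}
	*ptlp = ptl;
	return pte;				/* caller must pte_unmap_unlock() */
}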
mremap.c
  35   pmd_t *pmd;    in get_old_pmd() local
  45   pmd = pmd_offset(pud, addr);    in get_old_pmd()
  46   if (pmd_none(*pmd))    in get_old_pmd()
  49   return pmd;    in get_old_pmd()
  57   pmd_t *pmd;    in alloc_new_pmd() local
  64   pmd = pmd_alloc(mm, pud, addr);    in alloc_new_pmd()
  65   if (!pmd)    in alloc_new_pmd()
  68   VM_BUG_ON(pmd_trans_huge(*pmd));    in alloc_new_pmd()
  70   return pmd;    in alloc_new_pmd()
vmalloc.c
  58   static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)    in vunmap_pte_range() argument
  62   pte = pte_offset_kernel(pmd, addr);    in vunmap_pte_range()
  71   pmd_t *pmd;    in vunmap_pmd_range() local
  74   pmd = pmd_offset(pud, addr);    in vunmap_pmd_range()
  77   if (pmd_none_or_clear_bad(pmd))    in vunmap_pmd_range()
  79   vunmap_pte_range(pmd, addr, next);    in vunmap_pmd_range()
  80   } while (pmd++, addr = next, addr != end);    in vunmap_pmd_range()
  112  static int vmap_pte_range(pmd_t *pmd, unsigned long addr,    in vmap_pte_range() argument
  122  pte = pte_alloc_kernel(pmd, addr);    in vmap_pte_range()
  141  pmd_t *pmd;    in vmap_pmd_range() local
  [all …]
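Note: vmap_pte_range() above shows the kernel-address mapping idiom: allocate the PTE table with pte_alloc_kernel(), then install one PTE per page into init_mm. A hypothetical sketch of that inner loop, not copied from vmalloc.c (example_map_pages is an invented name):

static int example_map_pages(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct page **pages,
			     pgprot_t prot)
{
	pte_t *pte;
	int i = 0;

	pte = pte_alloc_kernel(pmd, addr);	/* allocate the PTE table if missing */
	if (!pte)
		return -ENOMEM;
	do {
		if (WARN_ON(!pte_none(*pte)))	/* the slot should be empty */
			return -EBUSY;
		set_pte_at(&init_mm, addr, pte, mk_pte(pages[i++], prot));
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}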
mempolicy.c
  484  static int queue_pages_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in queue_pages_pte_range() argument
  493  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in queue_pages_pte_range()
  523  pmd_t *pmd, const nodemask_t *nodes, unsigned long flags,    in queue_pages_hugetlb_pmd_range() argument
  532  ptl = huge_pte_lock(hstate_vma(vma), vma->vm_mm, (pte_t *)pmd);    in queue_pages_hugetlb_pmd_range()
  533  entry = huge_ptep_get((pte_t *)pmd);    in queue_pages_hugetlb_pmd_range()
  556  pmd_t *pmd;    in queue_pages_pmd_range() local
  559  pmd = pmd_offset(pud, addr);    in queue_pages_pmd_range()
  562  if (!pmd_present(*pmd))    in queue_pages_pmd_range()
  564  if (pmd_huge(*pmd) && is_vm_hugetlb_page(vma)) {    in queue_pages_pmd_range()
  565  queue_pages_hugetlb_pmd_range(vma, pmd, nodes,    in queue_pages_pmd_range()
  [all …]
madvise.c
  139  static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,    in swapin_walk_pmd_entry() argument
  146  if (pmd_none_or_trans_huge_or_clear_bad(pmd))    in swapin_walk_pmd_entry()
  155  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);    in swapin_walk_pmd_entry()
swapfile.c
  1134  static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,    in unuse_pte() argument
  1153  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in unuse_pte()
  1189  static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,    in unuse_pte_range() argument
  1206  pte = pte_offset_map(pmd, addr);    in unuse_pte_range()
  1214  ret = unuse_pte(vma, pmd, addr, entry, page);    in unuse_pte_range()
  1217  pte = pte_offset_map(pmd, addr);    in unuse_pte_range()
  1229  pmd_t *pmd;    in unuse_pmd_range() local
  1233  pmd = pmd_offset(pud, addr);    in unuse_pmd_range()
  1236  if (pmd_none_or_trans_huge_or_clear_bad(pmd))    in unuse_pmd_range()
  1238  ret = unuse_pte_range(vma, pmd, addr, next, entry, page);    in unuse_pmd_range()
  [all …]
hugetlb.c
  3723  pmd_t *pmd = NULL;    in huge_pte_offset() local
  3731  pmd = pmd_offset(pud, addr);    in huge_pte_offset()
  3734  return (pte_t *) pmd;    in huge_pte_offset()
  3752  pmd_t *pmd, int flags)    in follow_huge_pmd() argument
  3758  ptl = pmd_lockptr(mm, pmd);    in follow_huge_pmd()
  3764  if (!pmd_huge(*pmd))    in follow_huge_pmd()
  3766  pte = huge_ptep_get((pte_t *)pmd);    in follow_huge_pmd()
  3768  page = pte_page(*(pte_t *)pmd) +    in follow_huge_pmd()
  3775  __migration_entry_wait(mm, (pte_t *)pmd, ptl);    in follow_huge_pmd()
memcontrol.c
  5807  unsigned long addr, pmd_t pmd, union mc_target *target)    in get_mctgt_type_thp() argument
  5813  page = pmd_page(pmd);    in get_mctgt_type_thp()
  5829  unsigned long addr, pmd_t pmd, union mc_target *target)    in get_mctgt_type_thp() argument
  5835  static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,    in mem_cgroup_count_precharge_pte_range() argument
  5843  if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {    in mem_cgroup_count_precharge_pte_range()
  5844  if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)    in mem_cgroup_count_precharge_pte_range()
  5850  if (pmd_trans_unstable(pmd))    in mem_cgroup_count_precharge_pte_range()
  5852  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);    in mem_cgroup_count_precharge_pte_range()
  6011  static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,    in mem_cgroup_move_charge_pte_range() argument
  6034  if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {    in mem_cgroup_move_charge_pte_range()
  [all …]
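Note: mem_cgroup_count_precharge_pte_range() above follows the THP-aware pmd_entry idiom of this kernel generation: try the huge-PMD path under pmd_trans_huge_lock() first, skip unstable PMDs, then fall back to a locked PTE walk. A hypothetical skeleton of that shape (example_pmd_entry is an invented name; the per-page work is elided):

static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->private;
	spinlock_t *ptl;
	pte_t *pte;

	if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		/* *pmd maps a whole huge page; handle it under the PMD lock */
		spin_unlock(ptl);
		return 0;
	}

	if (pmd_trans_unstable(pmd))	/* racing THP split/collapse: skip */
		return 0;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... inspect *pte ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}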
ksm.c
  934  pmd_t *pmd;    in replace_page() local
  946  pmd = mm_find_pmd(mm, addr);    in replace_page()
  947  if (!pmd)    in replace_page()
  954  ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);    in replace_page()
internal.h
  269  extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);