/mm/
sparse-vmemmap.c
   115  pmd_t * __meminit vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node)   in vmemmap_pmd_populate() (argument)
   117  pmd_t *pmd = pmd_offset(pud, addr);   in vmemmap_pmd_populate()
   129  pud_t *pud = pud_offset(pgd, addr);   in vmemmap_pud_populate() (local)
   130  if (pud_none(*pud)) {   in vmemmap_pud_populate()
   134  pud_populate(&init_mm, pud, p);   in vmemmap_pud_populate()
   136  return pud;   in vmemmap_pud_populate()
   156  pud_t *pud;   in vmemmap_populate_basepages() (local)
   164  pud = vmemmap_pud_populate(pgd, addr, node);   in vmemmap_populate_basepages()
   165  if (!pud)   in vmemmap_populate_basepages()
   167  pmd = vmemmap_pmd_populate(pud, addr, node);   in vmemmap_populate_basepages()
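Lines 129-136 above are the core of vmemmap_pud_populate(): look up the pud entry for addr and, only if it is still empty, install a freshly allocated pmd table. A hedged reconstruction of the whole helper, assuming the elided allocation step is vmemmap_alloc_block() as in mainline trees of this vintage:

    pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
    {
            pud_t *pud = pud_offset(pgd, addr);

            if (pud_none(*pud)) {
                    /* Assumed allocator: one page for the new pmd table. */
                    void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                    if (!p)
                            return NULL;
                    pud_populate(&init_mm, pud, p);
            }
            return pud;
    }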
gup.c
   158  pud_t *pud;   in follow_page_mask() (local)
   176  pud = pud_offset(pgd, address);   in follow_page_mask()
   177  if (pud_none(*pud))   in follow_page_mask()
   179  if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {   in follow_page_mask()
   180  page = follow_huge_pud(mm, address, pud, flags);   in follow_page_mask()
   185  if (unlikely(pud_bad(*pud)))   in follow_page_mask()
   188  pmd = pmd_offset(pud, address);   in follow_page_mask()
   227  pud_t *pud;   in get_gate_page() (local)
   240  pud = pud_offset(pgd, address);   in get_gate_page()
   241  BUG_ON(pud_none(*pud));   in get_gate_page()
   [all …]
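Both gup.c walkers above run the same descend-and-check sequence at the pud level. A condensed sketch of that step (follow_pud_step() is a hypothetical wrapper name; no_page_table() and follow_huge_pud() are the helpers the excerpt itself calls):

    static struct page *follow_pud_step(struct vm_area_struct *vma, pgd_t *pgd,
                                        unsigned long address, unsigned int flags)
    {
            struct mm_struct *mm = vma->vm_mm;
            pud_t *pud = pud_offset(pgd, address);

            if (pud_none(*pud))                     /* nothing mapped at this level */
                    return no_page_table(vma, flags);
            if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
                    struct page *page = follow_huge_pud(mm, address, pud, flags);
                    return page ? page : no_page_table(vma, flags);
            }
            if (unlikely(pud_bad(*pud)))            /* corrupt entry, give up */
                    return no_page_table(vma, flags);
            /* otherwise descend: pmd = pmd_offset(pud, address); ... */
            return NULL;
    }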
pagewalk.c
    27  static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,   in walk_pmd_range() (argument)
    34  pmd = pmd_offset(pud, addr);   in walk_pmd_range()
    75  pud_t *pud;   in walk_pud_range() (local)
    79  pud = pud_offset(pgd, addr);   in walk_pud_range()
    82  if (pud_none_or_clear_bad(pud)) {   in walk_pud_range()
    90  err = walk->pud_entry(pud, addr, next, walk);   in walk_pud_range()
    92  err = walk_pmd_range(pud, addr, next, walk);   in walk_pud_range()
    95  } while (pud++, addr = next, addr != end);   in walk_pud_range()
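pagewalk.c's walk_pud_range() is the template for the pud loops that recur below in memory.c, mincore.c, mprotect.c, vmalloc.c, mempolicy.c and swapfile.c. A minimal sketch of the idiom, with do_pmd_range() as a hypothetical stand-in for whatever each caller does per pmd range:

    static int walk_pud_range_sketch(pgd_t *pgd, unsigned long addr,
                                     unsigned long end)
    {
            pud_t *pud = pud_offset(pgd, addr);     /* first entry in the range */
            unsigned long next;
            int err = 0;

            do {
                    next = pud_addr_end(addr, end); /* clamp to this pud's span */
                    if (pud_none_or_clear_bad(pud)) /* skip empty, reset bad */
                            continue;
                    err = do_pmd_range(pud, addr, next);
                    if (err)
                            break;
            } while (pud++, addr = next, addr != end);

            return err;
    }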
memory.c
   399  static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,   in free_pmd_range() (argument)
   408  pmd = pmd_offset(pud, addr);   in free_pmd_range()
   427  pmd = pmd_offset(pud, start);   in free_pmd_range()
   428  pud_clear(pud);   in free_pmd_range()
   436  pud_t *pud;   in free_pud_range() (local)
   441  pud = pud_offset(pgd, addr);   in free_pud_range()
   444  if (pud_none_or_clear_bad(pud))   in free_pud_range()
   446  free_pmd_range(tlb, pud, addr, next, floor, ceiling);   in free_pud_range()
   447  } while (pud++, addr = next, addr != end);   in free_pud_range()
   460  pud = pud_offset(pgd, start);   in free_pud_range()
   [all …]
mincore.c
   159  static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,   in mincore_pmd_range() (argument)
   166  pmd = pmd_offset(pud, addr);   in mincore_pmd_range()
   189  pud_t *pud;   in mincore_pud_range() (local)
   191  pud = pud_offset(pgd, addr);   in mincore_pud_range()
   194  if (pud_none_or_clear_bad(pud))   in mincore_pud_range()
   197  mincore_pmd_range(vma, pud, addr, next, vec);   in mincore_pud_range()
   199  } while (pud++, addr = next, addr != end);   in mincore_pud_range()
mprotect.c
   134  pud_t *pud, unsigned long addr, unsigned long end,   in change_pmd_range() (argument)
   144  pmd = pmd_offset(pud, addr);   in change_pmd_range()
   194  pud_t *pud;   in change_pud_range() (local)
   198  pud = pud_offset(pgd, addr);   in change_pud_range()
   201  if (pud_none_or_clear_bad(pud))   in change_pud_range()
   203  pages += change_pmd_range(vma, pud, addr, next, newprot,   in change_pud_range()
   205  } while (pud++, addr = next, addr != end);   in change_pud_range()
mremap.c
    34  pud_t *pud;   in get_old_pmd() (local)
    41  pud = pud_offset(pgd, addr);   in get_old_pmd()
    42  if (pud_none_or_clear_bad(pud))   in get_old_pmd()
    45  pmd = pmd_offset(pud, addr);   in get_old_pmd()
    56  pud_t *pud;   in alloc_new_pmd() (local)
    60  pud = pud_alloc(mm, pgd, addr);   in alloc_new_pmd()
    61  if (!pud)   in alloc_new_pmd()
    64  pmd = pmd_alloc(mm, pud, addr);   in alloc_new_pmd()
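mremap.c shows the two halves of this API side by side: get_old_pmd() only looks up existing tables (pud_offset(), pmd_offset(), bailing out on empty entries), while alloc_new_pmd() may have to create them. A sketch of the allocating variant, directly following the excerpted calls (alloc_pmd_sketch() is a hypothetical name):

    static pmd_t *alloc_pmd_sketch(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);      /* pgd entries always exist */
            pud_t *pud = pud_alloc(mm, pgd, addr);  /* create pud table if absent */

            if (!pud)
                    return NULL;                    /* allocation failed */
            return pmd_alloc(mm, pud, addr);        /* likewise for the pmd table */
    }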
vmalloc.c
    69  static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)   in vunmap_pmd_range() (argument)
    74  pmd = pmd_offset(pud, addr);   in vunmap_pmd_range()
    85  pud_t *pud;   in vunmap_pud_range() (local)
    88  pud = pud_offset(pgd, addr);   in vunmap_pud_range()
    91  if (pud_none_or_clear_bad(pud))   in vunmap_pud_range()
    93  vunmap_pmd_range(pud, addr, next);   in vunmap_pud_range()
    94  } while (pud++, addr = next, addr != end);   in vunmap_pud_range()
   138  static int vmap_pmd_range(pud_t *pud, unsigned long addr,   in vmap_pmd_range() (argument)
   144  pmd = pmd_alloc(&init_mm, pud, addr);   in vmap_pmd_range()
   158  pud_t *pud;   in vmap_pud_range() (local)
   [all …]
hugetlb.c
   3611  pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)   in huge_pmd_share() (argument)
   3624  return (pte_t *)pmd_alloc(mm, pud, addr);   in huge_pmd_share()
   3646  if (pud_none(*pud))   in huge_pmd_share()
   3647  pud_populate(mm, pud,   in huge_pmd_share()
   3653  pte = (pte_t *)pmd_alloc(mm, pud, addr);   in huge_pmd_share()
   3673  pud_t *pud = pud_offset(pgd, *addr);   in huge_pmd_unshare() (local)
   3679  pud_clear(pud);   in huge_pmd_unshare()
   3686  pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)   in huge_pmd_share() (argument)
   3698  pud_t *pud;   in huge_pte_alloc() (local)
   3702  pud = pud_alloc(mm, pgd, addr);   in huge_pte_alloc()
   [all …]
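hugetlb.c uses the pud level in two ways: for pud-sized pages the pud entry itself serves as the huge pte, and huge_pmd_share() lets mappings of the same file range share one pmd table via pud_populate(). A loosely reconstructed, simplified sketch of the allocation path (the real code also gates sharing behind a want_pmd_share()/VMA-sharability test, elided here):

    static pte_t *huge_pte_alloc_sketch(struct mm_struct *mm,
                                        unsigned long addr, unsigned long sz)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud = pud_alloc(mm, pgd, addr);

            if (!pud)
                    return NULL;
            if (sz == PUD_SIZE)                     /* gigantic page: pud is the pte */
                    return (pte_t *)pud;
            if (pud_none(*pud))                     /* may attach a shared pmd table */
                    return huge_pmd_share(mm, addr, pud);
            return (pte_t *)pmd_alloc(mm, pud, addr);
    }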
pgtable-generic.c
    25  void pud_clear_bad(pud_t *pud)   in pud_clear_bad() (argument)
    27  pud_ERROR(*pud);   in pud_clear_bad()
    28  pud_clear(pud);   in pud_clear_bad()
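pud_clear_bad() is the slow path behind the pud_none_or_clear_bad() checks seen in most entries here. The generic fast path is an inline along these lines (per asm-generic; architectures may override the pud_bad() predicate):

    static inline int pud_none_or_clear_bad(pud_t *pud)
    {
            if (pud_none(*pud))             /* empty entry: nothing to walk */
                    return 1;
            if (unlikely(pud_bad(*pud))) {  /* corrupt: pud_ERROR() + clear */
                    pud_clear_bad(pud);
                    return 1;
            }
            return 0;
    }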
mempolicy.c
   551  static inline int queue_pages_pmd_range(struct vm_area_struct *vma, pud_t *pud,   in queue_pages_pmd_range() (argument)
   559  pmd = pmd_offset(pud, addr);   in queue_pages_pmd_range()
   584  pud_t *pud;   in queue_pages_pud_range() (local)
   587  pud = pud_offset(pgd, addr);   in queue_pages_pud_range()
   590  if (pud_huge(*pud) && is_vm_hugetlb_page(vma))   in queue_pages_pud_range()
   592  if (pud_none_or_clear_bad(pud))   in queue_pages_pud_range()
   594  if (queue_pages_pmd_range(vma, pud, addr, next, nodes,   in queue_pages_pud_range()
   597  } while (pud++, addr = next, addr != end);   in queue_pages_pud_range()
huge_memory.c
   1595  pud_t *pud;   in page_check_address_pmd() (local)
   1604  pud = pud_offset(pgd, address);   in page_check_address_pmd()
   1605  if (!pud_present(*pud))   in page_check_address_pmd()
   1607  pmd = pmd_offset(pud, address);   in page_check_address_pmd()
   2927  pud_t *pud;   in split_huge_page_address() (local)
   2936  pud = pud_offset(pgd, address);   in split_huge_page_address()
   2937  if (!pud_present(*pud))   in split_huge_page_address()
   2940  pmd = pmd_offset(pud, address);   in split_huge_page_address()
rmap.c
   615  pud_t *pud;   in mm_find_pmd() (local)
   623  pud = pud_offset(pgd, address);   in mm_find_pmd()
   624  if (!pud_present(*pud))   in mm_find_pmd()
   627  pmd = pmd_offset(pud, address);   in mm_find_pmd()
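rmap.c's mm_find_pmd() and the two huge_memory.c lookups above share a plain read-only descent that stops at the pmd so the caller can inspect a possibly-huge entry itself. A minimal sketch (find_pmd_sketch() is a hypothetical name):

    static pmd_t *find_pmd_sketch(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            pud_t *pud;
            pmd_t *pmd = NULL;

            if (!pgd_present(*pgd))
                    goto out;
            pud = pud_offset(pgd, address);
            if (!pud_present(*pud))
                    goto out;
            pmd = pmd_offset(pud, address); /* caller checks the pmd state */
    out:
            return pmd;
    }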
swapfile.c
   1225  static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,   in unuse_pmd_range() (argument)
   1233  pmd = pmd_offset(pud, addr);   in unuse_pmd_range()
   1249  pud_t *pud;   in unuse_pud_range() (local)
   1253  pud = pud_offset(pgd, addr);   in unuse_pud_range()
   1256  if (pud_none_or_clear_bad(pud))   in unuse_pud_range()
   1258  ret = unuse_pmd_range(vma, pud, addr, next, entry, page);   in unuse_pud_range()
   1261  } while (pud++, addr = next, addr != end);   in unuse_pud_range()