/mm/: references to pgd, one match per line (line number, source text, enclosing function; "argument" and "local" mark how the identifier is used)
pagewalk.c
      72  static int walk_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end,   in walk_pud_range() argument
      79  pud = pud_offset(pgd, addr);   in walk_pud_range()
     170  pgd_t *pgd;   in walk_page_range() local
     182  pgd = pgd_offset(walk->mm, addr);   in walk_page_range()
     206  pgd = pgd_offset(walk->mm, next);   in walk_page_range()
     227  pgd = pgd_offset(walk->mm, next);   in walk_page_range()
     232  if (pgd_none_or_clear_bad(pgd)) {   in walk_page_range()
     237  pgd++;   in walk_page_range()
     241  err = walk->pgd_entry(pgd, addr, next, walk);   in walk_page_range()
     244  err = walk_pud_range(pgd, addr, next, walk);   in walk_page_range()
    [all …]
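These fragments are the canonical top-level page-table walk: take the pgd entry covering addr, clamp the range to that entry's span, skip empty or corrupt entries, and descend. A minimal sketch of the idiom, assuming the pre-4.12 layout in which pud_offset() takes a pgd; do_pud_range() is a hypothetical stand-in for pagewalk.c's walk_pud_range(), whose real signature also threads a struct mm_walk through:

    /* hypothetical pud-level helper; simplified signature */
    static int do_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end);

    static int do_pgd_range(struct mm_struct *mm, unsigned long addr,
                            unsigned long end)
    {
            pgd_t *pgd = pgd_offset(mm, addr);      /* entry covering addr */
            unsigned long next;
            int err = 0;

            do {
                    next = pgd_addr_end(addr, end); /* clamp to this entry's span */
                    if (pgd_none_or_clear_bad(pgd))
                            continue;               /* empty or corrupt: skip */
                    err = do_pud_range(pgd, addr, next);
                    if (err)
                            break;
            } while (pgd++, addr = next, addr != end);

            return err;
    }

The closing "} while (pgd++, addr = next, addr != end)" comma idiom advances the entry pointer and the address together; it recurs in almost every file below.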
sparse-vmemmap.c
     127  pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)   in vmemmap_pud_populate() argument
     129  pud_t *pud = pud_offset(pgd, addr);   in vmemmap_pud_populate()
     141  pgd_t *pgd = pgd_offset_k(addr);   in vmemmap_pgd_populate() local
     142  if (pgd_none(*pgd)) {   in vmemmap_pgd_populate()
     146  pgd_populate(&init_mm, pgd, p);   in vmemmap_pgd_populate()
     148  return pgd;   in vmemmap_pgd_populate()
     155  pgd_t *pgd;   in vmemmap_populate_basepages() local
     161  pgd = vmemmap_pgd_populate(addr, node);   in vmemmap_populate_basepages()
     162  if (!pgd)   in vmemmap_populate_basepages()
     164  pud = vmemmap_pud_populate(pgd, addr, node);   in vmemmap_populate_basepages()
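vmemmap_pgd_populate() shows the allocate-if-empty idiom used when building kernel mappings: if the pgd slot is empty, allocate a node-local page for the pud level and hook it in with pgd_populate(). Reconstructed from the fragments above, with the unlisted lines filled in from context (a sketch, not the verbatim source):

    pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
    {
            pgd_t *pgd = pgd_offset_k(addr);        /* kernel (init_mm) tables */

            if (pgd_none(*pgd)) {                   /* no pud page here yet */
                    void *p = vmemmap_alloc_block(PAGE_SIZE, node);
                    if (!p)
                            return NULL;
                    pgd_populate(&init_mm, pgd, p); /* hook the new pud page in */
            }
            return pgd;
    }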
mincore.c
     184  static void mincore_pud_range(struct vm_area_struct *vma, pgd_t *pgd,   in mincore_pud_range() argument
     191  pud = pud_offset(pgd, addr);   in mincore_pud_range()
     207  pgd_t *pgd;   in mincore_page_range() local
     209  pgd = pgd_offset(vma->vm_mm, addr);   in mincore_page_range()
     212  if (pgd_none_or_clear_bad(pgd))   in mincore_page_range()
     215  mincore_pud_range(vma, pgd, addr, next, vec);   in mincore_page_range()
     217  } while (pgd++, addr = next, addr != end);   in mincore_page_range()
memory.c
     432  static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,   in free_pud_range() argument
     441  pud = pud_offset(pgd, addr);   in free_pud_range()
     460  pud = pud_offset(pgd, start);   in free_pud_range()
     461  pgd_clear(pgd);   in free_pud_range()
     472  pgd_t *pgd;   in free_pgd_range() local
     517  pgd = pgd_offset(tlb->mm, addr);   in free_pgd_range()
     520  if (pgd_none_or_clear_bad(pgd))   in free_pgd_range()
     522  free_pud_range(tlb, pgd, addr, next, floor, ceiling);   in free_pgd_range()
     523  } while (pgd++, addr = next, addr != end);   in free_pgd_range()
     647  pgd_t *pgd = pgd_offset(vma->vm_mm, addr);   in print_bad_pte() local
    [all …]
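Lines 460-461 are the distinctive part of free_pud_range(): once every pud entry under a pgd slot has been emptied, the pud page itself is unhooked and handed to the mmu_gather. A sketch of just that teardown step, assuming the floor/ceiling range checks have already passed (the helper name is hypothetical):

    static void free_pud_page(struct mmu_gather *tlb, pgd_t *pgd,
                              unsigned long start)
    {
            pud_t *pud = pud_offset(pgd, start);

            pgd_clear(pgd);                  /* detach the pud page from the pgd */
            pud_free_tlb(tlb, pud, start);   /* defer the free via the TLB gather */
    }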
mprotect.c
     191  pgd_t *pgd, unsigned long addr, unsigned long end,   in change_pud_range() argument
     198  pud = pud_offset(pgd, addr);   in change_pud_range()
     215  pgd_t *pgd;   in change_protection_range() local
     221  pgd = pgd_offset(mm, addr);   in change_protection_range()
     226  if (pgd_none_or_clear_bad(pgd))   in change_protection_range()
     228  pages += change_pud_range(vma, pgd, addr, next, newprot,   in change_protection_range()
     230  } while (pgd++, addr = next, addr != end);   in change_protection_range()
gup.c
     157  pgd_t *pgd;   in follow_page_mask() local
     172  pgd = pgd_offset(mm, address);   in follow_page_mask()
     173  if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))   in follow_page_mask()
     176  pud = pud_offset(pgd, address);   in follow_page_mask()
     226  pgd_t *pgd;   in get_gate_page() local
     236  pgd = pgd_offset_k(address);   in get_gate_page()
     238  pgd = pgd_offset_gate(mm, address);   in get_gate_page()
     239  BUG_ON(pgd_none(*pgd));   in get_gate_page()
     240  pud = pud_offset(pgd, address);   in get_gate_page()
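follow_page_mask() checks both pgd_none() and pgd_bad() before descending: GUP must fail gracefully on an empty or corrupt entry rather than clear it the way pgd_none_or_clear_bad() does. A sketch of that lookup step (helper name hypothetical):

    static pud_t *lookup_pud(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);

            if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                    return NULL;            /* nothing mapped, or corrupt */
            return pud_offset(pgd, address);
    }

get_gate_page(), by contrast, looks the gate area up via pgd_offset_k()/pgd_offset_gate() and BUG_ON()s an empty entry, since the gate mapping is set up at boot and must exist.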
mremap.c
      33  pgd_t *pgd;   in get_old_pmd() local
      37  pgd = pgd_offset(mm, addr);   in get_old_pmd()
      38  if (pgd_none_or_clear_bad(pgd))   in get_old_pmd()
      41  pud = pud_offset(pgd, addr);   in get_old_pmd()
      55  pgd_t *pgd;   in alloc_new_pmd() local
      59  pgd = pgd_offset(mm, addr);   in alloc_new_pmd()
      60  pud = pud_alloc(mm, pgd, addr);   in alloc_new_pmd()
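The two functions contrast the read-only and the allocating descent: get_old_pmd() bails out when a level is missing, while alloc_new_pmd() uses pud_alloc(), which installs a fresh pud page when the pgd slot is empty. A fragment sketch with pre-4.12 signatures, assuming mm and addr are in scope:

    pgd_t *pgd;
    pud_t *pud;

    /* get_old_pmd() style: read-only, a missing level means failure */
    pgd = pgd_offset(mm, addr);
    if (pgd_none_or_clear_bad(pgd))
            return NULL;
    pud = pud_offset(pgd, addr);

    /* alloc_new_pmd() style: pud_alloc() fills an empty pgd slot */
    pgd = pgd_offset(mm, addr);
    pud = pud_alloc(mm, pgd, addr);
    if (!pud)
            return NULL;                    /* allocation failed */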
vmalloc.c
      83  static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)   in vunmap_pud_range() argument
      88  pud = pud_offset(pgd, addr);   in vunmap_pud_range()
      99  pgd_t *pgd;   in vunmap_page_range() local
     103  pgd = pgd_offset_k(addr);   in vunmap_page_range()
     106  if (pgd_none_or_clear_bad(pgd))   in vunmap_page_range()
     108  vunmap_pud_range(pgd, addr, next);   in vunmap_page_range()
     109  } while (pgd++, addr = next, addr != end);   in vunmap_page_range()
     155  static int vmap_pud_range(pgd_t *pgd, unsigned long addr,   in vmap_pud_range() argument
     161  pud = pud_alloc(&init_mm, pgd, addr);   in vmap_pud_range()
     181  pgd_t *pgd;   in vmap_page_range_noflush() local
    [all …]
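vmalloc manipulates the kernel's own address space, so its walkers start from pgd_offset_k() and charge allocations to init_mm rather than to a user mm. A minimal mapping-loop sketch (name hypothetical, pud-level body elided):

    static int map_kernel_pgd_range(unsigned long addr, unsigned long end)
    {
            pgd_t *pgd = pgd_offset_k(addr); /* walk init_mm, not a user mm */
            unsigned long next;
            pud_t *pud;

            do {
                    next = pgd_addr_end(addr, end);
                    pud = pud_alloc(&init_mm, pgd, addr);
                    if (!pud)
                            return -ENOMEM;
                    /* vmap_pud_range() would map [addr, next) here */
            } while (pgd++, addr = next, addr != end);

            return 0;
    }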
pgtable-generic.c
      19  void pgd_clear_bad(pgd_t *pgd)   in pgd_clear_bad() argument
      21  pgd_ERROR(*pgd);   in pgd_clear_bad()
      22  pgd_clear(pgd);   in pgd_clear_bad()
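pgd_clear_bad() is the out-of-line slow path behind pgd_none_or_clear_bad(), which most of the walkers above call: report the corrupt entry with pgd_ERROR(), then clear it so the walk can treat the slot as empty. The inline wrapper in asm-generic has roughly this shape (shown as a sketch):

    static inline int pgd_none_or_clear_bad(pgd_t *pgd)
    {
            if (pgd_none(*pgd))
                    return 1;               /* empty: nothing to walk */
            if (unlikely(pgd_bad(*pgd))) {
                    pgd_clear_bad(pgd);     /* pgd_ERROR() + pgd_clear() */
                    return 1;               /* now empty as well */
            }
            return 0;
    }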
init-mm.c
      18  .pgd = swapper_pg_dir,
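This single match is where the kernel's own mm gets its page tables: init_mm.pgd points at swapper_pg_dir, the page directory set up at boot. The surrounding definition looks roughly like the following; the exact field set varies by kernel version, so treat this as a sketch:

    struct mm_struct init_mm = {
            .mm_rb           = RB_ROOT,
            .pgd             = swapper_pg_dir,  /* boot-time kernel tables */
            .mm_users        = ATOMIC_INIT(2),
            .mm_count        = ATOMIC_INIT(1),
            .mmap_sem        = __RWSEM_INITIALIZER(init_mm.mmap_sem),
            .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
            .mmlist          = LIST_HEAD_INIT(init_mm.mmlist),
            INIT_MM_CONTEXT(init_mm)
    };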
huge_memory.c
    1594  pgd_t *pgd;   in page_check_address_pmd() local
    1601  pgd = pgd_offset(mm, address);   in page_check_address_pmd()
    1602  if (!pgd_present(*pgd))   in page_check_address_pmd()
    1604  pud = pud_offset(pgd, address);   in page_check_address_pmd()
    2926  pgd_t *pgd;   in split_huge_page_address() local
    2932  pgd = pgd_offset(mm, address);   in split_huge_page_address()
    2933  if (!pgd_present(*pgd))   in split_huge_page_address()
    2936  pud = pud_offset(pgd, address);   in split_huge_page_address()
mempolicy.c
     579  static inline int queue_pages_pud_range(struct vm_area_struct *vma, pgd_t *pgd,   in queue_pages_pud_range() argument
     587  pud = pud_offset(pgd, addr);   in queue_pages_pud_range()
     606  pgd_t *pgd;   in queue_pages_pgd_range() local
     609  pgd = pgd_offset(vma->vm_mm, addr);   in queue_pages_pgd_range()
     612  if (pgd_none_or_clear_bad(pgd))   in queue_pages_pgd_range()
     614  if (queue_pages_pud_range(vma, pgd, addr, next, nodes,   in queue_pages_pgd_range()
     617  } while (pgd++, addr = next, addr != end);   in queue_pages_pgd_range()
hugetlb.c
    3672  pgd_t *pgd = pgd_offset(mm, *addr);   in huge_pmd_unshare() local
    3673  pud_t *pud = pud_offset(pgd, *addr);   in huge_pmd_unshare()
    3697  pgd_t *pgd;   in huge_pte_alloc() local
    3701  pgd = pgd_offset(mm, addr);   in huge_pte_alloc()
    3702  pud = pud_alloc(mm, pgd, addr);   in huge_pte_alloc()
    3721  pgd_t *pgd;   in huge_pte_offset() local
    3725  pgd = pgd_offset(mm, addr);   in huge_pte_offset()
    3726  if (pgd_present(*pgd)) {   in huge_pte_offset()
    3727  pud = pud_offset(pgd, addr);   in huge_pte_offset()
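huge_pte_offset() explains the bare pgd_present() checks here: for huge pages the "pte" may actually live at the pud or pmd level, so the descent stops early and returns a cast pointer. Reconstructed from the fragments above with the unlisted lines filled in from context (a sketch of the generic variant):

    pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd;
            pud_t *pud;
            pmd_t *pmd = NULL;

            pgd = pgd_offset(mm, addr);
            if (pgd_present(*pgd)) {
                    pud = pud_offset(pgd, addr);
                    if (pud_present(*pud)) {
                            if (pud_huge(*pud))
                                    return (pte_t *)pud; /* pud-level huge page */
                            pmd = pmd_offset(pud, addr);
                    }
            }
            return (pte_t *)pmd;    /* pmd-level huge page, or NULL */
    }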
rmap.c
     614  pgd_t *pgd;   in mm_find_pmd() local
     619  pgd = pgd_offset(mm, address);   in mm_find_pmd()
     620  if (!pgd_present(*pgd))   in mm_find_pmd()
     623  pud = pud_offset(pgd, address);   in mm_find_pmd()
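mm_find_pmd() is the compact form of the lookup that rmap's walkers share: descend with present checks and return NULL as soon as a level is missing. A simplified sketch (the real function also revalidates the pmd against concurrent THP splits before returning it):

    static pmd_t *find_pmd(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            pud_t *pud;

            if (!pgd_present(*pgd))
                    return NULL;
            pud = pud_offset(pgd, address);
            if (!pud_present(*pud))
                    return NULL;
            return pmd_offset(pud, address);
    }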
swapfile.c
    1245  static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd,   in unuse_pud_range() argument
    1253  pud = pud_offset(pgd, addr);   in unuse_pud_range()
    1268  pgd_t *pgd;   in unuse_vma() local
    1283  pgd = pgd_offset(vma->vm_mm, addr);   in unuse_vma()
    1286  if (pgd_none_or_clear_bad(pgd))   in unuse_vma()
    1288  ret = unuse_pud_range(vma, pgd, addr, next, entry, page);   in unuse_vma()
    1291  } while (pgd++, addr = next, addr != end);   in unuse_vma()
debug.c
     204  mm->pgd, atomic_read(&mm->mm_users),   in dump_mm()