/mm/kasan/
D | init.c
      33  static inline bool kasan_p4d_table(pgd_t pgd)  in kasan_p4d_table() argument
      35  return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));  in kasan_p4d_table()
      38  static inline bool kasan_p4d_table(pgd_t pgd)  in kasan_p4d_table() argument
     179  static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,  in zero_p4d_populate() argument
     182  p4d_t *p4d = p4d_offset(pgd, addr);  in zero_p4d_populate()
     231  pgd_t *pgd = pgd_offset_k(addr);  in kasan_populate_early_shadow() local
     250  pgd_populate(&init_mm, pgd,  in kasan_populate_early_shadow()
     252  p4d = p4d_offset(pgd, addr);  in kasan_populate_early_shadow()
     264  if (pgd_none(*pgd)) {  in kasan_populate_early_shadow()
     268  p = p4d_alloc(&init_mm, pgd, addr);  in kasan_populate_early_shadow()
     [all …]

D | shadow.c
     142  pgd_t *pgd = pgd_offset_k(addr);  in shadow_mapped() local
     148  if (pgd_none(*pgd))  in shadow_mapped()
     150  p4d = p4d_offset(pgd, addr);  in shadow_mapped()

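The shadow_mapped() hits above open with the canonical descent used all through mm/: start from pgd_offset_k() for a kernel address, bail out when an entry is empty, then move one level down. A minimal sketch of that full five-level walk, assuming no huge-entry shortcuts (the helper name is ours; the real shadow_mapped() also handles p4d/pud/pmd-sized mappings):

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Hypothetical helper: true if 'addr' is backed by a PTE-level
     * mapping in the kernel page tables. Huge mappings at the higher
     * levels are deliberately ignored in this sketch. */
    static bool kernel_addr_mapped(unsigned long addr)
    {
            pgd_t *pgd = pgd_offset_k(addr);        /* walk init_mm */
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd))
                    return false;
            p4d = p4d_offset(pgd, addr);            /* folded away on <5-level archs */
            if (p4d_none(*p4d))
                    return false;
            pud = pud_offset(p4d, addr);
            if (pud_none(*pud))
                    return false;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd))
                    return false;
            return !pte_none(*pte_offset_kernel(pmd, addr));
    }
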
/mm/
D | sparse-vmemmap.c
     195  p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)  in vmemmap_p4d_populate() argument
     197  p4d_t *p4d = p4d_offset(pgd, addr);  in vmemmap_p4d_populate()
     209  pgd_t *pgd = pgd_offset_k(addr);  in vmemmap_pgd_populate() local
     210  if (pgd_none(*pgd)) {  in vmemmap_pgd_populate()
     214  pgd_populate(&init_mm, pgd, p);  in vmemmap_pgd_populate()
     216  return pgd;  in vmemmap_pgd_populate()
     223  pgd_t *pgd;  in vmemmap_populate_basepages() local
     230  pgd = vmemmap_pgd_populate(addr, node);  in vmemmap_populate_basepages()
     231  if (!pgd)  in vmemmap_populate_basepages()
     233  p4d = vmemmap_p4d_populate(pgd, addr, node);  in vmemmap_populate_basepages()

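vmemmap_pgd_populate() shows the standard populate idiom: if the pgd slot is empty, allocate one zeroed page for the next-level table and install it with pgd_populate() under init_mm. A hedged sketch of that shape, with alloc_table() standing in for the file's node-aware vmemmap_alloc_block_zero():

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Sketch: ensure the pgd entry covering 'addr' points at a p4d
     * table, allocating one on a miss. alloc_table() is a placeholder. */
    static pgd_t *pgd_populate_sketch(unsigned long addr,
                                      void *(*alloc_table)(void))
    {
            pgd_t *pgd = pgd_offset_k(addr);

            if (pgd_none(*pgd)) {
                    void *p = alloc_table();        /* one zeroed page */

                    if (!p)
                            return NULL;
                    pgd_populate(&init_mm, pgd, p); /* hook table into pgd */
            }
            return pgd;
    }
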
D | pagewalk.c
     168  static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,  in walk_p4d_range() argument
     177  p4d = p4d_offset(pgd, addr);  in walk_p4d_range()
     204  pgd_t *pgd;  in walk_pgd_range() local
     209  if (walk->pgd)  in walk_pgd_range()
     210  pgd = walk->pgd + pgd_index(addr);  in walk_pgd_range()
     212  pgd = pgd_offset(walk->mm, addr);  in walk_pgd_range()
     215  if (pgd_none_or_clear_bad(pgd)) {  in walk_pgd_range()
     223  err = ops->pgd_entry(pgd, addr, next, walk);  in walk_pgd_range()
     229  err = walk_p4d_range(pgd, addr, next, walk);  in walk_pgd_range()
     232  } while (pgd++, addr = next, addr != end);  in walk_pgd_range()
     [all …]

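walk_pgd_range() is the template for every range loop in this listing (ioremap, vmalloc, mprotect and swapfile all repeat it): clamp each step to the next pgd boundary with pgd_addr_end(), skip empty or corrupt entries via pgd_none_or_clear_bad(), and recurse one level down. A stripped-down sketch, with handle_p4d_range() as a placeholder for the per-level worker:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* The range-walk skeleton seen in walk_pgd_range() and friends. */
    static int walk_range_sketch(struct mm_struct *mm, unsigned long addr,
                                 unsigned long end,
                                 int (*handle_p4d_range)(pgd_t *, unsigned long,
                                                         unsigned long))
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            unsigned long next;
            int err = 0;

            do {
                    next = pgd_addr_end(addr, end); /* never past a pgd boundary */
                    if (pgd_none_or_clear_bad(pgd))
                            continue;               /* nothing mapped here */
                    err = handle_p4d_range(pgd, addr, next);
                    if (err)
                            break;
            } while (pgd++, addr = next, addr != end);

            return err;
    }
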
D | pgalloc-track.h
       6  static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,  in p4d_alloc_track() argument
      10  if (unlikely(pgd_none(*pgd))) {  in p4d_alloc_track()
      11  if (__p4d_alloc(mm, pgd, address))  in p4d_alloc_track()
      16  return p4d_offset(pgd, address);  in p4d_alloc_track()

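The *_alloc_track() helpers wrap the usual allocate-or-descend step and additionally record, in a pgtbl_mod_mask, which table levels were modified, so callers such as ioremap_page_range() can run arch_sync_kernel_mappings() once at the end. A hypothetical caller, assuming code that lives inside mm/ (the header is mm-internal):

    #include <linux/mm.h>
    #include <linux/pgtable.h>
    #include "pgalloc-track.h"      /* mm-internal, as used by mm/ioremap.c */

    /* Hypothetical: take one step down, noting in 'mask' whether a new
     * top-level entry was installed along the way. */
    static int descend_one_level(unsigned long addr, pgtbl_mod_mask *mask)
    {
            pgd_t *pgd = pgd_offset_k(addr);
            p4d_t *p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);

            if (!p4d)
                    return -ENOMEM;
            /* next: pud_alloc_track(&init_mm, p4d, addr, mask), and so on */
            return 0;
    }
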
D | ioremap.c
     198  static inline int ioremap_p4d_range(pgd_t *pgd, unsigned long addr,  in ioremap_p4d_range() argument
     205  p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);  in ioremap_p4d_range()
     225  pgd_t *pgd;  in ioremap_page_range() local
     235  pgd = pgd_offset_k(addr);  in ioremap_page_range()
     238  err = ioremap_p4d_range(pgd, addr, next, phys_addr, prot,  in ioremap_page_range()
     242  } while (pgd++, phys_addr += (next - addr), addr = next, addr != end);  in ioremap_page_range()

D | gup.c
     791  pgd_t *pgd;  in follow_page_mask() local
     804  pgd = pgd_offset(mm, address);  in follow_page_mask()
     806  if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))  in follow_page_mask()
     809  if (pgd_huge(*pgd)) {  in follow_page_mask()
     810  page = follow_huge_pgd(mm, address, pgd, flags);  in follow_page_mask()
     815  if (is_hugepd(__hugepd(pgd_val(*pgd)))) {  in follow_page_mask()
     817  __hugepd(pgd_val(*pgd)), flags,  in follow_page_mask()
     824  return follow_p4d_mask(vma, address, pgd, flags, ctx);  in follow_page_mask()
     843  pgd_t *pgd;  in get_gate_page() local
     854  pgd = pgd_offset_k(address);  in get_gate_page()
     [all …]

D | memory.c
     332  static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,  in free_p4d_range() argument
     341  p4d = p4d_offset(pgd, addr);  in free_p4d_range()
     360  p4d = p4d_offset(pgd, start);  in free_p4d_range()
     361  pgd_clear(pgd);  in free_p4d_range()
     372  pgd_t *pgd;  in free_pgd_range() local
     421  pgd = pgd_offset(tlb->mm, addr);  in free_pgd_range()
     424  if (pgd_none_or_clear_bad(pgd))  in free_pgd_range()
     426  free_p4d_range(tlb, pgd, addr, next, floor, ceiling);  in free_pgd_range()
     427  } while (pgd++, addr = next, addr != end);  in free_pgd_range()
     548  pgd_t *pgd = pgd_offset(vma->vm_mm, addr);  in print_bad_pte() local
     [all …]

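Lines 360-361 show the teardown half of the story: once everything below a p4d table has been freed, free_p4d_range() detaches the table from its pgd slot and queues the page itself via the mmu_gather batch. A simplified sketch of that tail; the real function also checks the floor/ceiling window before clearing:

    #include <linux/mm.h>
    #include <linux/pgtable.h>
    #include <asm/tlb.h>

    /* Sketch of the tail of free_p4d_range(): unhook the now-empty p4d
     * table and hand its page to the TLB-gather batch for freeing. */
    static void free_p4d_table_sketch(struct mmu_gather *tlb, pgd_t *pgd,
                                      unsigned long start)
    {
            p4d_t *p4d = p4d_offset(pgd, start);    /* the table being freed */

            pgd_clear(pgd);                         /* entry is now none */
            p4d_free_tlb(tlb, p4d, start);          /* free with the batch */
    }
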
D | ptdump.c
      28  static int ptdump_pgd_entry(pgd_t *pgd, unsigned long addr,  in ptdump_pgd_entry() argument
      32  pgd_t val = READ_ONCE(*pgd);  in ptdump_pgd_entry()
     143  void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)  in ptdump_walk_pgd() argument
     150  &ptdump_ops, pgd, st);  in ptdump_walk_pgd()

D | debug_vm_pgtable.c
     474  pgd_t pgd;  in pgd_basic_tests() local
     477  memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));  in pgd_basic_tests()
     478  WARN_ON(!pgd_same(pgd, pgd));  in pgd_basic_tests()
     560  pgd_t pgd = READ_ONCE(*pgdp);  in pgd_clear_tests() local
     566  pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);  in pgd_clear_tests()
     567  WRITE_ONCE(*pgdp, pgd);  in pgd_clear_tests()
     569  pgd = READ_ONCE(*pgdp);  in pgd_clear_tests()
     570  WARN_ON(!pgd_none(pgd));  in pgd_clear_tests()
     576  pgd_t pgd;  in pgd_populate_tests() local
     589  pgd = READ_ONCE(*pgdp);  in pgd_populate_tests()
     [all …]

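pgd_clear_tests() exercises the accessors end to end: force a non-none value into an entry, clear it, and assert pgd_none() holds again. The same shape, condensed (0xbe plays the role of RANDOM_ORVALUE here; the real test also bails out on configurations where the p4d level is folded into the pgd):

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Condensed version of the clear test shown above. */
    static void pgd_clear_check(pgd_t *pgdp)
    {
            pgd_t pgd = READ_ONCE(*pgdp);

            pgd = __pgd(pgd_val(pgd) | 0xbe);       /* make the entry non-none */
            WRITE_ONCE(*pgdp, pgd);
            pgd_clear(pgdp);
            pgd = READ_ONCE(*pgdp);
            WARN_ON(!pgd_none(pgd));                /* clearing must empty it */
    }
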
D | mprotect.c
     305  pgd_t *pgd, unsigned long addr, unsigned long end,  in change_p4d_range() argument
     312  p4d = p4d_offset(pgd, addr);  in change_p4d_range()
     329  pgd_t *pgd;  in change_protection_range() local
     335  pgd = pgd_offset(mm, addr);  in change_protection_range()
     340  if (pgd_none_or_clear_bad(pgd))  in change_protection_range()
     342  pages += change_p4d_range(vma, pgd, addr, next, newprot,  in change_protection_range()
     344  } while (pgd++, addr = next, addr != end);  in change_protection_range()

D | page_vma_mapped.c
     154  pgd_t *pgd;  in page_vma_mapped_walk() local
     193  pgd = pgd_offset(mm, pvmw->address);  in page_vma_mapped_walk()
     194  if (!pgd_present(*pgd)) {  in page_vma_mapped_walk()
     198  p4d = p4d_offset(pgd, pvmw->address);  in page_vma_mapped_walk()

D | mremap.c
      35  pgd_t *pgd;  in get_old_pud() local
      39  pgd = pgd_offset(mm, addr);  in get_old_pud()
      40  if (pgd_none_or_clear_bad(pgd))  in get_old_pud()
      43  p4d = p4d_offset(pgd, addr);  in get_old_pud()
      73  pgd_t *pgd;  in alloc_new_pud() local
      76  pgd = pgd_offset(mm, addr);  in alloc_new_pud()
      77  p4d = p4d_alloc(mm, pgd, addr);  in alloc_new_pud()

D | vmalloc.c
     134  static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,  in vunmap_p4d_range() argument
     141  p4d = p4d_offset(pgd, addr);  in vunmap_p4d_range()
     174  pgd_t *pgd;  in unmap_kernel_range_noflush() local
     179  pgd = pgd_offset_k(addr);  in unmap_kernel_range_noflush()
     182  if (pgd_bad(*pgd))  in unmap_kernel_range_noflush()
     184  if (pgd_none_or_clear_bad(pgd))  in unmap_kernel_range_noflush()
     186  vunmap_p4d_range(pgd, addr, next, &mask);  in unmap_kernel_range_noflush()
     187  } while (pgd++, addr = next, addr != end);  in unmap_kernel_range_noflush()
     257  static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,  in vmap_p4d_range() argument
     264  p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);  in vmap_p4d_range()
     [all …]

D | pgtable-generic.c
      21  void pgd_clear_bad(pgd_t *pgd)  in pgd_clear_bad() argument
      23  pgd_ERROR(*pgd);  in pgd_clear_bad()
      24  pgd_clear(pgd);  in pgd_clear_bad()

D | userfaultfd.c
     255  pgd_t *pgd;  in mm_alloc_pmd() local
     259  pgd = pgd_offset(mm, address);  in mm_alloc_pmd()
     260  p4d = p4d_alloc(mm, pgd, address);  in mm_alloc_pmd()

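mm_alloc_pmd() is the allocating counterpart of the lookup walks: each *_alloc() call returns the existing table when one is already there and only allocates on a miss. A sketch of the whole descent down to the PMD, assuming nothing beyond the generic page-table API:

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* The mm_alloc_pmd() idiom: walk to the PMD, allocating any missing
     * intermediate tables on the way down. */
    static pmd_t *alloc_pmd_sketch(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            p4d_t *p4d = p4d_alloc(mm, pgd, address);
            pud_t *pud;

            if (!p4d)
                    return NULL;
            pud = pud_alloc(mm, p4d, address);
            if (!pud)
                    return NULL;
            return pmd_alloc(mm, pud, address);     /* NULL on allocation failure */
    }
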
D | init-mm.c
      34  .pgd = swapper_pg_dir,

D | memory-failure.c
     296  pgd_t *pgd;  in dev_pagemap_mapping_shift() local
     302  pgd = pgd_offset(vma->vm_mm, address);  in dev_pagemap_mapping_shift()
     303  if (!pgd_present(*pgd))  in dev_pagemap_mapping_shift()
     305  p4d = p4d_offset(pgd, address);  in dev_pagemap_mapping_shift()

D | hugetlb.c
    5506  pgd_t *pgd = pgd_offset(mm, *addr);  in huge_pmd_unshare() local
    5507  p4d_t *p4d = p4d_offset(pgd, *addr);  in huge_pmd_unshare()
    5557  pgd_t *pgd;  in huge_pte_alloc() local
    5562  pgd = pgd_offset(mm, addr);  in huge_pte_alloc()
    5563  p4d = p4d_alloc(mm, pgd, addr);  in huge_pte_alloc()
    5595  pgd_t *pgd;  in huge_pte_offset() local
    5600  pgd = pgd_offset(mm, addr);  in huge_pte_offset()
    5601  if (!pgd_present(*pgd))  in huge_pte_offset()
    5603  p4d = p4d_offset(pgd, addr);  in huge_pte_offset()
    5704  follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)  in follow_huge_pgd() argument
     [all …]

D | rmap.c
     743  pgd_t *pgd;  in mm_find_pmd() local
     749  pgd = pgd_offset(mm, address);  in mm_find_pmd()
     750  if (!pgd_present(*pgd))  in mm_find_pmd()
     753  p4d = p4d_offset(pgd, address);  in mm_find_pmd()

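mm_find_pmd() is the lookup-only mirror of mm_alloc_pmd() above: the same descent, but with *_present() checks and no allocation, since rmap only cares about mappings that already exist. A sketch under that assumption (the real function additionally revalidates the pmd value against concurrent THP collapse):

    #include <linux/mm.h>
    #include <linux/pgtable.h>

    /* Lookup-only descent to the PMD covering 'address'; never allocates. */
    static pmd_t *find_pmd_sketch(struct mm_struct *mm, unsigned long address)
    {
            pgd_t *pgd = pgd_offset(mm, address);
            p4d_t *p4d;
            pud_t *pud;

            if (!pgd_present(*pgd))
                    return NULL;
            p4d = p4d_offset(pgd, address);
            if (!p4d_present(*p4d))
                    return NULL;
            pud = pud_offset(p4d, address);
            if (!pud_present(*pud))
                    return NULL;
            return pmd_offset(pud, address);
    }
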
D | swapfile.c
    2097  static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,  in unuse_p4d_range() argument
    2106  p4d = p4d_offset(pgd, addr);  in unuse_p4d_range()
    2122  pgd_t *pgd;  in unuse_vma() local
    2129  pgd = pgd_offset(vma->vm_mm, addr);  in unuse_vma()
    2132  if (pgd_none_or_clear_bad(pgd))  in unuse_vma()
    2134  ret = unuse_p4d_range(vma, pgd, addr, next, type,  in unuse_vma()
    2138  } while (pgd++, addr = next, addr != end);  in unuse_vma()

D | debug.c
     252  mm->pgd, atomic_read(&mm->mm_users),  in dump_mm()

D | huge_memory.c
    2304  pgd_t *pgd;  in split_huge_pmd_address() local
    2309  pgd = pgd_offset(vma->vm_mm, address);  in split_huge_pmd_address()
    2310  if (!pgd_present(*pgd))  in split_huge_pmd_address()
    2313  p4d = p4d_offset(pgd, address);  in split_huge_pmd_address()