/mm/kasan/
D | init.c |
    38    static inline bool kasan_p4d_table(pgd_t pgd)   in kasan_p4d_table() argument
    40        return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));   in kasan_p4d_table()
    43    static inline bool kasan_p4d_table(pgd_t pgd)   in kasan_p4d_table() argument
    183   static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,   in zero_p4d_populate() argument
    186       p4d_t *p4d = p4d_offset(pgd, addr);   in zero_p4d_populate()
    235   pgd_t *pgd = pgd_offset_k(addr);   in kasan_populate_early_shadow() local
    264       pgd_populate(&init_mm, pgd,   in kasan_populate_early_shadow()
    267       p4d = p4d_offset(pgd, addr);   in kasan_populate_early_shadow()
    279       if (pgd_none(*pgd)) {   in kasan_populate_early_shadow()
    283           p = p4d_alloc(&init_mm, pgd, addr);   in kasan_populate_early_shadow()
    [all …]
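The two definitions at lines 38 and 43 are almost certainly the two halves of a paging-level split: with a real p4d level, the helper detects whether a pgd entry still points at the single shared early-shadow p4d table (early KASAN aliases every shadow entry to one zeroed table); with p4d folded away, it is a stub. A reconstruction from the fragments above — my reading of the #if condition, not confirmed by this listing:

#if CONFIG_PGTABLE_LEVELS > 4
static inline bool kasan_p4d_table(pgd_t pgd)
{
        /* does this entry still alias the one shared early-shadow p4d table? */
        return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
        return false;   /* p4d folded: there is no separate p4d table to detect */
}
#endif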
D | common.c |
    641   pgd_t *pgd = pgd_offset_k(addr);   in shadow_mapped() local
    647   if (pgd_none(*pgd))   in shadow_mapped()
    649   p4d = p4d_offset(pgd, addr);   in shadow_mapped()
/mm/
D | sparse-vmemmap.c |
    192   p4d_t * __meminit vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node)   in vmemmap_p4d_populate() argument
    194       p4d_t *p4d = p4d_offset(pgd, addr);   in vmemmap_p4d_populate()
    206   pgd_t *pgd = pgd_offset_k(addr);   in vmemmap_pgd_populate() local
    207   if (pgd_none(*pgd)) {   in vmemmap_pgd_populate()
    211       pgd_populate(&init_mm, pgd, p);   in vmemmap_pgd_populate()
    213   return pgd;   in vmemmap_pgd_populate()
    220   pgd_t *pgd;   in vmemmap_populate_basepages() local
    227   pgd = vmemmap_pgd_populate(addr, node);   in vmemmap_populate_basepages()
    228   if (!pgd)   in vmemmap_populate_basepages()
    230   p4d = vmemmap_p4d_populate(pgd, addr, node);   in vmemmap_populate_basepages()
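vmemmap_pgd_populate() is the populate-if-empty idiom in its simplest form: look the entry up in the kernel master table, and only when it is none allocate a next-level table and hook it in. A sketch assembled from the fragments above; I'm assuming vmemmap_alloc_block_zero() as the allocator (as in kernels of this vintage) and keeping error handling minimal:

pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
{
        pgd_t *pgd = pgd_offset_k(addr);        /* index init_mm's top-level table */

        if (pgd_none(*pgd)) {                   /* no p4d table installed here yet */
                void *p = vmemmap_alloc_block_zero(PAGE_SIZE, node);
                if (!p)
                        return NULL;
                pgd_populate(&init_mm, pgd, p); /* wire in the new p4d table */
        }
        return pgd;
}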
D | gup.c |
    512   pgd_t *pgd;   in follow_page_mask() local
    525   pgd = pgd_offset(mm, address);   in follow_page_mask()
    527   if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))   in follow_page_mask()
    530   if (pgd_huge(*pgd)) {   in follow_page_mask()
    531       page = follow_huge_pgd(mm, address, pgd, flags);   in follow_page_mask()
    536   if (is_hugepd(__hugepd(pgd_val(*pgd)))) {   in follow_page_mask()
    538       __hugepd(pgd_val(*pgd)), flags,   in follow_page_mask()
    545   return follow_p4d_mask(vma, address, pgd, flags, ctx);   in follow_page_mask()
    564   pgd_t *pgd;   in get_gate_page() local
    575   pgd = pgd_offset_k(address);   in get_gate_page()
    [all …]
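follow_page_mask() shows what the top of a descent looks like once huge mappings enter the picture: empty or corrupt entries terminate the walk, a leaf mapping at the pgd itself (pgd_huge) or an arch-folded hugepd goes to a dedicated helper, and only the ordinary case recurses into follow_p4d_mask(). A condensed sketch of that dispatch, pieced together from the fragments above (the surrounding function body and the ctx bookkeeping are elided):

        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
                return no_page_table(vma, flags);       /* hole or corrupt entry */
        if (pgd_huge(*pgd)) {                           /* huge page mapped at the pgd itself */
                page = follow_huge_pgd(mm, address, pgd, flags);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        if (is_hugepd(__hugepd(pgd_val(*pgd)))) {       /* arch-specific hugepd folding */
                page = follow_huge_pd(vma, address,
                                      __hugepd(pgd_val(*pgd)), flags,
                                      PGDIR_SHIFT);
                if (page)
                        return page;
                return no_page_table(vma, flags);
        }
        return follow_p4d_mask(vma, address, pgd, flags, ctx);  /* ordinary descent */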
D | pagewalk.c |
    120   static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,   in walk_p4d_range() argument
    128       p4d = p4d_offset(pgd, addr);   in walk_p4d_range()
    150   pgd_t *pgd;   in walk_pgd_range() local
    155   pgd = pgd_offset(walk->mm, addr);   in walk_pgd_range()
    158   if (pgd_none_or_clear_bad(pgd)) {   in walk_pgd_range()
    166       err = walk_p4d_range(pgd, addr, next, walk);   in walk_pgd_range()
    169   } while (pgd++, addr = next, addr != end);   in walk_pgd_range()
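walk_pgd_range() is the template that every range operation in this listing repeats (free_pgd_range, change_protection_range, vunmap_page_range, unuse_vma): pgd_addr_end() clamps each step to the next top-level boundary, pgd_none_or_clear_bad() skips holes, and everything else recurses one level down. A minimal sketch of the idiom, with a hypothetical my_p4d_range() standing in for the next level:

static void my_pgd_range(struct mm_struct *mm, unsigned long addr, unsigned long end)
{
        pgd_t *pgd = pgd_offset(mm, addr);      /* entry covering the start address */
        unsigned long next;

        do {
                next = pgd_addr_end(addr, end); /* end of this entry's span, capped at end */
                if (pgd_none_or_clear_bad(pgd)) /* skip (and scrub) empty or bad entries */
                        continue;
                my_p4d_range(pgd, addr, next);  /* hypothetical next-level handler */
        } while (pgd++, addr = next, addr != end);
}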
D | memory.c |
    291   static inline void free_p4d_range(struct mmu_gather *tlb, pgd_t *pgd,   in free_p4d_range() argument
    300       p4d = p4d_offset(pgd, addr);   in free_p4d_range()
    319       p4d = p4d_offset(pgd, start);   in free_p4d_range()
    320       pgd_clear(pgd);   in free_p4d_range()
    331   pgd_t *pgd;   in free_pgd_range() local
    380   pgd = pgd_offset(tlb->mm, addr);   in free_pgd_range()
    383   if (pgd_none_or_clear_bad(pgd))   in free_pgd_range()
    385       free_p4d_range(tlb, pgd, addr, next, floor, ceiling);   in free_pgd_range()
    386   } while (pgd++, addr = next, addr != end);   in free_pgd_range()
    503   pgd_t *pgd = pgd_offset(vma->vm_mm, addr);   in print_bad_pte() local
    [all …]
D | mprotect.c |
    248       pgd_t *pgd, unsigned long addr, unsigned long end,   in change_p4d_range() argument
    255       p4d = p4d_offset(pgd, addr);   in change_p4d_range()
    272   pgd_t *pgd;   in change_protection_range() local
    278   pgd = pgd_offset(mm, addr);   in change_protection_range()
    283   if (pgd_none_or_clear_bad(pgd))   in change_protection_range()
    285       pages += change_p4d_range(vma, pgd, addr, next, newprot,   in change_protection_range()
    287   } while (pgd++, addr = next, addr != end);   in change_protection_range()
D | page_vma_mapped.c |
    142   pgd_t *pgd;   in page_vma_mapped_walk() local
    167   pgd = pgd_offset(mm, pvmw->address);   in page_vma_mapped_walk()
    168   if (!pgd_present(*pgd))   in page_vma_mapped_walk()
    170   p4d = p4d_offset(pgd, pvmw->address);   in page_vma_mapped_walk()
D | mremap.c |
    35    pgd_t *pgd;   in get_old_pmd() local
    40    pgd = pgd_offset(mm, addr);   in get_old_pmd()
    41    if (pgd_none_or_clear_bad(pgd))   in get_old_pmd()
    44    p4d = p4d_offset(pgd, addr);   in get_old_pmd()
    62    pgd_t *pgd;   in alloc_new_pmd() local
    67    pgd = pgd_offset(mm, addr);   in alloc_new_pmd()
    68    p4d = p4d_alloc(mm, pgd, addr);   in alloc_new_pmd()
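get_old_pmd() and alloc_new_pmd() make a tidy pair: the source walk must never create page tables, so it bails on any hole, while the destination walk conjures missing levels with the *_alloc() helpers. A side-by-side sketch assembled from the fragments above; the function names are hypothetical stand-ins, and the pud/pmd steps follow the same shape as the pgd/p4d ones the listing shows:

/* source side: look up only, never allocate (hypothetical name) */
static pmd_t *lookup_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none_or_clear_bad(pgd))
                return NULL;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none_or_clear_bad(p4d))
                return NULL;
        pud = pud_offset(p4d, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;
        return pmd;
}

/* destination side: allocate any level that is missing (hypothetical name) */
static pmd_t *alloc_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd = pgd_offset(mm, addr);
        p4d_t *p4d = p4d_alloc(mm, pgd, addr);  /* may allocate a p4d table */
        pud_t *pud;

        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);         /* may allocate a pud table */
        if (!pud)
                return NULL;
        return pmd_alloc(mm, pud, addr);        /* may allocate a pmd table */
}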
D | pgtable-generic.c |
    21    void pgd_clear_bad(pgd_t *pgd)   in pgd_clear_bad() argument
    23        pgd_ERROR(*pgd);   in pgd_clear_bad()
    24        pgd_clear(pgd);   in pgd_clear_bad()
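pgd_clear_bad() is only the report-and-scrub half; the walkers above reach it through pgd_none_or_clear_bad(), whose generic form (in include/asm-generic/pgtable.h in kernels of this era) is roughly:

static inline int pgd_none_or_clear_bad(pgd_t *pgd)
{
        if (pgd_none(*pgd))
                return 1;               /* hole: nothing mapped at this entry */
        if (unlikely(pgd_bad(*pgd))) {
                pgd_clear_bad(pgd);     /* pgd_ERROR() + pgd_clear(), as above */
                return 1;               /* treat the scrubbed entry as a hole */
        }
        return 0;
}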
D | vmalloc.c |
    106   static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)   in vunmap_p4d_range() argument
    111       p4d = p4d_offset(pgd, addr);   in vunmap_p4d_range()
    124   pgd_t *pgd;   in vunmap_page_range() local
    128   pgd = pgd_offset_k(addr);   in vunmap_page_range()
    131   if (pgd_none_or_clear_bad(pgd))   in vunmap_page_range()
    133   vunmap_p4d_range(pgd, addr, next);   in vunmap_page_range()
    134   } while (pgd++, addr = next, addr != end);   in vunmap_page_range()
    197   static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,   in vmap_p4d_range() argument
    203       p4d = p4d_alloc(&init_mm, pgd, addr);   in vmap_p4d_range()
    223   pgd_t *pgd;   in vmap_page_range_noflush() local
    [all …]
D | userfaultfd.c |
    149   pgd_t *pgd;   in mm_alloc_pmd() local
    153   pgd = pgd_offset(mm, address);   in mm_alloc_pmd()
    154   p4d = p4d_alloc(mm, pgd, address);   in mm_alloc_pmd()
D | init-mm.c |
    31    .pgd = swapper_pg_dir,
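The .pgd = swapper_pg_dir initializer makes init_mm the owner of the kernel master page table, which is why the kernel-address walks in this listing (vmalloc.c, sparse-vmemmap.c, the kasan files) interchangeably use pgd_offset_k() and explicit &init_mm arguments. The shorthand is, give or take per-arch placement, a one-line define:

/* kernel-space lookups index the master table owned by init_mm */
#define pgd_offset_k(address)   pgd_offset(&init_mm, (address))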
D | memory-failure.c |
    268   pgd_t *pgd;   in dev_pagemap_mapping_shift() local
    274   pgd = pgd_offset(vma->vm_mm, address);   in dev_pagemap_mapping_shift()
    275   if (!pgd_present(*pgd))   in dev_pagemap_mapping_shift()
    277   p4d = p4d_offset(pgd, address);   in dev_pagemap_mapping_shift()
D | rmap.c |
    713   pgd_t *pgd;   in mm_find_pmd() local
    719   pgd = pgd_offset(mm, address);   in mm_find_pmd()
    720   if (!pgd_present(*pgd))   in mm_find_pmd()
    723   p4d = p4d_offset(pgd, address);   in mm_find_pmd()
D | hugetlb.c |
    4942  pgd_t *pgd = pgd_offset(mm, *addr);   in huge_pmd_unshare() local
    4943  p4d_t *p4d = p4d_offset(pgd, *addr);   in huge_pmd_unshare()
    4979  pgd_t *pgd;   in huge_pte_alloc() local
    4984  pgd = pgd_offset(mm, addr);   in huge_pte_alloc()
    4985  p4d = p4d_alloc(mm, pgd, addr);   in huge_pte_alloc()
    5017  pgd_t *pgd;   in huge_pte_offset() local
    5022  pgd = pgd_offset(mm, addr);   in huge_pte_offset()
    5023  if (!pgd_present(*pgd))   in huge_pte_offset()
    5025  p4d = p4d_offset(pgd, addr);   in huge_pte_offset()
    5115  follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)   in follow_huge_pgd() argument
    [all …]
D | debug.c |
    169   mm->pgd, atomic_read(&mm->mm_users),   in dump_mm()
D | swapfile.c |
    2022  static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,   in unuse_p4d_range() argument
    2031      p4d = p4d_offset(pgd, addr);   in unuse_p4d_range()
    2047  pgd_t *pgd;   in unuse_vma() local
    2054  pgd = pgd_offset(vma->vm_mm, addr);   in unuse_vma()
    2057  if (pgd_none_or_clear_bad(pgd))   in unuse_vma()
    2059  ret = unuse_p4d_range(vma, pgd, addr, next, type,   in unuse_vma()
    2063  } while (pgd++, addr = next, addr != end);   in unuse_vma()
D | huge_memory.c |
    2356  pgd_t *pgd;   in split_huge_pmd_address() local
    2361  pgd = pgd_offset(vma->vm_mm, address);   in split_huge_pmd_address()
    2362  if (!pgd_present(*pgd))   in split_huge_pmd_address()
    2365  p4d = p4d_offset(pgd, address);   in split_huge_pmd_address()