/kernel/linux/linux-5.10/include/asm-generic/ |
D | pgtable-nopud.h |
16 typedef struct { p4d_t p4d; } pud_t; member
28 static inline int p4d_none(p4d_t p4d) { return 0; } in p4d_none() argument
29 static inline int p4d_bad(p4d_t p4d) { return 0; } in p4d_bad() argument
30 static inline int p4d_present(p4d_t p4d) { return 1; } in p4d_present() argument
31 static inline void p4d_clear(p4d_t *p4d) { } in p4d_clear() argument
32 #define pud_ERROR(pud) (p4d_ERROR((pud).p4d))
34 #define p4d_populate(mm, p4d, pud) do { } while (0) argument
35 #define p4d_populate_safe(mm, p4d, pud) do { } while (0) argument
42 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) in pud_offset() argument
44 return (pud_t *)p4d; in pud_offset()
[all …]
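For orientation: the pgtable-nopud.h stubs above are what let a generic five-level page-table walk compile unchanged on configurations where the pud level is folded away, since pud_offset() simply returns the p4d pointer cast to pud_t *. A minimal sketch of such a walk over a kernel virtual address (the function name is illustrative, not from the source; huge-page leaf entries are deliberately ignored):

#include <linux/mm.h>
#include <linux/pgtable.h>

/* Illustrative sketch: resolve a kernel virtual address to its pte by
 * descending pgd -> p4d -> pud -> pmd -> pte.  On a folded
 * configuration the p4d and pud steps collapse into pointer casts. */
static pte_t *walk_to_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d) || p4d_bad(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}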
|
/kernel/linux/linux-5.10/mm/kasan/ |
D | init.c |
50 static inline bool kasan_pud_table(p4d_t p4d) in kasan_pud_table() argument
52 return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud)); in kasan_pud_table()
55 static inline bool kasan_pud_table(p4d_t p4d) in kasan_pud_table() argument
146 static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr, in zero_pud_populate() argument
149 pud_t *pud = pud_offset(p4d, addr); in zero_pud_populate()
186 p4d_t *p4d = p4d_offset(pgd, addr); in zero_p4d_populate() local
195 p4d_populate(&init_mm, p4d, in zero_p4d_populate()
197 pud = pud_offset(p4d, addr); in zero_p4d_populate()
206 if (p4d_none(*p4d)) { in zero_p4d_populate()
210 p = pud_alloc(&init_mm, p4d, addr); in zero_p4d_populate()
[all …]
|
/kernel/linux/linux-5.10/arch/powerpc/include/asm/nohash/64/ |
D | pgtable-4k.h |
56 #define p4d_none(p4d) (!p4d_val(p4d)) argument
57 #define p4d_bad(p4d) (p4d_val(p4d) == 0) argument
58 #define p4d_present(p4d) (p4d_val(p4d) != 0) argument
59 #define p4d_page_vaddr(p4d) (p4d_val(p4d) & ~P4D_MASKED_BITS) argument
68 static inline pte_t p4d_pte(p4d_t p4d) in p4d_pte() argument
70 return __pte(p4d_val(p4d)); in p4d_pte()
77 extern struct page *p4d_page(p4d_t p4d);
|
/kernel/linux/linux-5.10/arch/x86/include/asm/ |
D | pgalloc.h |
115 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) in p4d_populate() argument
118 set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud))); in p4d_populate()
121 static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) in p4d_populate_safe() argument
124 set_p4d_safe(p4d, __p4d(_PAGE_TABLE | __pa(pud))); in p4d_populate_safe()
136 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) in pgd_populate() argument
140 paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); in pgd_populate()
141 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); in pgd_populate()
144 static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) in pgd_populate_safe() argument
148 paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); in pgd_populate_safe()
149 set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); in pgd_populate_safe()
[all …]
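As a usage sketch for the x86 p4d_populate() shown above: an empty p4d entry is filled with the physical address of a freshly allocated pud page plus the _PAGE_TABLE permission bits. The helper name and the get_zeroed_page() allocation below are assumptions for illustration, not taken from the source.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Illustrative sketch: wire a new pud table under an empty p4d entry.
 * Real callers typically use pud_alloc(), which performs this check,
 * the allocation and the p4d_populate() call for them. */
static void wire_pud(struct mm_struct *mm, p4d_t *p4d)
{
	if (p4d_none(*p4d)) {
		pud_t *pud = (pud_t *)get_zeroed_page(GFP_KERNEL);

		if (pud)
			p4d_populate(mm, p4d, pud);
	}
}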
|
D | pgtable_types.h |
333 typedef struct { p4dval_t p4d; } p4d_t; member
340 static inline p4dval_t native_p4d_val(p4d_t p4d) in native_p4d_val() argument
342 return p4d.p4d; in native_p4d_val()
352 static inline p4dval_t native_p4d_val(p4d_t p4d) in native_p4d_val() argument
354 return native_pgd_val(p4d.pgd); in native_p4d_val()
375 return (pud_t) { .p4d.pgd = native_make_pgd(val) }; in native_make_pud()
380 return native_pgd_val(pud.p4d.pgd); in native_pud_val()
401 return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) }; in native_make_pmd()
406 return native_pgd_val(pmd.pud.p4d.pgd); in native_pmd_val()
410 static inline p4dval_t p4d_pfn_mask(p4d_t p4d) in p4d_pfn_mask() argument
[all …]
|
/kernel/linux/linux-5.10/arch/x86/mm/ |
D | init_64.c |
72 DEFINE_POPULATE(p4d_populate, p4d, pud, init)
73 DEFINE_POPULATE(pgd_populate, pgd, p4d, init)
87 DEFINE_ENTRY(p4d, p4d, init)
186 p4d_t *p4d; in sync_global_pgds_l4() local
190 p4d = p4d_offset(pgd, addr); in sync_global_pgds_l4()
195 if (!p4d_none(*p4d_ref) && !p4d_none(*p4d)) in sync_global_pgds_l4()
196 BUG_ON(p4d_page_vaddr(*p4d) in sync_global_pgds_l4()
199 if (p4d_none(*p4d)) in sync_global_pgds_l4()
200 set_p4d(p4d, *p4d_ref); in sync_global_pgds_l4()
246 p4d_t *p4d = (p4d_t *)spp_getpage(); in fill_p4d() local
[all …]
|
D | ident_map.c |
68 p4d_t *p4d = p4d_page + p4d_index(addr); in ident_p4d_init() local
75 if (p4d_present(*p4d)) { in ident_p4d_init()
76 pud = pud_offset(p4d, 0); in ident_p4d_init()
91 set_p4d(p4d, __p4d(__pa(pud) | info->kernpg_flag)); in ident_p4d_init()
114 p4d_t *p4d; in kernel_ident_mapping_init() local
121 p4d = p4d_offset(pgd, 0); in kernel_ident_mapping_init()
122 result = ident_p4d_init(info, p4d, addr, next); in kernel_ident_mapping_init()
128 p4d = (p4d_t *)info->alloc_pgt_page(info->context); in kernel_ident_mapping_init()
129 if (!p4d) in kernel_ident_mapping_init()
131 result = ident_p4d_init(info, p4d, addr, next); in kernel_ident_mapping_init()
[all …]
|
D | kasan_init_64.c |
105 static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr, in kasan_populate_p4d() argument
111 if (p4d_none(*p4d)) { in kasan_populate_p4d()
114 p4d_populate(&init_mm, p4d, p); in kasan_populate_p4d()
117 pud = pud_offset(p4d, addr); in kasan_populate_p4d()
129 p4d_t *p4d; in kasan_populate_pgd() local
137 p4d = p4d_offset(pgd, addr); in kasan_populate_pgd()
140 kasan_populate_p4d(p4d, addr, next, nid); in kasan_populate_pgd()
141 } while (p4d++, addr = next, addr != end); in kasan_populate_pgd()
196 unsigned long p4d; in early_p4d_offset() local
201 p4d = pgd_val(*pgd) & PTE_PFN_MASK; in early_p4d_offset()
[all …]
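The kasan_populate_pgd() lines above show the standard range-split idiom at the p4d level. A condensed sketch of that loop shape, with illustrative names (the descent into the pud level is left as a comment):

#include <linux/pgtable.h>

/* Illustrative sketch: visit every p4d slot covering [addr, end),
 * clamping each step to the slot boundary with p4d_addr_end() and
 * skipping holes. */
static void walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		/* descend: pud_offset(p4d, addr) and walk [addr, next) */
	} while (p4d++, addr = next, addr != end);
}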
|
D | pti.c |
202 p4d_t *p4d; in pti_user_pagetable_walk_pmd() local
205 p4d = pti_user_pagetable_walk_p4d(address); in pti_user_pagetable_walk_pmd()
206 if (!p4d) in pti_user_pagetable_walk_pmd()
209 BUILD_BUG_ON(p4d_large(*p4d) != 0); in pti_user_pagetable_walk_pmd()
210 if (p4d_none(*p4d)) { in pti_user_pagetable_walk_pmd()
215 set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page))); in pti_user_pagetable_walk_pmd()
218 pud = pud_offset(p4d, address); in pti_user_pagetable_walk_pmd()
316 p4d_t *p4d; in pti_clone_pgtable() local
326 p4d = p4d_offset(pgd, addr); in pti_clone_pgtable()
327 if (WARN_ON(p4d_none(*p4d))) in pti_clone_pgtable()
[all …]
|
/kernel/linux/linux-5.10/arch/sh/mm/ |
D | hugetlbpage.c |
28 p4d_t *p4d; in huge_pte_alloc() local
35 p4d = p4d_alloc(mm, pgd, addr); in huge_pte_alloc()
36 if (p4d) { in huge_pte_alloc()
37 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
53 p4d_t *p4d; in huge_pte_offset() local
60 p4d = p4d_offset(pgd, addr); in huge_pte_offset()
61 if (p4d) { in huge_pte_offset()
62 pud = pud_offset(p4d, addr); in huge_pte_offset()
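This huge_pte_alloc() excerpt, like the mips, sparc, parisc and ia64 variants further down, follows the same allocate-down shape. A condensed sketch of that shape for an ordinary (non-huge) mapping, with an illustrative name:

#include <linux/mm.h>

/* Illustrative sketch: descend from pgd to pte for a user address,
 * allocating any intermediate table that is missing.  Each *_alloc()
 * helper returns the existing table when one is already wired in. */
static pte_t *alloc_walk(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	return pte_alloc_map(mm, pmd, addr);
}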
|
/kernel/linux/linux-5.10/arch/mips/include/asm/ |
D | pgtable-64.h |
190 static inline int p4d_none(p4d_t p4d) in p4d_none() argument
192 return p4d_val(p4d) == (unsigned long)invalid_pud_table; in p4d_none()
195 static inline int p4d_bad(p4d_t p4d) in p4d_bad() argument
197 if (unlikely(p4d_val(p4d) & ~PAGE_MASK)) in p4d_bad()
203 static inline int p4d_present(p4d_t p4d) in p4d_present() argument
205 return p4d_val(p4d) != (unsigned long)invalid_pud_table; in p4d_present()
213 static inline unsigned long p4d_page_vaddr(p4d_t p4d) in p4d_page_vaddr() argument
215 return p4d_val(p4d); in p4d_page_vaddr()
218 #define p4d_phys(p4d) virt_to_phys((void *)p4d_val(p4d)) argument
219 #define p4d_page(p4d) (pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT)) argument
[all …]
|
/kernel/linux/linux-5.10/arch/mips/mm/ |
D | hugetlbpage.c |
28 p4d_t *p4d; in huge_pte_alloc() local
33 p4d = p4d_alloc(mm, pgd, addr); in huge_pte_alloc()
34 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
45 p4d_t *p4d; in huge_pte_offset() local
51 p4d = p4d_offset(pgd, addr); in huge_pte_offset()
52 if (p4d_present(*p4d)) { in huge_pte_offset()
53 pud = pud_offset(p4d, addr); in huge_pte_offset()
|
/kernel/linux/linux-5.10/arch/arm/mm/ |
D | pgd.c |
145 p4d_t *p4d; in pgd_free() local
157 p4d = p4d_offset(pgd, 0); in pgd_free()
158 if (p4d_none_or_clear_bad(p4d)) in pgd_free()
161 pud = pud_offset(p4d, 0); in pgd_free()
178 p4d_clear(p4d); in pgd_free()
182 p4d_free(mm, p4d); in pgd_free()
193 p4d = p4d_offset(pgd, 0); in pgd_free()
194 if (p4d_none_or_clear_bad(p4d)) in pgd_free()
196 pud = pud_offset(p4d, 0); in pgd_free()
203 p4d_clear(p4d); in pgd_free()
[all …]
|
/kernel/linux/linux-5.10/arch/s390/mm/ |
D | vmem.c |
310 static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end, in modify_pud_table() argument
321 pud = pud_offset(p4d, addr); in modify_pud_table()
364 static void try_free_pud_table(p4d_t *p4d, unsigned long start) in try_free_pud_table() argument
378 pud = pud_offset(p4d, start); in try_free_pud_table()
383 vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER); in try_free_pud_table()
384 p4d_clear(p4d); in try_free_pud_table()
392 p4d_t *p4d; in modify_p4d_table() local
395 p4d = p4d_offset(pgd, addr); in modify_p4d_table()
396 for (; addr < end; addr = next, p4d++) { in modify_p4d_table()
399 if (p4d_none(*p4d)) in modify_p4d_table()
[all …]
|
D | page-states.c |
120 static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end) in mark_kernel_pud() argument
127 pud = pud_offset(p4d, addr); in mark_kernel_pud()
145 p4d_t *p4d; in mark_kernel_p4d() local
148 p4d = p4d_offset(pgd, addr); in mark_kernel_p4d()
151 if (p4d_none(*p4d)) in mark_kernel_p4d()
153 if (!p4d_folded(*p4d)) { in mark_kernel_p4d()
154 page = virt_to_page(p4d_val(*p4d)); in mark_kernel_p4d()
158 mark_kernel_pud(p4d, addr, next); in mark_kernel_p4d()
159 } while (p4d++, addr = next, addr != end); in mark_kernel_p4d()
|
/kernel/linux/linux-5.10/mm/ |
D | ioremap.c |
152 static inline int ioremap_pud_range(p4d_t *p4d, unsigned long addr, in ioremap_pud_range() argument
159 pud = pud_alloc_track(&init_mm, p4d, addr, mask); in ioremap_pud_range()
176 static int ioremap_try_huge_p4d(p4d_t *p4d, unsigned long addr, in ioremap_try_huge_p4d() argument
192 if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr)) in ioremap_try_huge_p4d()
195 return p4d_set_huge(p4d, phys_addr, prot); in ioremap_try_huge_p4d()
202 p4d_t *p4d; in ioremap_p4d_range() local
205 p4d = p4d_alloc_track(&init_mm, pgd, addr, mask); in ioremap_p4d_range()
206 if (!p4d) in ioremap_p4d_range()
211 if (ioremap_try_huge_p4d(p4d, addr, next, phys_addr, prot)) { in ioremap_p4d_range()
216 if (ioremap_pud_range(p4d, addr, next, phys_addr, prot, mask)) in ioremap_p4d_range()
[all …]
|
D | sparse-vmemmap.c |
183 pud_t * __meminit vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node) in vmemmap_pud_populate() argument
185 pud_t *pud = pud_offset(p4d, addr); in vmemmap_pud_populate()
197 p4d_t *p4d = p4d_offset(pgd, addr); in vmemmap_p4d_populate() local
198 if (p4d_none(*p4d)) { in vmemmap_p4d_populate()
202 p4d_populate(&init_mm, p4d, p); in vmemmap_p4d_populate()
204 return p4d; in vmemmap_p4d_populate()
224 p4d_t *p4d; in vmemmap_populate_basepages() local
233 p4d = vmemmap_p4d_populate(pgd, addr, node); in vmemmap_populate_basepages()
234 if (!p4d) in vmemmap_populate_basepages()
236 pud = vmemmap_pud_populate(p4d, addr, node); in vmemmap_populate_basepages()
|
/kernel/linux/linux-5.10/arch/x86/power/ |
D | hibernate_64.c |
32 p4d_t *p4d = NULL; in set_up_temporary_text_mapping() local
55 p4d = (p4d_t *)get_safe_page(GFP_ATOMIC); in set_up_temporary_text_mapping()
56 if (!p4d) in set_up_temporary_text_mapping()
72 if (p4d) { in set_up_temporary_text_mapping()
74 pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); in set_up_temporary_text_mapping()
76 set_p4d(p4d + p4d_index(restore_jump_address), new_p4d); in set_up_temporary_text_mapping()
|
D | hibernate.c |
153 p4d_t *p4d; in relocate_restore_code() local
167 p4d = p4d_offset(pgd, relocated_restore_code); in relocate_restore_code()
168 if (p4d_large(*p4d)) { in relocate_restore_code()
169 set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX)); in relocate_restore_code()
172 pud = pud_offset(p4d, relocated_restore_code); in relocate_restore_code()
|
/kernel/linux/linux-5.10/arch/sparc/mm/ |
D | hugetlbpage.c |
279 p4d_t *p4d; in huge_pte_alloc() local
284 p4d = p4d_offset(pgd, addr); in huge_pte_alloc()
285 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
302 p4d_t *p4d; in huge_pte_offset() local
309 p4d = p4d_offset(pgd, addr); in huge_pte_offset()
310 if (p4d_none(*p4d)) in huge_pte_offset()
312 pud = pud_offset(p4d, addr); in huge_pte_offset()
457 static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, in hugetlb_free_pud_range() argument
466 pud = pud_offset(p4d, addr); in hugetlb_free_pud_range()
489 pud = pud_offset(p4d, start); in hugetlb_free_pud_range()
[all …]
|
/kernel/linux/linux-5.10/arch/parisc/mm/ |
D | hugetlbpage.c |
51 p4d_t *p4d; in huge_pte_alloc() local
64 p4d = p4d_offset(pgd, addr); in huge_pte_alloc()
65 pud = pud_alloc(mm, p4d, addr); in huge_pte_alloc()
78 p4d_t *p4d; in huge_pte_offset() local
87 p4d = p4d_offset(pgd, addr); in huge_pte_offset()
88 if (!p4d_none(*p4d)) { in huge_pte_offset()
89 pud = pud_offset(p4d, addr); in huge_pte_offset()
|
/kernel/linux/linux-5.10/arch/powerpc/mm/ |
D | pgtable_64.c |
102 struct page *p4d_page(p4d_t p4d) in p4d_page() argument
104 if (p4d_is_leaf(p4d)) { in p4d_page()
106 VM_WARN_ON(!p4d_huge(p4d)); in p4d_page()
107 return pte_page(p4d_pte(p4d)); in p4d_page()
109 return virt_to_page(p4d_page_vaddr(p4d)); in p4d_page()
|
D | pgtable.c |
299 p4d_t *p4d; in assert_pte_locked() local
307 p4d = p4d_offset(pgd, addr); in assert_pte_locked()
308 BUG_ON(p4d_none(*p4d)); in assert_pte_locked()
309 pud = pud_offset(p4d, addr); in assert_pte_locked()
350 p4d_t p4d, *p4dp; in __find_linux_pte() local
372 p4d = READ_ONCE(*p4dp); in __find_linux_pte()
375 if (p4d_none(p4d)) in __find_linux_pte()
378 if (p4d_is_leaf(p4d)) { in __find_linux_pte()
383 if (is_hugepd(__hugepd(p4d_val(p4d)))) { in __find_linux_pte()
384 hpdp = (hugepd_t *)&p4d; in __find_linux_pte()
[all …]
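__find_linux_pte() above is a lockless walker: it snapshots each table entry once with READ_ONCE() and then works only on the local copy. A minimal sketch of that idiom at the p4d level (the function name is illustrative, not from the source):

#include <linux/pgtable.h>

/* Illustrative sketch: take one READ_ONCE() snapshot of the p4d entry
 * and decide from the copy, so a concurrent update can never be seen
 * half-way through the checks. */
static pud_t *lockless_pud_offset(p4d_t *p4dp, unsigned long addr)
{
	p4d_t p4d = READ_ONCE(*p4dp);

	if (p4d_none(p4d))
		return NULL;
	return pud_offset(&p4d, addr);
}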
|
/kernel/linux/linux-5.10/arch/ia64/mm/ |
D | hugetlbpage.c |
32 p4d_t *p4d; in huge_pte_alloc() local
38 p4d = p4d_offset(pgd, taddr); in huge_pte_alloc()
39 pud = pud_alloc(mm, p4d, taddr); in huge_pte_alloc()
53 p4d_t *p4d; in huge_pte_offset() local
60 p4d = p4d_offset(pgd, taddr); in huge_pte_offset()
61 if (p4d_present(*p4d)) { in huge_pte_offset()
62 pud = pud_offset(p4d, taddr); in huge_pte_offset()
|
/kernel/linux/linux-5.10/include/linux/ |
D | pgtable.h |
98 static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) in pud_offset() argument
100 return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address); in pud_offset()
528 #define p4d_access_permitted(p4d, write) \ argument
529 (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
589 #define set_p4d_safe(p4dp, p4d) \ argument
591 WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
592 set_p4d(p4dp, p4d); \
723 #define p4d_clear_bad(p4d) do { } while (0) argument
729 #define pud_clear_bad(p4d) do { } while (0) argument
745 static inline int p4d_none_or_clear_bad(p4d_t *p4d) in p4d_none_or_clear_bad() argument
[all …]
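For reference, p4d_none_or_clear_bad() (truncated above) conceptually behaves like the sketch below: a "none" entry is skipped, a corrupt entry is reported and cleared so the walk can continue. This is a hedged paraphrase of the generic helper, not a verbatim copy of the source.

#include <linux/pgtable.h>

/* Illustrative paraphrase of the none-or-clear-bad idiom used by
 * page-table walkers. */
static inline int example_p4d_skip(p4d_t *p4d)
{
	if (p4d_none(*p4d))
		return 1;
	if (unlikely(p4d_bad(*p4d))) {
		p4d_clear_bad(p4d);	/* logs and clears the bad entry */
		return 1;
	}
	return 0;
}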
|