Searched refs:pgd (Results 1 – 25 of 306) sorted by relevance

/arch/nds32/mm/
mm-nds32.c
41 void pgd_free(struct mm_struct *mm, pgd_t * pgd) in pgd_free() argument
46 if (!pgd) in pgd_free()
49 pmd = (pmd_t *) pgd; in pgd_free()
60 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE); in pgd_free()
65 free_pages((unsigned long)pgd, 0); in pgd_free()
76 pgd_t *pgd; in setup_mm_for_reboot() local
80 if (current->mm && current->mm->pgd) in setup_mm_for_reboot()
81 pgd = current->mm->pgd; in setup_mm_for_reboot()
83 pgd = init_mm.pgd; in setup_mm_for_reboot()
87 pmd = pmd_offset(pgd + i, i << PGDIR_SHIFT); in setup_mm_for_reboot()
fault.c
25 pgd_t *pgd; in show_pte() local
29 pr_alert("pgd = %p\n", mm->pgd); in show_pte()
30 pgd = pgd_offset(mm, addr); in show_pte()
31 pr_alert("[%08lx] *pgd=%08lx", addr, pgd_val(*pgd)); in show_pte()
36 if (pgd_none(*pgd)) in show_pte()
39 if (pgd_bad(*pgd)) { in show_pte()
44 pmd = pmd_offset(pgd, addr); in show_pte()
361 pgd_t *pgd, *pgd_k; in do_page_fault() local
366 pgd = (pgd_t *) __va(__nds32__mfsr(NDS32_SR_L1_PPTB)) + index; in do_page_fault()
367 pgd_k = init_mm.pgd + index; in do_page_fault()
[all …]
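
The show_pte() hits above are the canonical software page-table walk: index the top-level table with pgd_offset(), check the entry with pgd_none()/pgd_bad(), and only then descend a level. A minimal user-space model of that pattern, written as a hedged sketch: the two-level geometry, flag bit, and types below are invented for illustration, only the helper names echo the kernel's.

    #include <stdint.h>
    #include <stdio.h>

    #define PGDIR_SHIFT   22
    #define PAGE_SHIFT    12
    #define PTRS_PER_PGD  1024
    #define PTRS_PER_PTE  1024
    #define PRESENT       0x1u

    typedef struct { uintptr_t val; } pgd_t;  /* top-level entry */
    typedef struct { uintptr_t val; } pte_t;  /* leaf entry      */

    static int pgd_none(pgd_t e) { return e.val == 0; }
    static int pgd_bad(pgd_t e)  { return !(e.val & PRESENT); }

    /* pgd_offset(): pick the top-level slot covering addr. */
    static pgd_t *pgd_offset(pgd_t *base, uintptr_t addr)
    {
        return base + ((addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));
    }

    /* Descend: mask the flag bit off the entry to recover the
     * leaf table's address, then index it by the address. */
    static pte_t *pte_offset(pgd_t *pgd, uintptr_t addr)
    {
        pte_t *table = (pte_t *)(pgd->val & ~(uintptr_t)PRESENT);
        return table + ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
    }

    /* show_pte()-style reporting walk: print, bail out on holes. */
    static void show_pte(pgd_t *base, uintptr_t addr)
    {
        pgd_t *pgd = pgd_offset(base, addr);
        printf("[%08lx] *pgd=%08lx\n",
               (unsigned long)addr, (unsigned long)pgd->val);
        if (pgd_none(*pgd) || pgd_bad(*pgd))
            return;
        printf("          *pte=%08lx\n",
               (unsigned long)pte_offset(pgd, addr)->val);
    }

    int main(void)
    {
        static pgd_t top[PTRS_PER_PGD];
        static pte_t leaf[PTRS_PER_PTE];
        uintptr_t addr = 0x00400123;

        leaf[(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)].val
            = 0x1000 | PRESENT;
        pgd_offset(top, addr)->val = (uintptr_t)leaf | PRESENT;
        show_pte(top, addr);
        return 0;
    }
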
/arch/arm/mm/
pgd.c
21 #define __pgd_free(pgd) kfree(pgd) argument
24 #define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) argument
118 pgd_t *pgd; in pgd_free() local
126 pgd = pgd_base + pgd_index(0); in pgd_free()
127 if (pgd_none_or_clear_bad(pgd)) in pgd_free()
130 pud = pud_offset(pgd, 0); in pgd_free()
147 pgd_clear(pgd); in pgd_free()
154 for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) { in pgd_free()
155 if (pgd_none_or_clear_bad(pgd)) in pgd_free()
157 if (pgd_val(*pgd) & L_PGD_SWAPPER) in pgd_free()
[all …]
/arch/powerpc/include/asm/nohash/64/
pgtable-4k.h
56 #define pgd_none(pgd) (!pgd_val(pgd)) argument
57 #define pgd_bad(pgd) (pgd_val(pgd) == 0) argument
58 #define pgd_present(pgd) (pgd_val(pgd) != 0) argument
59 #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) argument
68 static inline pte_t pgd_pte(pgd_t pgd) in pgd_pte() argument
70 return __pte(pgd_val(pgd)); in pgd_pte()
77 extern struct page *pgd_page(pgd_t pgd);
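
The powerpc nohash variant implements the whole top-level API as macros over the raw entry value: an all-zero entry is "none", and pgd_page_vaddr() recovers the next-level table's address simply by masking off the low flag bits. A hedged standalone illustration of that mask-and-recover idiom; the 12-bit flag field below is invented, only the macro names follow the hit.

    #include <assert.h>
    #include <stdint.h>

    /* Invented layout: low 12 bits of an entry are flags, the rest
     * is the next-level table's virtual address. */
    #define PGD_MASKED_BITS 0xfffull

    typedef struct { uint64_t val; } pgd_t;
    #define pgd_val(e)        ((e).val)
    #define pgd_none(e)       (!pgd_val(e))
    #define pgd_present(e)    (pgd_val(e) != 0)
    #define pgd_page_vaddr(e) (pgd_val(e) & ~PGD_MASKED_BITS)

    int main(void)
    {
        uint64_t table = 0x7f12345000ull;      /* page aligned   */
        pgd_t e = { table | 0x3 };             /* two flag bits  */

        assert(pgd_present(e) && !pgd_none(e));
        assert(pgd_page_vaddr(e) == table);    /* flags stripped */
        return 0;
    }
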
/arch/powerpc/include/asm/book3s/64/
pgalloc.h
39 static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd) in radix__pgd_free() argument
42 free_page((unsigned long)pgd); in radix__pgd_free()
44 free_pages((unsigned long)pgd, 4); in radix__pgd_free()
50 pgd_t *pgd; in pgd_alloc() local
55 pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), in pgd_alloc()
57 if (unlikely(!pgd)) in pgd_alloc()
58 return pgd; in pgd_alloc()
65 kmemleak_no_scan(pgd); in pgd_alloc()
76 memset(pgd, 0, PGD_TABLE_SIZE); in pgd_alloc()
78 return pgd; in pgd_alloc()
[all …]
/arch/sh/mm/
hugetlbpage.c
28 pgd_t *pgd; in huge_pte_alloc() local
33 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
34 if (pgd) { in huge_pte_alloc()
35 pud = pud_alloc(mm, pgd, addr); in huge_pte_alloc()
49 pgd_t *pgd; in huge_pte_offset() local
54 pgd = pgd_offset(mm, addr); in huge_pte_offset()
55 if (pgd) { in huge_pte_offset()
56 pud = pud_offset(pgd, addr); in huge_pte_offset()
fault.c
39 pgd_t *pgd; in show_pte() local
42 pgd = mm->pgd; in show_pte()
44 pgd = get_TTB(); in show_pte()
46 if (unlikely(!pgd)) in show_pte()
47 pgd = swapper_pg_dir; in show_pte()
50 printk(KERN_ALERT "pgd = %p\n", pgd); in show_pte()
51 pgd += pgd_index(addr); in show_pte()
53 (u32)(sizeof(*pgd) * 2), (u64)pgd_val(*pgd)); in show_pte()
60 if (pgd_none(*pgd)) in show_pte()
63 if (pgd_bad(*pgd)) { in show_pte()
[all …]
/arch/parisc/include/asm/
pgalloc.h
26 pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, in pgd_alloc() local
28 pgd_t *actual_pgd = pgd; in pgd_alloc()
30 if (likely(pgd != NULL)) { in pgd_alloc()
31 memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); in pgd_alloc()
40 + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)); in pgd_alloc()
43 __pgd_val_set(*pgd, PxD_FLAG_ATTACHED); in pgd_alloc()
50 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
53 pgd -= PTRS_PER_PGD; in pgd_free()
55 free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); in pgd_free()
62 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd) in pgd_populate() argument
[all …]
pgtable.h
93 spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags);\
97 spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags);\
346 #define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd))) argument
347 #define pgd_page(pgd) virt_to_page((void *)pgd_page_vaddr(pgd)) argument
354 static inline void pgd_clear(pgd_t *pgd) { in pgd_clear() argument
356 if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED) in pgd_clear()
361 __pgd_val_set(*pgd, 0); in pgd_clear()
369 static inline int pgd_none(pgd_t pgd) { return 0; } in pgd_none() argument
370 static inline int pgd_bad(pgd_t pgd) { return 0; } in pgd_bad() argument
371 static inline int pgd_present(pgd_t pgd) { return 1; } in pgd_present() argument
[all …]
/arch/hexagon/include/asm/
pgalloc.h
23 pgd_t *pgd; in pgd_alloc() local
25 pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); in pgd_alloc()
35 memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t)); in pgd_alloc()
39 mm->context.ptbase = __pa(pgd); in pgd_alloc()
41 return pgd; in pgd_alloc()
44 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
46 free_page((unsigned long) pgd); in pgd_free()
88 pmdindex = (pgd_t *)pmd - mm->pgd; in pmd_populate_kernel()
89 ppmd = (pmd_t *)current->active_mm->pgd + pmdindex; in pmd_populate_kernel()
/arch/arm/include/asm/
stage2_pgtable.h
19 #define stage2_pgd_none(kvm, pgd) pgd_none(pgd) argument
20 #define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd) argument
21 #define stage2_pgd_present(kvm, pgd) pgd_present(pgd) argument
22 #define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud) argument
23 #define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address) argument
/arch/riscv/include/asm/
pgalloc.h
44 pgd_t *pgd; in pgd_alloc() local
46 pgd = (pgd_t *)__get_free_page(GFP_KERNEL); in pgd_alloc()
47 if (likely(pgd != NULL)) { in pgd_alloc()
48 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); in pgd_alloc()
50 memcpy(pgd + USER_PTRS_PER_PGD, in pgd_alloc()
51 init_mm.pgd + USER_PTRS_PER_PGD, in pgd_alloc()
54 return pgd; in pgd_alloc()
57 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) in pgd_free() argument
59 free_page((unsigned long)pgd); in pgd_free()
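
The riscv pgd_alloc() hits (and the hexagon ones above) show the standard constructor for a new address space: zero the user slots, then copy the kernel slots from init_mm's master table so every process shares the same kernel mappings. A toy version of that split-and-copy, with invented table geometry and malloc standing in for the kernel's page allocator:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define PTRS_PER_PGD      512
    #define USER_PTRS_PER_PGD 256   /* invented user/kernel split */

    typedef struct { uintptr_t val; } pgd_t;

    /* Stand-in for init_mm.pgd, the master table. */
    static pgd_t master_pgd[PTRS_PER_PGD];

    static pgd_t *toy_pgd_alloc(void)
    {
        pgd_t *pgd = malloc(PTRS_PER_PGD * sizeof(pgd_t));
        if (pgd != NULL) {
            /* user half starts empty ... */
            memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
            /* ... kernel half is copied from the master table. */
            memcpy(pgd + USER_PTRS_PER_PGD,
                   master_pgd + USER_PTRS_PER_PGD,
                   (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
    }

    int main(void)
    {
        pgd_t *pgd = toy_pgd_alloc();
        free(pgd);              /* the kernel frees the page instead */
        return 0;
    }
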
/arch/parisc/mm/
hugetlbpage.c
51 pgd_t *pgd; in huge_pte_alloc() local
63 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
64 pud = pud_alloc(mm, pgd, addr); in huge_pte_alloc()
76 pgd_t *pgd; in huge_pte_offset() local
83 pgd = pgd_offset(mm, addr); in huge_pte_offset()
84 if (!pgd_none(*pgd)) { in huge_pte_offset()
85 pud = pud_offset(pgd, addr); in huge_pte_offset()
142 spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); in set_huge_pte_at()
144 spin_unlock_irqrestore(pgd_spinlock((mm)->pgd), flags); in set_huge_pte_at()
154 spin_lock_irqsave(pgd_spinlock((mm)->pgd), flags); in huge_ptep_get_and_clear()
[all …]
/arch/x86/mm/
pgtable.c
86 static inline void pgd_list_add(pgd_t *pgd) in pgd_list_add() argument
88 struct page *page = virt_to_page(pgd); in pgd_list_add()
93 static inline void pgd_list_del(pgd_t *pgd) in pgd_list_del() argument
95 struct page *page = virt_to_page(pgd); in pgd_list_del()
106 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm) in pgd_set_mm() argument
108 virt_to_page(pgd)->pt_mm = mm; in pgd_set_mm()
116 static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) in pgd_ctor() argument
124 clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, in pgd_ctor()
131 pgd_set_mm(pgd, mm); in pgd_ctor()
132 pgd_list_add(pgd); in pgd_ctor()
[all …]
kasan_init_64.c
126 static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr, in kasan_populate_pgd() argument
133 if (pgd_none(*pgd)) { in kasan_populate_pgd()
135 pgd_populate(&init_mm, pgd, p); in kasan_populate_pgd()
138 p4d = p4d_offset(pgd, addr); in kasan_populate_pgd()
148 pgd_t *pgd; in kasan_populate_shadow() local
153 pgd = pgd_offset_k(addr); in kasan_populate_shadow()
156 kasan_populate_pgd(pgd, addr, next, nid); in kasan_populate_shadow()
157 } while (pgd++, addr = next, addr != end); in kasan_populate_shadow()
174 pgd_t *pgd; in clear_pgds() local
179 pgd = pgd_offset_k(start); in clear_pgds()
[all …]
pti.c
125 pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd) in __pti_set_user_pgtbl() argument
137 return pgd; in __pti_set_user_pgtbl()
143 kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd; in __pti_set_user_pgtbl()
158 if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) && in __pti_set_user_pgtbl()
160 pgd.pgd |= _PAGE_NX; in __pti_set_user_pgtbl()
163 return pgd; in __pti_set_user_pgtbl()
174 pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address)); in pti_user_pagetable_walk_p4d() local
182 if (pgd_none(*pgd)) { in pti_user_pagetable_walk_p4d()
187 set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page))); in pti_user_pagetable_walk_p4d()
189 BUILD_BUG_ON(pgd_large(*pgd) != 0); in pti_user_pagetable_walk_p4d()
[all …]
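
The pti.c hit mirrors each kernel pgd write into the user copy of the page tables and, for entries marked both _PAGE_USER and _PAGE_PRESENT, sets _PAGE_NX in the kernel-side copy so user-mapped memory can never be executed with kernel privileges. The test-and-set itself is plain bit arithmetic; a sketch using x86-style bit positions, reproduced here from memory and best treated as illustrative:

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_PRESENT (1ull << 0)
    #define _PAGE_USER    (1ull << 2)
    #define _PAGE_NX      (1ull << 63)

    /* Echoes the __pti_set_user_pgtbl() hit: user-visible and
     * present means poison the kernel view with NX. */
    static uint64_t poison_kernel_view(uint64_t pgd)
    {
        if ((pgd & (_PAGE_USER | _PAGE_PRESENT)) ==
            (_PAGE_USER | _PAGE_PRESENT))
            pgd |= _PAGE_NX;
        return pgd;
    }

    int main(void)
    {
        assert(poison_kernel_view(_PAGE_USER | _PAGE_PRESENT)
               & _PAGE_NX);
        /* a kernel-only entry stays executable */
        assert(!(poison_kernel_view(_PAGE_PRESENT) & _PAGE_NX));
        return 0;
    }
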
debug_pagetables.c
18 if (current->mm->pgd) { in ptdump_curknl_show()
20 ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, false); in ptdump_curknl_show()
31 if (current->mm->pgd) { in ptdump_curusr_show()
33 ptdump_walk_pgd_level_debugfs(m, current->mm->pgd, true); in ptdump_curusr_show()
45 if (efi_mm.pgd) in ptdump_efi_show()
46 ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false); in ptdump_efi_show()
/arch/mips/mm/
hugetlbpage.c
27 pgd_t *pgd; in huge_pte_alloc() local
31 pgd = pgd_offset(mm, addr); in huge_pte_alloc()
32 pud = pud_alloc(mm, pgd, addr); in huge_pte_alloc()
42 pgd_t *pgd; in huge_pte_offset() local
46 pgd = pgd_offset(mm, addr); in huge_pte_offset()
47 if (pgd_present(*pgd)) { in huge_pte_offset()
48 pud = pud_offset(pgd, addr); in huge_pte_offset()
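
Across the sh, parisc, and mips hits, the two hugetlb helpers walk the same levels but differ in failure policy: huge_pte_offset() returns NULL at the first missing level, while huge_pte_alloc() allocates intermediate tables on the way down. A toy two-level rendering of that lookup/alloc split, with invented types and no locking or error unwinding:

    #include <stdint.h>
    #include <stdlib.h>

    #define PTRS 512

    typedef struct { void *next; } pgd_t;   /* points to a pte table */
    typedef struct { uint64_t val; } pte_t;

    /* offset-style: never allocates, NULL on a hole. */
    static pte_t *toy_pte_offset(pgd_t *pgd, unsigned idx)
    {
        return pgd->next ? (pte_t *)pgd->next + idx : NULL;
    }

    /* alloc-style: fills in the missing level, then indexes it. */
    static pte_t *toy_pte_alloc(pgd_t *pgd, unsigned idx)
    {
        if (!pgd->next)
            pgd->next = calloc(PTRS, sizeof(pte_t));
        return pgd->next ? (pte_t *)pgd->next + idx : NULL;
    }

    int main(void)
    {
        pgd_t pgd = { 0 };
        if (toy_pte_offset(&pgd, 7))  return 1; /* hole: lookup fails */
        if (!toy_pte_alloc(&pgd, 7))  return 1; /* alloc fills it in  */
        if (!toy_pte_offset(&pgd, 7)) return 1; /* now it resolves    */
        free(pgd.next);
        return 0;
    }
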
/arch/um/kernel/
mem.c
98 pgd_t *pgd; in fixrange_init() local
107 pgd = pgd_base + i; in fixrange_init()
109 for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) { in fixrange_init()
110 pud = pud_offset(pgd, vaddr); in fixrange_init()
126 pgd_t *pgd; in fixaddr_user_init() local
146 pgd = swapper_pg_dir + pgd_index(vaddr); in fixaddr_user_init()
147 pud = pud_offset(pgd, vaddr); in fixaddr_user_init()
196 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL); in pgd_alloc() local
198 if (pgd) { in pgd_alloc()
199 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); in pgd_alloc()
[all …]
/arch/x86/power/
hibernate_64.c
28 static int set_up_temporary_text_mapping(pgd_t *pgd) in set_up_temporary_text_mapping() argument
77 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); in set_up_temporary_text_mapping()
81 set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); in set_up_temporary_text_mapping()
100 pgd_t *pgd; in set_up_temporary_mappings() local
104 pgd = (pgd_t *)get_safe_page(GFP_ATOMIC); in set_up_temporary_mappings()
105 if (!pgd) in set_up_temporary_mappings()
109 result = set_up_temporary_text_mapping(pgd); in set_up_temporary_mappings()
118 result = kernel_ident_mapping_init(&info, pgd, mstart, mend); in set_up_temporary_mappings()
123 temp_pgt = __pa(pgd); in set_up_temporary_mappings()
hibernate_32.c
30 static pmd_t *resume_one_md_table_init(pgd_t *pgd) in resume_one_md_table_init() argument
41 set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); in resume_one_md_table_init()
42 p4d = p4d_offset(pgd, 0); in resume_one_md_table_init()
47 p4d = p4d_offset(pgd, 0); in resume_one_md_table_init()
84 pgd_t *pgd; in resume_physical_mapping_init() local
90 pgd = pgd_base + pgd_idx; in resume_physical_mapping_init()
93 for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { in resume_physical_mapping_init()
94 pmd = resume_one_md_table_init(pgd); in resume_physical_mapping_init()
147 pgd_t *pgd; in set_up_temporary_text_mapping() local
151 pgd = pgd_base + pgd_index(restore_jump_address); in set_up_temporary_text_mapping()
[all …]
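
Both hibernation hits rebuild a linear mapping at resume by marching an index across the top-level slots and installing a fresh mid-level table per slot. A toy loop in the same shape, using an invented geometry where each top-level slot covers 4 MiB; the real code allocates pmd/pte tables from safe pages rather than storing the slot's base address directly:

    #include <stdint.h>

    #define PGDIR_SHIFT   22        /* 4 MiB per slot, invented */
    #define PTRS_PER_PGD  1024
    #define _PAGE_PRESENT 0x1u

    typedef struct { uint32_t val; } pgd_t;

    /* resume_physical_mapping_init()-style loop over [start, end). */
    static void toy_linear_map(pgd_t *base, uint32_t start, uint32_t end)
    {
        unsigned idx = start >> PGDIR_SHIFT;
        pgd_t *pgd = base + idx;

        for (; idx < PTRS_PER_PGD &&
               ((uint32_t)idx << PGDIR_SHIFT) < end; pgd++, idx++)
            pgd->val = ((uint32_t)idx << PGDIR_SHIFT) | _PAGE_PRESENT;
    }

    int main(void)
    {
        static pgd_t pgd[PTRS_PER_PGD];
        toy_linear_map(pgd, 0, 64u << 20);  /* first 64 MiB */
        return pgd[0].val == _PAGE_PRESENT ? 0 : 1;
    }
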
/arch/mips/include/asm/
pgtable-64.h
191 static inline int pgd_none(pgd_t pgd) in pgd_none() argument
193 return pgd_val(pgd) == (unsigned long)invalid_pud_table; in pgd_none()
196 static inline int pgd_bad(pgd_t pgd) in pgd_bad() argument
198 if (unlikely(pgd_val(pgd) & ~PAGE_MASK)) in pgd_bad()
204 static inline int pgd_present(pgd_t pgd) in pgd_present() argument
206 return pgd_val(pgd) != (unsigned long)invalid_pud_table; in pgd_present()
216 static inline unsigned long pgd_page_vaddr(pgd_t pgd) in pgd_page_vaddr() argument
218 return pgd_val(pgd); in pgd_page_vaddr()
221 #define pgd_phys(pgd) virt_to_phys((void *)pgd_val(pgd)) argument
222 #define pgd_page(pgd) (pfn_to_page(pgd_phys(pgd) >> PAGE_SHIFT)) argument
[all …]
/arch/csky/include/asm/
mmu_context.h
17 #define TLBMISS_HANDLER_SETUP_PGD(pgd) \ argument
18 setup_pgd(__pa(pgd), false)
20 #define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \ argument
21 setup_pgd(__pa(pgd), true)
44 TLBMISS_HANDLER_SETUP_PGD(next->pgd); in switch_mm()
/arch/x86/kernel/
machine_kexec_32.c
57 free_pages((unsigned long)image->arch.pgd, PGD_ALLOCATION_ORDER); in machine_kexec_free_page_tables()
58 image->arch.pgd = NULL; in machine_kexec_free_page_tables()
73 image->arch.pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, in machine_kexec_alloc_page_tables()
81 if (!image->arch.pgd || in machine_kexec_alloc_page_tables()
92 pgd_t *pgd, pmd_t *pmd, pte_t *pte, in machine_kexec_page_table_set_one() argument
98 pgd += pgd_index(vaddr); in machine_kexec_page_table_set_one()
100 if (!(pgd_val(*pgd) & _PAGE_PRESENT)) in machine_kexec_page_table_set_one()
101 set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT)); in machine_kexec_page_table_set_one()
103 p4d = p4d_offset(pgd, vaddr); in machine_kexec_page_table_set_one()
122 image->arch.pgd, pmd, image->arch.pte0, in machine_kexec_prepare_page_tables()
[all …]
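
machine_kexec_page_table_set_one() wires up a single virtual page: index the pgd by the address, populate the slot if it is not yet present, then descend and set the leaf entry. A hedged one-page version over the same invented two-level toy table as the nds32 sketch earlier; only the populate-if-absent shape is taken from the hit above:

    #include <stdint.h>

    #define PGDIR_SHIFT   22
    #define PAGE_SHIFT    12
    #define PTRS_PER_PGD  1024
    #define PTRS_PER_PTE  1024
    #define _PAGE_PRESENT 0x1u

    typedef struct { uintptr_t val; } pgd_t;
    typedef struct { uintptr_t val; } pte_t;

    static void toy_set_one(pgd_t *base, pte_t *pte_table,
                            uintptr_t vaddr, uintptr_t paddr)
    {
        pgd_t *pgd = base + ((vaddr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1));

        /* populate the top-level slot only if it is still empty */
        if (!(pgd->val & _PAGE_PRESENT))
            pgd->val = (uintptr_t)pte_table | _PAGE_PRESENT;

        pte_t *pte = (pte_t *)(pgd->val & ~(uintptr_t)_PAGE_PRESENT)
                     + ((vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
        pte->val = paddr | _PAGE_PRESENT;
    }

    int main(void)
    {
        static pgd_t pgd[PTRS_PER_PGD];
        static pte_t pte[PTRS_PER_PTE];
        toy_set_one(pgd, pte, 0x00400000, 0x1234000);
        return 0;
    }
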
/arch/unicore32/kernel/
hibernate.c
33 static pmd_t *resume_one_md_table_init(pgd_t *pgd) in resume_one_md_table_init() argument
38 pud = pud_offset(pgd, 0); in resume_one_md_table_init()
73 pgd_t *pgd; in resume_physical_mapping_init() local
79 pgd = pgd_base + pgd_idx; in resume_physical_mapping_init()
82 for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { in resume_physical_mapping_init()
83 pmd = resume_one_md_table_init(pgd); in resume_physical_mapping_init()
