/arch/x86/platform/efi/

D | efi_64.c
     64  int pgd;    in efi_call_phys_prelog() local
     73  for (pgd = 0; pgd < n_pgds; pgd++) {    in efi_call_phys_prelog()
     74  save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);    in efi_call_phys_prelog()
     75  vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);    in efi_call_phys_prelog()
     76  set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));    in efi_call_phys_prelog()
     86  int pgd;    in efi_call_phys_epilog() local
     88  for (pgd = 0; pgd < n_pgds; pgd++)    in efi_call_phys_epilog()
     89  set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);    in efi_call_phys_epilog()

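The efi_64.c fragments above implement a common trick for calling EFI runtime services in physical mode: save the kernel's low PGD entries, temporarily alias low virtual addresses onto the direct mapping so that physical addresses are also valid virtual ones, call the firmware, then restore the saved entries. A minimal sketch reconstructed from the fragments, assuming the pre-p4d page-table API; N_PGDS and save_pgd stand in for the real code's computed bookkeeping:

    #include <linux/mm.h>
    #include <asm/pgtable.h>
    #include <asm/tlbflush.h>

    #define N_PGDS 4                /* illustrative; the real code computes this */
    static pgd_t save_pgd[N_PGDS];

    static void efi_phys_prelog_sketch(void)
    {
            unsigned long vaddress;
            int pgd;

            for (pgd = 0; pgd < N_PGDS; pgd++) {
                    /* Remember the kernel's current low mapping... */
                    save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE);
                    /* ...then alias the low range to its direct-map twin. */
                    vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
                    set_pgd(pgd_offset_k(pgd * PGDIR_SIZE),
                            *pgd_offset_k(vaddress));
            }
            __flush_tlb_all();
    }

    static void efi_phys_epilog_sketch(void)
    {
            int pgd;

            /* Undo the temporary 1:1 alias. */
            for (pgd = 0; pgd < N_PGDS; pgd++)
                    set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
            __flush_tlb_all();
    }
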
/arch/mn10300/mm/

D | pgtable.c
     37  pgd_t *pgd;    in set_pmd_pfn() local
     49  pgd = swapper_pg_dir + pgd_index(vaddr);    in set_pmd_pfn()
     50  if (pgd_none(*pgd)) {    in set_pmd_pfn()
     54  pud = pud_offset(pgd, vaddr);    in set_pmd_pfn()
    103  static inline void pgd_list_add(pgd_t *pgd)    in pgd_list_add() argument
    105  struct page *page = virt_to_page(pgd);    in pgd_list_add()
    113  static inline void pgd_list_del(pgd_t *pgd)    in pgd_list_del() argument
    115  struct page *next, **pprev, *page = virt_to_page(pgd);    in pgd_list_del()
    123  void pgd_ctor(void *pgd)    in pgd_ctor() argument
    130  memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,    in pgd_ctor()
    [all …]

D | cache-inv-icache.c
     32  pgd_t *pgd;    in flush_icache_page_range() local
     43  pgd = pgd_offset(current->mm, start);    in flush_icache_page_range()
     44  if (!pgd || !pgd_val(*pgd))    in flush_icache_page_range()
     47  pud = pud_offset(pgd, start);    in flush_icache_page_range()

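Nearly every fragment in this listing opens with the same descent that flush_icache_page_range() shows: pgd, then pud, then pmd, down to the pte for one virtual address. A generic sketch of that walk, assuming the four-level API of this kernel generation (no p4d level yet):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    /* Return the pte mapping addr in mm, or NULL if any level is absent. */
    static pte_t *walk_to_pte_sketch(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud;
            pmd_t *pmd;

            if (pgd_none(*pgd) || pgd_bad(*pgd))
                    return NULL;
            pud = pud_offset(pgd, addr);
            if (pud_none(*pud) || pud_bad(*pud))
                    return NULL;
            pmd = pmd_offset(pud, addr);
            if (pmd_none(*pmd) || pmd_bad(*pmd))
                    return NULL;
            /* Caller must pte_unmap() the result of pte_offset_map(). */
            return pte_offset_map(pmd, addr);
    }
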
/arch/arm/mm/

D | pgd.c
     24  #define __pgd_free(pgd) kfree(pgd)    argument
     27  #define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)    argument
    109  pgd_t *pgd;    in pgd_free() local
    117  pgd = pgd_base + pgd_index(0);    in pgd_free()
    118  if (pgd_none_or_clear_bad(pgd))    in pgd_free()
    121  pud = pud_offset(pgd, 0);    in pgd_free()
    136  pgd_clear(pgd);    in pgd_free()
    143  for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {    in pgd_free()
    144  if (pgd_none_or_clear_bad(pgd))    in pgd_free()
    146  if (pgd_val(*pgd) & L_PGD_SWAPPER)    in pgd_free()
    [all …]

D | idmap.c
     50  static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,    in idmap_add_pud() argument
     53  pud_t *pud = pud_offset(pgd, addr);    in idmap_add_pud()
     62  static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)    in identity_mapping_add() argument
     70  pgd += pgd_index(addr);    in identity_mapping_add()
     73  idmap_add_pud(pgd, addr, next, prot);    in identity_mapping_add()
     74  } while (pgd++, addr = next, addr != end);    in identity_mapping_add()

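identity_mapping_add() above shows the standard idiom for visiting a [addr, end) range one PGD entry at a time: pgd_addr_end() clamps each step to the next PGDIR boundary, and the do/while advances pgd and addr in lockstep. A sketch of the skeleton, with do_one_pgd() as a hypothetical stand-in for the per-entry work (idmap_add_pud() in the original):

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static void do_one_pgd(pgd_t *pgd, unsigned long addr, unsigned long end)
    {
            /* per-entry work, e.g. populating the pud level */
    }

    static void for_each_pgd_in_range_sketch(pgd_t *pgd, unsigned long addr,
                                             unsigned long end)
    {
            unsigned long next;

            pgd += pgd_index(addr);
            do {
                    /* Never step past this pgd entry or past the range. */
                    next = pgd_addr_end(addr, end);
                    do_one_pgd(pgd, addr, next);
            } while (pgd++, addr = next, addr != end);
    }
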
/arch/x86/mm/

D | pgtable.c
     73  static inline void pgd_list_add(pgd_t *pgd)    in pgd_list_add() argument
     75  struct page *page = virt_to_page(pgd);    in pgd_list_add()
     80  static inline void pgd_list_del(pgd_t *pgd)    in pgd_list_del() argument
     82  struct page *page = virt_to_page(pgd);    in pgd_list_del()
     91  static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)    in pgd_set_mm() argument
     93  BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));    in pgd_set_mm()
     94  virt_to_page(pgd)->index = (pgoff_t)mm;    in pgd_set_mm()
    102  static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)    in pgd_ctor() argument
    110  clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,    in pgd_ctor()
    117  pgd_set_mm(pgd, mm);    in pgd_ctor()
    [all …]

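The x86 pgd_ctor() fragments show why a freshly allocated pgd works for kernel addresses immediately: the slots above KERNEL_PGD_BOUNDARY are cloned from swapper_pg_dir at construction time, and pgd_set_mm() stashes the owning mm in the pgd's struct page so it can be found from the page later. A condensed sketch of the constructor, based on the fragments above:

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static void pgd_ctor_sketch(struct mm_struct *mm, pgd_t *pgd)
    {
            /* Share the kernel half: copy the reference entries. */
            clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
                            swapper_pg_dir + KERNEL_PGD_BOUNDARY,
                            KERNEL_PGD_PTRS);

            /* Record the owner in the backing page, as pgd_set_mm()
             * does in the listing (page->index doubles as mm slot). */
            virt_to_page(pgd)->index = (pgoff_t)mm;
    }
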
D | pgtable_32.c
     29  pgd_t *pgd;    in set_pte_vaddr() local
     34  pgd = swapper_pg_dir + pgd_index(vaddr);    in set_pte_vaddr()
     35  if (pgd_none(*pgd)) {    in set_pte_vaddr()
     39  pud = pud_offset(pgd, vaddr);    in set_pte_vaddr()
     70  pgd_t *pgd;    in set_pmd_pfn() local
     82  pgd = swapper_pg_dir + pgd_index(vaddr);    in set_pmd_pfn()
     83  if (pgd_none(*pgd)) {    in set_pmd_pfn()
     87  pud = pud_offset(pgd, vaddr);    in set_pmd_pfn()

D | init_64.c
    117  pgd_t *pgd;    in sync_global_pgds() local
    120  pgd = (pgd_t *)page_address(page) + pgd_index(address);    in sync_global_pgds()
    125  if (pgd_none(*pgd))    in sync_global_pgds()
    126  set_pgd(pgd, *pgd_ref);    in sync_global_pgds()
    128  BUG_ON(pgd_page_vaddr(*pgd)    in sync_global_pgds()
    160  static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)    in fill_pud() argument
    162  if (pgd_none(*pgd)) {    in fill_pud()
    164  pgd_populate(&init_mm, pgd, pud);    in fill_pud()
    165  if (pud != pud_offset(pgd, 0))    in fill_pud()
    167  pud, pud_offset(pgd, 0));    in fill_pud()
    [all …]

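On x86-64, kernel mappings created after boot have to be copied into every task's pgd, because each process carries its own copy of the kernel half. sync_global_pgds() does this by walking the global list of pgd pages (the pgd_list maintained by the pgd_list_add()/pgd_list_del() helpers above). A sketch of the core loop under those assumptions:

    #include <linux/mm.h>
    #include <linux/spinlock.h>
    #include <asm/pgtable.h>

    extern struct list_head pgd_list;   /* every process pgd page is on it */
    extern spinlock_t pgd_lock;

    static void sync_one_kernel_pgd_sketch(unsigned long address)
    {
            pgd_t *pgd_ref = pgd_offset_k(address);
            struct page *page;

            if (pgd_none(*pgd_ref))
                    return;

            spin_lock(&pgd_lock);
            list_for_each_entry(page, &pgd_list, lru) {
                    pgd_t *pgd = (pgd_t *)page_address(page)
                                 + pgd_index(address);

                    if (pgd_none(*pgd))
                            set_pgd(pgd, *pgd_ref);  /* copy missing entry */
                    else
                            BUG_ON(pgd_page_vaddr(*pgd)
                                   != pgd_page_vaddr(*pgd_ref));
            }
            spin_unlock(&pgd_lock);
    }
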
/arch/powerpc/include/asm/

D | pgtable-ppc64-4k.h
     55  #define pgd_none(pgd)        (!pgd_val(pgd))    argument
     56  #define pgd_bad(pgd)         (pgd_val(pgd) == 0)    argument
     57  #define pgd_present(pgd)     (pgd_val(pgd) != 0)    argument
     59  #define pgd_page_vaddr(pgd)  (pgd_val(pgd) & ~PGD_MASKED_BITS)    argument
     60  #define pgd_page(pgd)        virt_to_page(pgd_page_vaddr(pgd))    argument

D | mmu_context.h
     23  extern void set_context(unsigned long id, pgd_t *pgd);
     51  tsk->thread.pgdir = next->pgd;    in switch_mm()
     56  get_paca()->pgd = next->pgd;    in switch_mm()
    112  get_paca()->pgd = NULL;    in enter_lazy_tlb()

/arch/frv/mm/

D | pgalloc.c
     85  static inline void pgd_list_add(pgd_t *pgd)    in pgd_list_add() argument
     87  struct page *page = virt_to_page(pgd);    in pgd_list_add()
     95  static inline void pgd_list_del(pgd_t *pgd)    in pgd_list_del() argument
     97  struct page *next, **pprev, *page = virt_to_page(pgd);    in pgd_list_del()
    105  void pgd_ctor(void *pgd)    in pgd_ctor() argument
    112  memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,    in pgd_ctor()
    119  pgd_list_add(pgd);    in pgd_ctor()
    121  memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));    in pgd_ctor()
    125  void pgd_dtor(void *pgd)    in pgd_dtor() argument
    130  pgd_list_del(pgd);    in pgd_dtor()
    [all …]

/arch/sh/mm/

D | hugetlbpage.c
     27  pgd_t *pgd;    in huge_pte_alloc() local
     32  pgd = pgd_offset(mm, addr);    in huge_pte_alloc()
     33  if (pgd) {    in huge_pte_alloc()
     34  pud = pud_alloc(mm, pgd, addr);    in huge_pte_alloc()
     47  pgd_t *pgd;    in huge_pte_offset() local
     52  pgd = pgd_offset(mm, addr);    in huge_pte_offset()
     53  if (pgd) {    in huge_pte_offset()
     54  pud = pud_offset(pgd, addr);    in huge_pte_offset()

/arch/parisc/include/asm/

D | pgalloc.h
     23  pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL,    in pgd_alloc() local
     25  pgd_t *actual_pgd = pgd;    in pgd_alloc()
     27  if (likely(pgd != NULL)) {    in pgd_alloc()
     28  memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);    in pgd_alloc()
     37  + (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));    in pgd_alloc()
     40  __pgd_val_set(*pgd, PxD_FLAG_ATTACHED);    in pgd_alloc()
     46  static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)    in pgd_free() argument
     49  pgd -= PTRS_PER_PGD;    in pgd_free()
     51  free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);    in pgd_free()
     58  static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)    in pgd_populate() argument
    [all …]

/arch/hexagon/include/asm/

D | pgalloc.h
     36  pgd_t *pgd;    in pgd_alloc() local
     38  pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);    in pgd_alloc()
     48  memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t *));    in pgd_alloc()
     52  mm->context.ptbase = __pa(pgd);    in pgd_alloc()
     54  return pgd;    in pgd_alloc()
     57  static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)    in pgd_free() argument
     59  free_page((unsigned long) pgd);    in pgd_free()
    133  pmdindex = (pgd_t *)pmd - mm->pgd;    in pmd_populate_kernel()
    134  ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;    in pmd_populate_kernel()

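The hexagon pgd_alloc() above is about the simplest possible: one zeroed page, kernel entries inherited wholesale from swapper_pg_dir, and the physical base recorded where the MMU expects it. A sketch along those lines; mm->context.ptbase is hexagon-specific, and other architectures record the table base differently:

    #include <linux/mm.h>
    #include <linux/gfp.h>
    #include <linux/string.h>
    #include <asm/pgtable.h>

    static pgd_t *pgd_alloc_sketch(struct mm_struct *mm)
    {
            pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

            if (!pgd)
                    return NULL;

            /* Inherit every kernel mapping from the reference table;
             * user slots are populated later by the fault path. */
            memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t));

            /* Tell the MMU where this address space's table lives. */
            mm->context.ptbase = __pa(pgd);
            return pgd;
    }

    static void pgd_free_sketch(struct mm_struct *mm, pgd_t *pgd)
    {
            free_page((unsigned long)pgd);
    }
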
/arch/mips/mm/

D | hugetlbpage.c
     28  pgd_t *pgd;    in huge_pte_alloc() local
     32  pgd = pgd_offset(mm, addr);    in huge_pte_alloc()
     33  pud = pud_alloc(mm, pgd, addr);    in huge_pte_alloc()
     42  pgd_t *pgd;    in huge_pte_offset() local
     46  pgd = pgd_offset(mm, addr);    in huge_pte_offset()
     47  if (pgd_present(*pgd)) {    in huge_pte_offset()
     48  pud = pud_offset(pgd, addr);    in huge_pte_offset()

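The mips and sh hugetlb fragments differ only in how much they allocate: huge_pte_offset() merely reads the levels that exist, while huge_pte_alloc() builds missing intermediate tables with pud_alloc()/pmd_alloc(). A sketch of the allocating variant for an architecture where a huge page is mapped by a single pmd entry, as on mips:

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static pte_t *huge_pte_alloc_sketch(struct mm_struct *mm,
                                        unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud = pud_alloc(mm, pgd, addr);  /* create if missing */
            pmd_t *pmd = NULL;

            if (pud)
                    pmd = pmd_alloc(mm, pud, addr); /* create if missing */

            /* A huge page occupies a whole pmd slot, so the pmd itself
             * is handed back as the "huge pte". */
            return (pte_t *)pmd;
    }
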
/arch/tile/mm/

D | pgtable.c
     92  pgd_t *pgd;    in set_pte_pfn() local
     97  pgd = swapper_pg_dir + pgd_index(vaddr);    in set_pte_pfn()
     98  if (pgd_none(*pgd)) {    in set_pte_pfn()
    102  pud = pud_offset(pgd, vaddr);    in set_pte_pfn()
    160  pgd_t *pgd;    in shatter_huge_page() local
    172  pgd = swapper_pg_dir + pgd_index(addr);    in shatter_huge_page()
    173  pud = pud_offset(pgd, addr);    in shatter_huge_page()
    196  pgd = list_to_pgd(pos) + pgd_index(addr);    in shatter_huge_page()
    197  pud = pud_offset(pgd, addr);    in shatter_huge_page()
    229  static inline void pgd_list_add(pgd_t *pgd)    in pgd_list_add() argument
    [all …]

/arch/um/kernel/

D | mem.c
    118  pgd_t *pgd;    in fixrange_init() local
    127  pgd = pgd_base + i;    in fixrange_init()
    129  for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {    in fixrange_init()
    130  pud = pud_offset(pgd, vaddr);    in fixrange_init()
    163  pgd_t *pgd;    in init_highmem() local
    175  pgd = swapper_pg_dir + pgd_index(vaddr);    in init_highmem()
    176  pud = pud_offset(pgd, vaddr);    in init_highmem()
    189  pgd_t *pgd;    in fixaddr_user_init() local
    205  pgd = swapper_pg_dir + pgd_index(vaddr);    in fixaddr_user_init()
    206  pud = pud_offset(pgd, vaddr);    in fixaddr_user_init()
    [all …]

/arch/x86/power/

D | hibernate_32.c
     35  static pmd_t *resume_one_md_table_init(pgd_t *pgd)    in resume_one_md_table_init() argument
     45  set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));    in resume_one_md_table_init()
     46  pud = pud_offset(pgd, 0);    in resume_one_md_table_init()
     50  pud = pud_offset(pgd, 0);    in resume_one_md_table_init()
     86  pgd_t *pgd;    in resume_physical_mapping_init() local
     92  pgd = pgd_base + pgd_idx;    in resume_physical_mapping_init()
     95  for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {    in resume_physical_mapping_init()
     96  pmd = resume_one_md_table_init(pgd);    in resume_physical_mapping_init()

/arch/m32r/include/asm/

D | pgtable-2level.h
     33  static inline int pgd_none(pgd_t pgd) { return 0; }    in pgd_none() argument
     34  static inline int pgd_bad(pgd_t pgd) { return 0; }    in pgd_bad() argument
     35  static inline int pgd_present(pgd_t pgd) { return 1; }    in pgd_present() argument
     53  #define pgd_page_vaddr(pgd) \    argument
     54          ((unsigned long) __va(pgd_val(pgd) & PAGE_MASK))
     57  #define pgd_page(pgd) (mem_map + ((pgd_val(pgd) >> PAGE_SHIFT) - PFN_BASE))    argument

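The m32r header is the two-level case: the top level is folded away, so pgd_none() is constant 0 and pgd_present() constant 1, and generic walk code written for more levels compiles unchanged and falls straight through. A sketch of why a walker never takes its bail-out branch on such an architecture:

    /* Folded-top-level stubs, as in pgtable-2level.h above: */
    static inline int pgd_none_sketch(pgd_t pgd)    { return 0; }
    static inline int pgd_bad_sketch(pgd_t pgd)     { return 0; }
    static inline int pgd_present_sketch(pgd_t pgd) { return 1; }

    /*
     * In a generic walker such as walk_to_pte_sketch() earlier:
     *
     *     if (pgd_none(*pgd) || pgd_bad(*pgd))   <- constant-false,
     *             return NULL;                      optimized away
     *     pud = pud_offset(pgd, addr);           <- trivial on 2 levels
     */
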
/arch/arm/plat-samsung/include/plat/

D | sysmmu.h
     40  void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
     60  void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
     89  #define s5p_sysmmu_enable(ips, pgd) do { } while (0)    argument
     91  #define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)    argument

/arch/x86/kernel/

D | machine_kexec_32.c
     73  free_page((unsigned long)image->arch.pgd);    in machine_kexec_free_page_tables()
     84  image->arch.pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);    in machine_kexec_alloc_page_tables()
     91  if (!image->arch.pgd ||    in machine_kexec_alloc_page_tables()
    103  pgd_t *pgd, pmd_t *pmd, pte_t *pte,    in machine_kexec_page_table_set_one() argument
    108  pgd += pgd_index(vaddr);    in machine_kexec_page_table_set_one()
    110  if (!(pgd_val(*pgd) & _PAGE_PRESENT))    in machine_kexec_page_table_set_one()
    111  set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));    in machine_kexec_page_table_set_one()
    113  pud = pud_offset(pgd, vaddr);    in machine_kexec_page_table_set_one()
    131  image->arch.pgd, pmd, image->arch.pte0,    in machine_kexec_prepare_page_tables()
    137  image->arch.pgd, pmd, image->arch.pte1,    in machine_kexec_prepare_page_tables()
    [all …]

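machine_kexec_page_table_set_one() hand-builds one mapping in the relocation page table: an empty pgd slot is pointed at a preallocated pmd page, an empty pmd slot at a preallocated pte page, and finally the pte itself is written. A sketch under the same 32-bit assumptions, with the pmd and pte pages preallocated as in machine_kexec_alloc_page_tables():

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static void page_table_set_one_sketch(pgd_t *pgd, pmd_t *pmd, pte_t *pte,
                                          unsigned long vaddr,
                                          unsigned long paddr)
    {
            pud_t *pud;

            pgd += pgd_index(vaddr);
            if (!(pgd_val(*pgd) & _PAGE_PRESENT))
                    /* Wire the preallocated pmd page into the slot. */
                    set_pgd(pgd, __pgd(__pa(pmd) | _PAGE_PRESENT));

            pud = pud_offset(pgd, vaddr);
            pmd = pmd_offset(pud, vaddr);
            if (!(pmd_val(*pmd) & _PAGE_PRESENT))
                    set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));

            pte += pte_index(vaddr);
            set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
    }
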
/arch/powerpc/kernel/

D | softemu8xx.c
     42  pgd_t *pgd;    in print_8xx_pte() local
     47  pgd = pgd_offset(mm, addr & PAGE_MASK);    in print_8xx_pte()
     48  if (pgd) {    in print_8xx_pte()
     49  pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),    in print_8xx_pte()
     55  (long)pgd, (long)pte, (long)pte_val(*pte));    in print_8xx_pte()
     83  pgd_t *pgd;    in get_8xx_pte() local
     88  pgd = pgd_offset(mm, addr & PAGE_MASK);    in get_8xx_pte()
     89  if (pgd) {    in get_8xx_pte()
     90  pmd = pmd_offset(pud_offset(pgd, addr & PAGE_MASK),    in get_8xx_pte()

/arch/ia64/xen/

D | xencomm.c
     51  pgd_t *pgd;    in xencomm_vtop() local
     65  pgd = pgd_offset_k(vaddr);    in xencomm_vtop()
     66  if (pgd_none(*pgd) || pgd_bad(*pgd))    in xencomm_vtop()
     69  pud = pud_offset(pgd, vaddr);    in xencomm_vtop()

/arch/unicore32/kernel/

D | hibernate.c
     35  static pmd_t *resume_one_md_table_init(pgd_t *pgd)    in resume_one_md_table_init() argument
     40  pud = pud_offset(pgd, 0);    in resume_one_md_table_init()
     75  pgd_t *pgd;    in resume_physical_mapping_init() local
     81  pgd = pgd_base + pgd_idx;    in resume_physical_mapping_init()
     84  for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {    in resume_physical_mapping_init()
     85  pmd = resume_one_md_table_init(pgd);    in resume_physical_mapping_init()

/arch/microblaze/include/asm/

D | mmu_context_mm.h
     54  extern void set_context(mm_context_t context, pgd_t *pgd);
    120  tsk->thread.pgdir = next->pgd;    in switch_mm()
    122  set_context(next->context, next->pgd);    in switch_mm()
    132  current->thread.pgdir = mm->pgd;    in activate_mm()
    134  set_context(mm->context, mm->pgd);    in activate_mm()

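The microblaze and powerpc mmu_context fragments show the other consumer of mm->pgd: the context switch. switch_mm() records the incoming task's pgd wherever the low-level TLB-miss code expects to find it (thread.pgdir here, the PACA on ppc64) and then points the hardware at the new context. A sketch of the pattern, with field names taken from the microblaze fragments:

    #include <linux/sched.h>
    #include <linux/mm.h>

    /* Arch hook that programs the hardware context / table base. */
    extern void set_context(mm_context_t context, pgd_t *pgd);

    static inline void switch_mm_sketch(struct mm_struct *prev,
                                        struct mm_struct *next,
                                        struct task_struct *tsk)
    {
            /* Let the TLB-miss handler find the new table quickly. */
            tsk->thread.pgdir = next->pgd;

            /* Switch the MMU to the new address-space context. */
            set_context(next->context, next->pgd);
    }
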