/arch/metag/mm/ |
D | mmu-meta2.c | 151 entry = pgd_index(META_MEMORY_BASE); in mmu_init()
  |             | 154 while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) { in mmu_init()
  |             | 173 entry = pgd_index(PAGE_OFFSET); in mmu_init()
|
D | init.c | 69 int offset = pgd_index(address); in user_gateway_init()
  |        | 273 i = pgd_index(vaddr); in allocate_pgtables()
  |        | 313 pgd = swapper_pg_dir + pgd_index(vaddr); in fixedrange_init()
|
D | mmu-meta1.c | 144 entry = pgd_index(PAGE_OFFSET); in mmu_init()
|
/arch/metag/include/asm/ |
D | pgtable.h | 51 #define FIRST_USER_PGD_NR pgd_index(FIRST_USER_ADDRESS)
  |           | 180 #define pgd_index(address) ((((address) & ~0x80000000) >> PGDIR_SHIFT) \ macro
  |           | 183 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |           | 186 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
|
/arch/tile/mm/ |
D | fault.c | 106 unsigned index = pgd_index(address); in vmalloc_sync_one()
  |         | 222 pgd += pgd_index(address); in handle_migrating_pte()
  |         | 886 BUILD_BUG_ON(pgd_index(VMALLOC_END - PAGE_SIZE) != in vmalloc_sync_all()
  |         | 887 pgd_index(VMALLOC_START)); in vmalloc_sync_all()
  |         | 901 if (!test_bit(pgd_index(address), insync)) { in vmalloc_sync_all()
  |         | 915 set_bit(pgd_index(address), insync); in vmalloc_sync_all()
  |         | 917 if (address == start && test_bit(pgd_index(address), insync)) in vmalloc_sync_all()
|
D | pgtable.c | 112 pgd = swapper_pg_dir + pgd_index(addr); in shatter_huge_page()
  |           | 135 pgd = list_to_pgd(pos) + pgd_index(addr); in shatter_huge_page()
  |           | 178 #define KERNEL_PGD_INDEX_START pgd_index(PAGE_OFFSET)
  |           | 194 BUG_ON(((u64 *)swapper_pg_dir)[pgd_index(MEM_USER_INTRPT)] != 0); in pgd_ctor()
  |           | 349 pgd = mm ? pgd_offset(mm, addr) : swapper_pg_dir + pgd_index(addr); in virt_to_pte()
|
D | init.c | 154 pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va); in get_pmd()
  |        | 162 return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va); in get_pmd()
  |        | 648 pgd = swapper_pg_dir + pgd_index(vaddr); in permanent_kmaps_init()
  |        | 760 BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1)); in paging_init()
  |        | 761 pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START); in paging_init()
|
/arch/mips/include/asm/ |
D | pgtable-32.h | 153 #define __pgd_offset(address) pgd_index(address)
  |              | 160 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |              | 163 #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
|
D | pgtable-64.h | 240 #define __pgd_offset(address) pgd_index(address)
  |              | 247 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |              | 251 #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
|
/arch/x86/platform/efi/ |
D | efi_64.c | 149 pgd = efi_pgd + pgd_index(EFI_VA_END); in efi_alloc_page_tables()
  |          | 182 BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END)); in efi_sync_low_kernel_mappings()
  |          | 186 pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET); in efi_sync_low_kernel_mappings()
  |          | 189 num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET); in efi_sync_low_kernel_mappings()
  |          | 199 pgd_efi = efi_pgd + pgd_index(EFI_VA_END); in efi_sync_low_kernel_mappings()
|
/arch/arm/mm/ |
D | pgd.c | 59 new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), in pgd_alloc()
  |       | 129 pgd = pgd_base + pgd_index(0); in pgd_free()
|
/arch/x86/power/ |
D | hibernate_64.c | 74 set_pgd(pgd + pgd_index(restore_jump_address), in set_up_temporary_text_mapping()
  |                | 132 pgd = (pgd_t *)__va(read_cr3()) + pgd_index(relocated_restore_code); in relocate_restore_code()
|
/arch/arc/include/asm/ |
D | pgtable.h | 342 #define pgd_index(addr) ((addr) >> PGDIR_SHIFT) macro
  |           | 343 #define pgd_offset(mm, addr) (((mm)->pgd)+pgd_index(addr))
  |           | 362 pgd_base + pgd_index(addr); \
|
/arch/x86/mm/ |
D | pgtable_32.c | 33 pgd = swapper_pg_dir + pgd_index(vaddr); in set_pte_vaddr()
|
D | init_32.c | 109 int pgd_idx = pgd_index(vaddr); in populate_extra_pmd()
  |           | 138 pgd_idx = pgd_index(vaddr); in page_table_range_init_count()
  |           | 220 pgd_idx = pgd_index(vaddr); in page_table_range_init()
  |           | 292 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); in kernel_physical_mapping_init()
  |           | 420 pgd = swapper_pg_dir + pgd_index(vaddr); in permanent_kmaps_init()
  |           | 468 pgd = base + pgd_index(va); in native_pagetable_init()
|
/arch/score/include/asm/ |
D | pgtable.h | 70 #define __pgd_offset(address) pgd_index(address)
  |           | 76 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |           | 79 #define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
|
/arch/sh/include/asm/ |
D | pgtable_64.h | 51 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |              | 52 #define __pgd_offset(address) pgd_index(address)
  |              | 53 #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
|
D | pgtable_32.h | 407 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |              | 408 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
  |              | 409 #define __pgd_offset(address) pgd_index(address)
|
/arch/um/kernel/ |
D | mem.c | 97 i = pgd_index(vaddr); in fixrange_init()
  |       | 134 pgd = swapper_pg_dir + pgd_index(vaddr); in fixaddr_user_init()
|
/arch/openrisc/include/asm/ |
D | pgtable.h | 375 #define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |           | 377 #define __pgd_offset(address) pgd_index(address)
  |           | 379 #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
|
/arch/arm/include/asm/ |
D | stage2_pgtable.h | 55 #define stage2_pgd_index(addr) pgd_index(addr)
|
/arch/cris/arch-v10/mm/ |
D | fault.c | 84 pmd = (pmd_t *)(pgd + pgd_index(address)); in handle_mmu_bus_fault()
|
/arch/powerpc/include/asm/nohash/32/ |
D | pgtable.h | 306 #define pgd_index(address) ((address) >> PGDIR_SHIFT) macro
  |           | 307 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
|
/arch/unicore32/kernel/ |
D | hibernate.c | 81 pgd_idx = pgd_index(PAGE_OFFSET); in resume_physical_mapping_init()
|
/arch/cris/include/asm/ |
D | pgtable.h | 232 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) macro
  |           | 237 return mm->pgd + pgd_index(address); in pgd_offset()
|
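
For reference, the definitions listed above all follow the same pattern: pgd_index() turns the upper bits of a virtual address into a slot number in the top-level page table, and pgd_offset() adds that slot to an mm's pgd base pointer. The following is a minimal user-space sketch of that arithmetic, not any architecture's real implementation: the constants (PGDIR_SHIFT 22, PTRS_PER_PGD 1024), the mm_stub type, and pgd_offset_stub() are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout for illustration only: each top-level entry
     * covers 4 MiB and the table holds 1024 entries. */
    #define PGDIR_SHIFT  22
    #define PTRS_PER_PGD 1024

    /* Same shape as the generic definition seen above: take the upper
     * address bits, mask to the table size. */
    #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

    typedef struct { uintptr_t val; } pgd_t;      /* stand-in for the kernel type */
    struct mm_stub { pgd_t *pgd; };               /* stand-in for struct mm_struct */

    /* Mirrors pgd_offset(): table base plus the computed slot. */
    static pgd_t *pgd_offset_stub(struct mm_stub *mm, uintptr_t address)
    {
        return mm->pgd + pgd_index(address);
    }

    int main(void)
    {
        static pgd_t table[PTRS_PER_PGD];
        struct mm_stub mm = { .pgd = table };
        uintptr_t va = 0xC0100000UL;              /* arbitrary example address */

        printf("pgd_index(0x%lx) = %lu\n",
               (unsigned long)va, (unsigned long)pgd_index(va));
        printf("pgd entry lives at %p\n", (void *)pgd_offset_stub(&mm, va));
        return 0;
    }

The variations visible in the listing are small: arc and powerpc/nohash/32 shift without masking, while the first metag definition (pgtable.h line 180) clears bit 31 before shifting; the masked form used here matches the most common definition shown.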