/arch/x86/include/asm/ |
D | pgtable_64_types.h |
      50  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)   macro
      51  #define PUD_MASK (~(PUD_SIZE - 1))
|
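For orientation: PUD_SIZE is the span of virtual address space covered by a single page-upper-directory entry, and PUD_MASK clears the offset within such a span. On x86-64 with 4 KiB pages, PUD_SHIFT is 30, so PUD_SIZE is 1 GiB (_AC(1, UL) is simply 1UL outside assembly). A minimal userspace sketch of the rounding idioms that recur throughout the hits below, with the PUD_SHIFT value assumed:

    #include <stdio.h>

    #define PUD_SHIFT 30                   /* assumed: x86-64, 4 KiB pages */
    #define PUD_SIZE  (1UL << PUD_SHIFT)   /* 1 GiB */
    #define PUD_MASK  (~(PUD_SIZE - 1))    /* clears the low PUD_SHIFT bits */

    int main(void)
    {
            unsigned long addr = 0x40123456UL;

            printf("round down: %#lx\n", addr & PUD_MASK);              /* 0x40000000 */
            printf("next bound: %#lx\n", (addr + PUD_SIZE) & PUD_MASK); /* 0x80000000 */
            printf("aligned:    %d\n", !(addr & ~PUD_MASK));            /* 0 */
            return 0;
    }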
/arch/powerpc/include/asm/ |
D | pgtable-ppc64-4k.h |
      35  #define PUD_SIZE (1UL << PUD_SHIFT)   macro
      36  #define PUD_MASK (~(PUD_SIZE-1))
|
/arch/s390/mm/ |
D | vmem.c |
      99  !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {   in vmem_add_mem()
     103  address += PUD_SIZE;                                       in vmem_add_mem()
     165  address += PUD_SIZE;                                       in vmem_remove_range()
     170  address += PUD_SIZE;                                       in vmem_remove_range()
|
D | dump_pagetables.c |
     166  addr += PUD_SIZE;   in walk_pud_level()
|
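The vmem.c hits above show the canonical test for when a huge PUD-level mapping may be used: the address must sit exactly on a PUD boundary, and a full PUD_SIZE must still fit below the end of the range. A minimal sketch of that predicate, reusing the assumed macro values from the first example; only the condition itself is taken from vmem_add_mem(), the function name here is illustrative:

    #include <stdbool.h>

    #define PUD_SHIFT 30                   /* assumed; per-architecture in reality */
    #define PUD_SIZE  (1UL << PUD_SHIFT)
    #define PUD_MASK  (~(PUD_SIZE - 1))

    /* True when one huge PUD entry can cover [address, address + PUD_SIZE). */
    static bool can_use_pud_mapping(unsigned long address, unsigned long end)
    {
            return !(address & ~PUD_MASK) && (address + PUD_SIZE <= end);
    }

When the test succeeds, the walker installs a single entry and steps with address += PUD_SIZE, which is exactly the increment seen at lines 103, 165, and 170.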
/arch/tile/mm/ |
D | hugetlbpage.c |
     263  } else if (ps >= PUD_SIZE) {        in __setup_hugepagesz()
     267  if (hv_jpage_size != PUD_SIZE) {    in __setup_hugepagesz()
     269  PUD_SIZE >> 20, hv_jpage_size);     in __setup_hugepagesz()
|
/arch/arm64/include/asm/ |
D | pgtable-hwdef.h |
      69  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)   macro
      70  #define PUD_MASK (~(PUD_SIZE-1))
|
D | kvm_mmu.h |
     265  kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);   in __kvm_flush_dcache_pud()
|
D | pgtable.h |
      43  #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
      46  #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
|
/arch/x86/mm/ |
D | init.c |
     230  unsigned long start = round_down(mr[i].start, PUD_SIZE);   in adjust_range_page_size_mask()
     231  unsigned long end = round_up(mr[i].end, PUD_SIZE);         in adjust_range_page_size_mask()
     302  end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));               in split_mem_range()
     315  start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));             in split_mem_range()
     316  end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE));       in split_mem_range()
|
D | pageattr.c |
     852  if (start & (PUD_SIZE - 1)) {                               in __unmap_pud_range()
     853  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;    in __unmap_pud_range()
     865  while (end - start >= PUD_SIZE) {                           in __unmap_pud_range()
     870  unmap_pmd_range(cpa, pud, start, start + PUD_SIZE);         in __unmap_pud_range()
     872  start += PUD_SIZE;                                          in __unmap_pud_range()
    1046  if (start & (PUD_SIZE - 1)) {                               in populate_pud()
    1048  unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;    in populate_pud()
    1081  while (end - start >= PUD_SIZE) {                           in populate_pud()
    1085  start += PUD_SIZE;                                          in populate_pud()
    1086  cpa->pfn += PUD_SIZE;                                       in populate_pud()
    [all …]
|
D | init_64.c |
      79  next = (addr & PUD_MASK) + PUD_SIZE;   in ident_pud_init()
     544  next = (addr & PUD_MASK) + PUD_SIZE;   in phys_pud_init()
     945  if (IS_ALIGNED(addr, PUD_SIZE) &&      in remove_pud_table()
     946  IS_ALIGNED(next, PUD_SIZE)) {          in remove_pud_table()
     949  get_order(PUD_SIZE));                  in remove_pud_table()
     961  PUD_SIZE)) {                           in remove_pud_table()
     963  get_order(PUD_SIZE));                  in remove_pud_table()
|
D | hugetlbpage.c |
     165  } else if (ps == PUD_SIZE && cpu_has_gbpages) {   in setup_hugepagesz()
|
D | pgtable.c |
     599  mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);   in pud_set_huge()
|
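The pageattr.c hits all come from one loop shape: peel off an unaligned head up to the next PUD boundary, process whole 1 GiB blocks in the middle, and leave any tail for the finer-grained levels. A sketch of that structure under the same assumed macro values; handle_partial() and handle_whole_pud() are hypothetical stand-ins for the PMD-level walk and the single-entry fast path:

    #define PUD_SHIFT 30                   /* assumed x86-64 value */
    #define PUD_SIZE  (1UL << PUD_SHIFT)
    #define PUD_MASK  (~(PUD_SIZE - 1))

    static void handle_partial(unsigned long start, unsigned long end) { /* PMD walk */ }
    static void handle_whole_pud(unsigned long start) { /* one huge entry */ }

    static void walk_range(unsigned long start, unsigned long end)
    {
            /* Head: advance an unaligned start to the next PUD boundary. */
            if (start & (PUD_SIZE - 1)) {
                    unsigned long next_page = (start + PUD_SIZE) & PUD_MASK;

                    handle_partial(start, next_page < end ? next_page : end);
                    if (next_page >= end)
                            return;
                    start = next_page;
            }

            /* Body: consume whole PUD_SIZE blocks. */
            while (end - start >= PUD_SIZE) {
                    handle_whole_pud(start);
                    start += PUD_SIZE;
            }

            /* Tail: whatever remains is again sub-PUD sized. */
            if (start < end)
                    handle_partial(start, end);
    }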
/arch/arm64/mm/ |
D | hugetlbpage.c |
     114  if (sz == PUD_SIZE) {                                        in huge_pte_alloc()
     193  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {   in arch_make_huge_pte()
     307  } else if (ps == PUD_SIZE) {                                 in setup_hugepagesz()
|
D | dump.c |
     290  addr = start + i * PUD_SIZE;   in walk_pud()
|
/arch/tile/include/asm/ |
D | hugetlb.h |
     106  if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)   in arch_make_huge_pte()
|
/arch/x86/kernel/ |
D | head64.c |
     152  BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);   in x86_64_start_kernel()
|
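The head64.c hit is a compile-time guard rather than a runtime computation: the early x86-64 page tables cover the kernel image and the module area with two 1 GiB PUD entries, so their combined size must never exceed 2*PUD_SIZE. The same guard can be expressed with C11 _Static_assert; the size values below are assumptions for illustration, not the kernel's exact configuration:

    #define PUD_SHIFT         30
    #define PUD_SIZE          (1UL << PUD_SHIFT)
    #define KERNEL_IMAGE_SIZE (512UL << 20)    /* assumed: 512 MiB  */
    #define MODULES_LEN       (1520UL << 20)   /* assumed: ~1.5 GiB */

    /* Fails the build, not the boot, if the layout outgrows two PUD entries. */
    _Static_assert(MODULES_LEN + KERNEL_IMAGE_SIZE <= 2 * PUD_SIZE,
                   "kernel image + modules must fit in two 1 GiB PUD mappings");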
/arch/frv/include/asm/ |
D | pgtable.h |
     129  #define PUD_SIZE (1UL << PUD_SHIFT)   macro
     130  #define PUD_MASK (~(PUD_SIZE - 1))
|
/arch/ia64/include/asm/ |
D | pgtable.h |
     110  #define PUD_SIZE (1UL << PUD_SHIFT)   macro
     111  #define PUD_MASK (~(PUD_SIZE-1))
|
/arch/arm/mm/ |
D | dump.c |
     294  addr = start + i * PUD_SIZE;   in walk_pud()
|
/arch/sparc/include/asm/ |
D | pgtable_64.h |
      57  #define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)   macro
      58  #define PUD_MASK (~(PUD_SIZE-1))
|
/arch/x86/xen/ |
D | mmu.c |
    1173  va += PUD_SIZE;                                                           in xen_cleanmfnmap()
    1176  xen_free_ro_pages(pa, PUD_SIZE);                                          in xen_cleanmfnmap()
    1177  va += PUD_SIZE;                                                           in xen_cleanmfnmap()
    1262  xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);    in xen_pagetable_cleanhighmap()
    2098  n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT;                             in xen_relocate_p2m()
|
/arch/sparc/mm/ |
D | init_64.c |
    1575  return vstart + PUD_SIZE;                                             in kernel_map_hugepud()
    1585  pte_val += PUD_SIZE;                                                  in kernel_map_hugepud()
    1586  vstart += PUD_SIZE;                                                   in kernel_map_hugepud()
    1595  if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)    in kernel_can_map_hugepud()
|
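The sparc hits pair an eligibility check with the loop it guards: kernel_can_map_hugepud() requires a PUD-aligned start and at least PUD_SIZE of remaining range (line 1595), and kernel_map_hugepud() then advances the page-table entry value and the virtual address in lockstep, one huge entry per step (lines 1585-1586). A rough sketch of that loop; set_huge_pud() is a hypothetical helper, and the shift value is assumed rather than sparc64's actual one:

    #define PUD_SHIFT 30                   /* assumed; sparc64 defines its own */
    #define PUD_SIZE  (1UL << PUD_SHIFT)

    static void set_huge_pud(unsigned long vaddr, unsigned long pte_val) { /* install */ }

    static void map_hugepud_range(unsigned long vstart, unsigned long vend,
                                  unsigned long pte_val)
    {
            while (vend - vstart >= PUD_SIZE) {
                    set_huge_pud(vstart, pte_val);
                    pte_val += PUD_SIZE;   /* next 1 GiB of physical space */
                    vstart  += PUD_SIZE;   /* next 1 GiB of virtual space  */
            }
    }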
/arch/ia64/mm/ |
D | init.c |
     400  end_address += PUD_SIZE;   in vmemmap_find_next_valid_pfn()
|
/arch/um/kernel/ |
D | tlb.c |
     339  last = ADD_ROUND(addr, PUD_SIZE);   in flush_tlb_kernel_range_common()
|