/arch/sh/include/asm/ |
D | pgtable-3level.h |
    24  #define PMD_SIZE (1UL << PMD_SHIFT)  macro
    25  #define PMD_MASK (~(PMD_SIZE-1))
    27  #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
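Nearly every definition site in this listing follows the pattern above: PMD_SIZE is the span of virtual address space covered by one PMD entry, and PMD_MASK clears the offset bits below it. A minimal userspace sketch of the arithmetic, assuming PMD_SHIFT = 21 (the x86-64 value with 4 KiB pages; sh defines its own shift):

    #include <stdio.h>

    #define PMD_SHIFT 21                     /* assumed value, not taken from the sh header */
    #define PMD_SIZE  (1UL << PMD_SHIFT)     /* 2 MiB: span of one PMD entry   */
    #define PMD_MASK  (~(PMD_SIZE - 1))      /* clears the low 21 offset bits  */

    int main(void)
    {
        unsigned long addr = 0x40212345UL;

        printf("PMD_SIZE      = 0x%lx\n", PMD_SIZE);          /* 0x200000   */
        printf("PMD-aligned   = 0x%lx\n", addr & PMD_MASK);   /* 0x40200000 */
        printf("offset in PMD = 0x%lx\n", addr & ~PMD_MASK);  /* 0x12345    */
        return 0;
    }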
|
/arch/x86/mm/ |
D | init.c |
    280  unsigned long start = round_down(mr[i].start, PMD_SIZE);  in adjust_range_page_size_mask()
    281  unsigned long end = round_up(mr[i].end, PMD_SIZE);  in adjust_range_page_size_mask()
    347  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
    349  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    351  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    361  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    363  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    366  if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))  in split_mem_range()
    367  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    388  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    [all …]
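split_mem_range() works in page frame numbers rather than addresses, so PMD_SIZE is first converted with PFN_DOWN(); rounding a pfn to a multiple of PFN_DOWN(PMD_SIZE) is the pfn-space equivalent of rounding an address to a PMD boundary. A hedged sketch of that equivalence, assuming 4 KiB pages and the kernel's power-of-two round_up()/round_down() definitions:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PMD_SHIFT   21                  /* assumed x86-64 values */
    #define PMD_SIZE    (1UL << PMD_SHIFT)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    /* Same shape as include/linux/kernel.h for power-of-two y. */
    #define round_down(x, y) ((x) & ~((y) - 1))
    #define round_up(x, y)   ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
        unsigned long pfn = 0x1234;  /* arbitrary page frame number */

        /* 512 4-KiB pages per 2-MiB PMD region. */
        printf("pages per PMD = %lu\n",   PFN_DOWN(PMD_SIZE));
        printf("start_pfn     = 0x%lx\n", round_up(pfn, PFN_DOWN(PMD_SIZE)));    /* 0x1400 */
        printf("end_pfn       = 0x%lx\n", round_down(pfn, PFN_DOWN(PMD_SIZE)));  /* 0x1200 */
        return 0;
    }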
|
D | init_64.c |
    330   for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {  in __init_extra_mapping()
    382   unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;  in cleanup_highmap()
    393   for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {  in cleanup_highmap()
    473   paddr_next = (paddr & PMD_MASK) + PMD_SIZE;  in phys_pmd_init()
    965   if (IS_ALIGNED(addr, PMD_SIZE) &&  in remove_pmd_table()
    966   IS_ALIGNED(next, PMD_SIZE)) {  in remove_pmd_table()
    969   get_order(PMD_SIZE));  in remove_pmd_table()
    981   PMD_SIZE)) {  in remove_pmd_table()
    983   get_order(PMD_SIZE));  in remove_pmd_table()
    1247  all_end = roundup((unsigned long)_brk_end, PMD_SIZE);  in mark_rodata_ro()
    [all …]
|
D | kasan_init_64.c |
    43  ((end - addr) == PMD_SIZE) &&  in kasan_populate_pmd()
    44  IS_ALIGNED(addr, PMD_SIZE)) {  in kasan_populate_pmd()
    45  p = early_alloc(PMD_SIZE, nid, false);  in kasan_populate_pmd()
    49  memblock_free(__pa(p), PMD_SIZE);  in kasan_populate_pmd()
|
/arch/x86/include/asm/ |
D | pgtable_32_types.h |
    12  # define PMD_SIZE (1UL << PMD_SHIFT)  macro
    13  # define PMD_MASK (~(PMD_SIZE - 1))
|
D | pgtable_64_types.h |
    71  #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)  macro
    72  #define PMD_MASK (~(PMD_SIZE - 1))
|
/arch/powerpc/include/asm/nohash/64/ |
D | pgtable-64k.h |
    38  #define PMD_SIZE (1UL << PMD_SHIFT)  macro
    39  #define PMD_MASK (~(PMD_SIZE-1))
|
D | pgtable-4k.h |
    31  #define PMD_SIZE (1UL << PMD_SHIFT)  macro
    32  #define PMD_MASK (~(PMD_SIZE-1))
|
/arch/arm64/mm/ |
D | hugetlbpage.c |
    65   *pgsize = PMD_SIZE;  in find_num_contig()
    81   case PMD_SIZE:  in num_contig_ptes()
    85   *pgsize = PMD_SIZE;  in num_contig_ptes()
    233  } else if (sz == PMD_SIZE) {  in huge_pte_alloc()
    239  } else if (sz == (PMD_SIZE * CONT_PMDS)) {  in huge_pte_alloc()
    276  if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&  in huge_pte_offset()
    299  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()
    417  case PMD_SIZE * CONT_PMDS:  in setup_hugepagesz()
    418  case PMD_SIZE:  in setup_hugepagesz()
|
/arch/tile/mm/ |
D | hugetlbpage.c |
    73   if (sz >= PMD_SIZE) {  in huge_pte_alloc()
    74   BUG_ON(sz != PMD_SIZE &&  in huge_pte_alloc()
    75   sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));  in huge_pte_alloc()
    85   BUG_ON(sz != PMD_SIZE);  in huge_pte_alloc()
    276  } else if (ps >= PMD_SIZE) {  in __setup_hugepagesz()
    336  BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||  in add_default_hugepagesz()
|
/arch/s390/mm/ |
D | vmem.c |
    124  !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&  in vmem_add_mem()
    127  address += PMD_SIZE;  in vmem_add_mem()
    191  address += PMD_SIZE;  in vmem_remove_range()
    196  address += PMD_SIZE;  in vmem_remove_range()
    267  new_page = vmemmap_alloc_block(PMD_SIZE, node);  in vmemmap_populate()
    271  address = (address + PMD_SIZE) & PMD_MASK;  in vmemmap_populate()
    279  address = (address + PMD_SIZE) & PMD_MASK;  in vmemmap_populate()
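Two rounding idioms recur throughout this listing. `(address + PMD_SIZE) & PMD_MASK`, as in vmemmap_populate() above, always advances to the start of the next PMD region, even from an aligned address; `(addr + PMD_SIZE - 1) & PMD_MASK`, used for example by fill_pmd_gaps() in arm/mm/mmu.c below, rounds up and leaves an aligned address unchanged. A small sketch contrasting the two, again assuming PMD_SHIFT = 21:

    #include <stdio.h>

    #define PMD_SHIFT 21                    /* assumed: 2 MiB PMD regions */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long aligned   = 0x40000000UL;  /* already on a PMD boundary */
        unsigned long unaligned = 0x40012345UL;

        /* "Next PMD region" idiom from vmem.c: always advances. */
        printf("0x%lx\n", (aligned   + PMD_SIZE) & PMD_MASK);      /* 0x40200000 */
        printf("0x%lx\n", (unaligned + PMD_SIZE) & PMD_MASK);      /* 0x40200000 */

        /* "Round up" idiom from fill_pmd_gaps(): aligned input unchanged. */
        printf("0x%lx\n", (aligned   + PMD_SIZE - 1) & PMD_MASK);  /* 0x40000000 */
        printf("0x%lx\n", (unaligned + PMD_SIZE - 1) & PMD_MASK);  /* 0x40200000 */
        return 0;
    }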
|
/arch/x86/boot/compressed/ |
D | pagetable.c |
    134  start = round_down(start, PMD_SIZE);  in add_identity_map()
    135  end = round_up(end, PMD_SIZE);  in add_identity_map()
|
/arch/nios2/mm/ |
D | ioremap.c |
    33  if (end > PMD_SIZE)  in remap_area_pte()
    34  end = PMD_SIZE;  in remap_area_pte()
    70  address = (address + PMD_SIZE) & PMD_MASK;  in remap_area_pmd()
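The clamp `if (end > PMD_SIZE) end = PMD_SIZE` in remap_area_pte() (the mips copy below is identical) reflects the contract that the PTE-level helper is handed at most one PMD's worth of range, with addresses reduced to offsets within the current PMD region; the PMD-level caller then steps with `(address + PMD_SIZE) & PMD_MASK`. A schematic of that two-level walk, with hypothetical helper names rather than the kernel's:

    #include <stdio.h>

    #define PMD_SHIFT 21                  /* assumed 2 MiB PMD span */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    /* Hypothetical PTE-level worker: sees offsets within one PMD region. */
    static void walk_one_pmd(unsigned long address, unsigned long size)
    {
        unsigned long end;

        address &= ~PMD_MASK;         /* offset within this PMD region   */
        end = address + size;
        if (end > PMD_SIZE)           /* clamp, as remap_area_pte() does */
            end = PMD_SIZE;
        printf("ptes for offsets [0x%lx, 0x%lx)\n", address, end);
    }

    int main(void)
    {
        unsigned long address = 0x12345UL;   /* unaligned start */
        unsigned long end     = 0x500000UL;  /* 5 MiB total     */

        /* PMD-level loop, shaped like remap_area_pmd(). */
        while (address < end) {
            walk_one_pmd(address, end - address);
            address = (address + PMD_SIZE) & PMD_MASK;
        }
        return 0;
    }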
|
/arch/parisc/kernel/ |
D | pci-dma.c |
    92   if (end > PMD_SIZE)  in map_pte_uncached()
    93   end = PMD_SIZE;  in map_pte_uncached()
    127  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in map_pmd_uncached()
    128  orig_vaddr += PMD_SIZE;  in map_pmd_uncached()
    172  if (end > PMD_SIZE)  in unmap_uncached_pte()
    173  end = PMD_SIZE;  in unmap_uncached_pte()
    212  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in unmap_uncached_pmd()
    213  orig_vaddr += PMD_SIZE;  in unmap_uncached_pmd()
|
/arch/m68k/include/asm/ |
D | pgtable_mm.h |
    38  #define PMD_SIZE (1UL << PMD_SHIFT)  macro
    39  #define PMD_MASK (~(PMD_SIZE-1))
|
/arch/mips/mm/ |
D | ioremap.c |
    31  if (end > PMD_SIZE)  in remap_area_pte()
    32  end = PMD_SIZE;  in remap_area_pte()
    63  address = (address + PMD_SIZE) & PMD_MASK;  in remap_area_pmd()
|
/arch/um/include/asm/ |
D | pgtable-3level.h |
    28  #define PMD_SIZE (1UL << PMD_SHIFT)  macro
    29  #define PMD_MASK (~(PMD_SIZE-1))
|
/arch/arm/include/asm/ |
D | pgtable-2level.h |
    88  #define PMD_SIZE (1UL << PMD_SHIFT)  macro
    89  #define PMD_MASK (~(PMD_SIZE-1))
|
D | highmem.h |
    7  #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
|
/arch/tile/include/asm/ |
D | pgtable_64.h |
    32  #define PMD_SIZE HPAGE_SIZE  macro
    33  #define PMD_MASK (~(PMD_SIZE-1))
|
/arch/powerpc/mm/ |
D | pgtable-radix.c |
    84   if (map_page_size == PMD_SIZE) {  in radix__map_kernel_page()
    109  if (map_page_size == PMD_SIZE) {  in radix__map_kernel_page()
    238  else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&  in create_physical_mapping()
    240  mapping_size = PMD_SIZE;  in create_physical_mapping()
    247  max_mapping_size = PMD_SIZE;  in create_physical_mapping()
    251  if (split_text_mapping && (mapping_size == PMD_SIZE) &&  in create_physical_mapping()
    786  split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);  in remove_pmd_table()
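create_physical_mapping() above picks the largest page size the current position permits: a PMD-sized mapping requires both PMD_SIZE alignment and at least PMD_SIZE of range remaining. A simplified chooser capturing just that rule (the 1 GiB/2 MiB level sizes are assumed; the real function also consults MMU feature bits and max_mapping_size):

    #include <stdio.h>

    #define PMD_SHIFT 21
    #define PUD_SHIFT 30                    /* assumed 2 MiB / 1 GiB levels */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PUD_SIZE  (1UL << PUD_SHIFT)
    #define PAGE_SIZE 4096UL

    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    /* Largest mapping size usable at addr with 'gap' bytes remaining. */
    static unsigned long pick_mapping_size(unsigned long addr, unsigned long gap)
    {
        if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE)
            return PUD_SIZE;
        if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE)
            return PMD_SIZE;
        return PAGE_SIZE;
    }

    int main(void)
    {
        printf("%lu\n", pick_mapping_size(0x40000000UL, 0x80000000UL)); /* 1 GiB */
        printf("%lu\n", pick_mapping_size(0x40200000UL, 0x00400000UL)); /* 2 MiB */
        printf("%lu\n", pick_mapping_size(0x40201000UL, 0x00400000UL)); /* 4 KiB */
        return 0;
    }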
|
/arch/metag/include/asm/ |
D | highmem.h |
    27  #define PKMAP_BASE (FIXADDR_START - PMD_SIZE)
|
/arch/arm64/include/asm/ |
D | pgtable-hwdef.h |
    59   #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)  macro
    60   #define PMD_MASK (~(PMD_SIZE-1))
    108  #define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)
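CONT_PMD_SIZE is the span a run of contiguous PMD entries can cover with arm64's contiguous-bit hint, and it is one of the sizes accepted by huge_pte_alloc()/setup_hugepagesz() in the arm64 hugetlbpage.c entry above. A one-liner with the usual 4 KiB-granule values, which are assumptions about that configuration rather than something shown in this listing:

    #define PMD_SHIFT     21                         /* assumed: 4 KiB granule     */
    #define CONT_PMDS     16                         /* assumed for that granule   */
    #define PMD_SIZE      (1UL << PMD_SHIFT)         /* 2 MiB                      */
    #define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)     /* 16 * 2 MiB = 32 MiB        */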
|
/arch/sparc/mm/ |
D | hugetlbpage.c |
    280  if (sz >= PMD_SIZE)  in huge_pte_alloc()
    320  else if (size >= PMD_SIZE)  in set_huge_pte_at()
    357  else if (size >= PMD_SIZE)  in huge_ptep_get_and_clear()
    486  addr += PMD_SIZE;  in hugetlb_free_pgd_range()
    496  end -= PMD_SIZE;  in hugetlb_free_pgd_range()
|
/arch/arm/mm/ |
D | mmu.c |
    1092  next = (addr + PMD_SIZE - 1) & PMD_MASK;  in fill_pmd_gaps()
    1184  if (!IS_ALIGNED(reg->base, PMD_SIZE)) {  in adjust_lowmem_bounds()
    1187  len = round_up(reg->base, PMD_SIZE) - reg->base;  in adjust_lowmem_bounds()
    1227  if (!IS_ALIGNED(block_start, PMD_SIZE))  in adjust_lowmem_bounds()
    1229  else if (!IS_ALIGNED(block_end, PMD_SIZE))  in adjust_lowmem_bounds()
    1248  memblock_limit = round_down(memblock_limit, PMD_SIZE);  in adjust_lowmem_bounds()
    1273  for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)  in prepare_page_table()
    1278  addr = ((unsigned long)_exiprom + PMD_SIZE - 1) & PMD_MASK;  in prepare_page_table()
    1280  for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)  in prepare_page_table()
    1295  addr < VMALLOC_START; addr += PMD_SIZE)  in prepare_page_table()
    [all …]
|