/arch/arm64/include/asm/ |
D | kernel-pgtable.h |
      68  #define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \   [argument]
      71  #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))   [argument]
      74  #define EARLY_PUDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PUD_SHIFT))   [argument]
      76  #define EARLY_PUDS(vstart, vend) (0)   [argument]
      80  #define EARLY_PMDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, SWAPPER_TABLE_SHIFT))   [argument]
      82  #define EARLY_PMDS(vstart, vend) (0)   [argument]
      85  #define EARLY_PAGES(vstart, vend) ( 1 /* PGDIR page */ \   [argument]
      86  + EARLY_PGDS((vstart), (vend)) /* each PGDIR needs a next level page table */ \
      87  + EARLY_PUDS((vstart), (vend)) /* each PUD needs a next level page table */ \
      88  + EARLY_PMDS((vstart), (vend))) /* each PMD needs a next level page table */
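The EARLY_ENTRIES() body is cut off at the line continuation above; in this version of the header it presumably reads "(vend >> shift) - (vstart >> shift) + 1", i.e. the number of table entries a virtual range spans at one level. A minimal user-space sketch of that arithmetic, with illustrative shifts (2 MiB blocks, 1 GiB PUDs) that are assumptions rather than values taken from the header:

/* Sketch only: entries spanned by [vstart, vend] at a level with the
 * given shift, assuming the truncated EARLY_ENTRIES() body above. */
#include <stdio.h>

static unsigned long long early_entries(unsigned long long vstart,
                                        unsigned long long vend,
                                        unsigned int shift)
{
        return (vend >> shift) - (vstart >> shift) + 1;
}

int main(void)
{
        /* hypothetical 40 MiB image in the kernel VA range */
        unsigned long long vstart = 0xffff000010000000ULL;
        unsigned long long vend   = vstart + (40ULL << 20);

        printf("2 MiB-block entries: %llu\n", early_entries(vstart, vend, 21));
        printf("1 GiB PUD entries:   %llu\n", early_entries(vstart, vend, 30));
        return 0;
}

EARLY_PAGES() then sums one page for the PGDIR itself plus one next-level table page per PGD, PUD and PMD entry the range touches.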
|
/arch/powerpc/kernel/ |
D | io-workarounds.c |
      31  unsigned long vstart, vend;   [in iowa_pci_find(), local]
      39  vend = vstart + phb->pci_io_size - 1;   [in iowa_pci_find()]
      40  if ((vaddr >= vstart) && (vaddr <= vend))   [in iowa_pci_find()]
|
/arch/alpha/boot/ |
D | bootpz.c |
      77  check_range(unsigned long vstart, unsigned long vend,   [in check_range(), argument]
      84  vstart, vend, kstart, kend);   [in check_range()]
      87  for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE)   [in check_range()]
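Only fragments of check_range() survive in the listing; the loop at line 87 walks the virtual range one page at a time. A self-contained sketch of that walk, with an 8 KiB PAGE_SIZE and an identity address translation standing in for the real lookup (both are assumptions; the actual code translates each vaddr and compares the result against the kernel's load range):

/* Sketch, not the alpha boot code: does any page of [vstart, vend]
 * land inside [kstart, kend]? */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 0x2000UL              /* assumed for the sketch */

static bool range_overlaps(unsigned long vstart, unsigned long vend,
                           unsigned long kstart, unsigned long kend)
{
        unsigned long vaddr;

        for (vaddr = vstart; vaddr <= vend; vaddr += PAGE_SIZE) {
                unsigned long paddr = vaddr;    /* stand-in for the real vtop() */

                if (paddr >= kstart && paddr <= kend)
                        return true;
        }
        return false;
}

int main(void)
{
        printf("%d\n", range_overlaps(0x10000, 0x1ffff, 0x18000, 0x2ffff));
        return 0;
}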
|
/arch/sparc/mm/ |
D | init_64.c |
    1700  unsigned long vend,   [in kernel_map_hugepud(), argument]
    1708  (vend - vstart <= mask16gb)) {   [in kernel_map_hugepud()]
    1718  vend = vstart + mask16gb + 1UL;   [in kernel_map_hugepud()]
    1719  while (vstart < vend) {   [in kernel_map_hugepud()]
    1729  static bool kernel_can_map_hugepud(unsigned long vstart, unsigned long vend,   [in kernel_can_map_hugepud(), argument]
    1732  if (guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE)   [in kernel_can_map_hugepud()]
    1739  unsigned long vend,   [in kernel_map_hugepmd(), argument]
    1748  (vend - vstart <= mask256mb)) {   [in kernel_map_hugepmd()]
    1756  (vend - vstart <= mask2gb)) {   [in kernel_map_hugepmd()]
    1759  vend = vstart + mask256mb + 1UL;   [in kernel_map_hugepmd()]
    [all …]
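The kernel_can_map_hugepud() test at line 1732 is the readable core of this group: a range may take a huge PUD mapping only if its start sits on a PUD boundary and the range covers at least one full PUD. A sketch of that predicate with an illustrative (not sparc64's actual) PUD_SHIFT:

#include <stdbool.h>
#include <stdio.h>

#define PUD_SHIFT 33U                   /* illustrative value only */
#define PUD_SIZE  (1ULL << PUD_SHIFT)
#define PUD_MASK  (~(PUD_SIZE - 1))

/* Mirrors the shape of the check at line 1732. */
static bool can_map_hugepud(unsigned long long vstart, unsigned long long vend,
                            bool guard)
{
        return guard && !(vstart & ~PUD_MASK) && (vend - vstart) >= PUD_SIZE;
}

int main(void)
{
        printf("%d\n", can_map_hugepud(2ULL << 33, (2ULL << 33) + PUD_SIZE, true));
        return 0;
}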
|
D | srmmu.c |
     870  unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);   [in map_spbank(), local]
     878  if (vend > max_vaddr || vend < min_vaddr)   [in map_spbank()]
     879  vend = max_vaddr;   [in map_spbank()]
     881  while (vstart < vend) {   [in map_spbank()]
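map_spbank() rounds the bank's end address up to a pgdir boundary and then clamps it; the "vend < min_vaddr" arm at line 878 also catches wraparound of the unsigned addition. A sketch of that round-up-and-clamp step, assuming the classic 16 MiB SRMMU pgdir size (an assumption, not read from this file):

/* Sketch: align up to a pgdir boundary, then clamp out-of-range or
 * wrapped results back to max_vaddr, as lines 870-879 suggest. */
#include <stdio.h>

#define SRMMU_PGDIR_SIZE      0x1000000UL      /* assumed: 16 MiB */
#define SRMMU_PGDIR_ALIGN(a)  (((a) + SRMMU_PGDIR_SIZE - 1) & ~(SRMMU_PGDIR_SIZE - 1))

static unsigned long clamp_bank_end(unsigned long vbase, unsigned long num_bytes,
                                    unsigned long min_vaddr, unsigned long max_vaddr)
{
        unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + num_bytes);

        if (vend > max_vaddr || vend < min_vaddr)
                vend = max_vaddr;
        return vend;
}

int main(void)
{
        printf("%#lx\n", clamp_bank_end(0xf0000000UL, 0x00800000UL,
                                        0xf0000000UL, 0xfe000000UL));
        return 0;
}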
|
/arch/x86/lib/ |
D | usercopy_64.c |
      93  void *vend = addr + size;   [in clean_cache_range(), local]
      97  p < vend; p += x86_clflush_size)   [in clean_cache_range()]
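clean_cache_range() here, and clflush_cache_range_opt() in the pageattr.c entry further down, follow the same pattern: round the start pointer down to a cache-line boundary, then step one line at a time until past vend. A sketch of that walk, with a 64-byte line size and a no-op standing in for the real clwb/clflushopt instruction (both assumptions):

#include <stddef.h>
#include <stdint.h>

#define CACHE_LINE 64UL                 /* assumed line size */

static void flush_line(void *p)
{
        (void)p;                        /* stand-in for clwb/clflushopt */
}

static void clean_range(void *addr, size_t size)
{
        char *vend = (char *)addr + size;
        char *p;

        /* round the start down to a cache-line boundary */
        for (p = (char *)((uintptr_t)addr & ~(uintptr_t)(CACHE_LINE - 1));
             p < vend; p += CACHE_LINE)
                flush_line(p);
}

int main(void)
{
        char buf[256];

        clean_range(buf + 3, sizeof(buf) - 3);
        return 0;
}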
|
/arch/arm64/kernel/ |
D | head.S |
     210  .macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
     211  lsr \iend, \vend, \shift
     245  .macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
     249  compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
     255  compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
     262  compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
     267  compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
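compute_indices derives, for one translation level, the range of table slots a virtual window touches; map_memory then invokes it once per level with that level's shift and table size. A C rendering of what the visible lines suggest (the mask-by-(ptrs - 1) step is an assumption, since only the lsr at line 211 survives in the listing):

#include <stdio.h>

/* Sketch: first and last table slot touched by [vstart, vend] at a
 * level described by (shift, ptrs). */
static void compute_indices(unsigned long long vstart, unsigned long long vend,
                            unsigned int shift, unsigned long ptrs,
                            unsigned long *istart, unsigned long *iend)
{
        *istart = (unsigned long)(vstart >> shift) & (ptrs - 1);
        *iend   = (unsigned long)(vend >> shift) & (ptrs - 1);
}

int main(void)
{
        unsigned long istart, iend;

        /* hypothetical: 4 KiB pages, 2 MiB PMD blocks (shift 21), 512 slots */
        compute_indices(0xffff000010000000ULL, 0xffff000012000000ULL,
                        21, 512, &istart, &iend);
        printf("pmd slots %lu..%lu\n", istart, iend);
        return 0;
}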
|
/arch/powerpc/mm/book3s64/ |
D | hash_utils.c |
     244  int htab_bolt_mapping(unsigned long vstart, unsigned long vend,   [in htab_bolt_mapping(), argument]
     258  vstart, vend, pstart, prot, psize, ssize);   [in htab_bolt_mapping()]
     260  for (vaddr = vstart, paddr = pstart; vaddr < vend;   [in htab_bolt_mapping()]
     318  int htab_remove_mapping(unsigned long vstart, unsigned long vend,   [in htab_remove_mapping(), argument]
     332  for (vaddr = vstart; vaddr < vend; vaddr += step) {   [in htab_remove_mapping()]
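Both htab_bolt_mapping() and htab_remove_mapping() walk [vstart, vend) in page-sized steps, the bolt side advancing a physical cursor in lock-step with the virtual one. A sketch of that loop shape with a 64 KiB step and a trivial per-page callback (both assumptions; the real functions derive the step from the base page size and insert or evict HPTEs):

#include <stdio.h>

typedef int (*page_fn)(unsigned long vaddr, unsigned long paddr);

/* Sketch of the loop shape at lines 260 and 332. */
static int walk_bolted(unsigned long vstart, unsigned long vend,
                       unsigned long pstart, unsigned long step, page_fn fn)
{
        unsigned long vaddr, paddr;

        for (vaddr = vstart, paddr = pstart; vaddr < vend;
             vaddr += step, paddr += step) {
                int rc = fn(vaddr, paddr);

                if (rc)
                        return rc;      /* bail out on the first failure */
        }
        return 0;
}

static int print_page(unsigned long vaddr, unsigned long paddr)
{
        printf("bolt %#lx -> %#lx\n", vaddr, paddr);
        return 0;
}

int main(void)
{
        return walk_bolted(0xd0000000UL, 0xd0030000UL, 0x1000000UL,
                           0x10000UL, print_page);
}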
|
/arch/powerpc/include/asm/book3s/64/ |
D | mmu-hash.h |
     490  extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
     493  int htab_remove_mapping(unsigned long vstart, unsigned long vend,
|
/arch/mips/include/asm/ |
D | sgiarcs.h |
      94  char vend[8], prod[8];   [member]
|
/arch/x86/mm/ |
D | pageattr.c |
     282  void *vend = vaddr + size;   [in clflush_cache_range_opt(), local]
     284  if (p >= vend)   [in clflush_cache_range_opt()]
     287  for (; p < vend; p += clflush_size)   [in clflush_cache_range_opt()]
|