/arch/powerpc/mm/nohash/
D  8xx.c
     29  phys_addr_t v_block_mapped(unsigned long va)   in v_block_mapped() argument
     33  if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)   in v_block_mapped()
     34  return p + va - VIRT_IMMR_BASE;   in v_block_mapped()
     37  if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)   in v_block_mapped()
     38  return __pa(va);   in v_block_mapped()
     59  static pte_t __init *early_hugepd_alloc_kernel(hugepd_t *pmdp, unsigned long va)   in early_hugepd_alloc_kernel() argument
     70  return hugepte_offset(*(hugepd_t *)pmdp, va, PGDIR_SHIFT);   in early_hugepd_alloc_kernel()
     73  static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,   in __early_map_kernel_hugepage() argument
     76  pmd_t *pmdp = pmd_off_k(va);   in __early_map_kernel_hugepage()
     87  ptep = early_pte_alloc_kernel(pmdp, va);   in __early_map_kernel_hugepage()
     [all …]
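The 8xx hits above show the block-mapping shortcut: when a virtual address falls inside a region covered by a fixed block mapping (the IMMR window or block-mapped RAM), the physical address follows from plain offset arithmetic, with no page-table walk. A minimal userspace sketch of that range check, with hypothetical VIRT_BASE/PHYS_BASE/BLOCK_SIZE constants standing in for the kernel's bounds:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout constants, for illustration only. */
#define VIRT_BASE  0xc0000000UL  /* start of the block-mapped virtual region */
#define PHYS_BASE  0x00000000UL  /* physical base it maps to */
#define BLOCK_SIZE 0x10000000UL  /* size of the block mapping */

/* Return the physical address backing va, or 0 if va is not block-mapped. */
static uintptr_t v_block_mapped(uintptr_t va)
{
        if (va >= VIRT_BASE && va < VIRT_BASE + BLOCK_SIZE)
                return PHYS_BASE + (va - VIRT_BASE);
        return 0;
}

int main(void)
{
        printf("0x%lx\n", (unsigned long)v_block_mapped(VIRT_BASE + 0x1234));
        return 0;
}
```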
/arch/x86/kernel/
D  irq_64.c
     38  void *va;   in map_irq_stack() local
     47  va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);   in map_irq_stack()
     48  if (!va)   in map_irq_stack()
     51  per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;   in map_irq_stack()
     61  void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);   in map_irq_stack() local
     63  per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;   in map_irq_stack()
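Both variants of map_irq_stack() end the same way: the per-CPU stack pointer is set to va + IRQ_STACK_SIZE, one byte past the mapping, because the stack grows down from its high end. A hedged userspace analogue using mmap() in place of vmap(); map_stack() and STACK_SIZE here are illustrative, not the kernel's interface:

```c
#include <stdio.h>
#include <sys/mman.h>

#define STACK_SIZE (4 * 4096)

/* Map an anonymous region and return a pointer just past its top,
 * mirroring per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE. */
static void *map_stack(void)
{
        void *va = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (va == MAP_FAILED)
                return NULL;
        return (char *)va + STACK_SIZE;
}

int main(void)
{
        void *top = map_stack();

        printf("initial stack pointer: %p\n", top);
        return top ? 0 : 1;
}
```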
D  ldt.c
    215  static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)   in pgd_to_pmd_walk() argument
    223  p4d = p4d_offset(pgd, va);   in pgd_to_pmd_walk()
    227  pud = pud_offset(p4d, va);   in pgd_to_pmd_walk()
    231  return pmd_offset(pud, va);   in pgd_to_pmd_walk()
    290  unsigned long va;   in map_ldt_struct() local
    318  va = (unsigned long)ldt_slot_va(slot) + offset;   in map_ldt_struct()
    326  ptep = get_locked_pte(mm, va, &ptl);   in map_ldt_struct()
    338  set_pte_at(mm, va, ptep, pte);   in map_ldt_struct()
    351  unsigned long va;   in unmap_ldt_struct() local
    368  va = (unsigned long)ldt_slot_va(ldt->slot) + offset;   in unmap_ldt_struct()
    [all …]
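pgd_to_pmd_walk() descends one paging level per call, and each *_offset() helper does little more than pull a different bit-field out of the same va to index its table. A runnable sketch of that index arithmetic, assuming the common x86-64 layout (9-bit indices over 4 KiB pages); the shift values are that layout's, not something the excerpt defines:

```c
#include <stdio.h>

/* 9 index bits per level, 4 KiB pages (x86-64 4-level layout). */
#define LEVEL_BITS 9
#define LEVEL_MASK ((1UL << LEVEL_BITS) - 1)

/* Extract the table index for the level whose entries start at 'shift'. */
static unsigned long index_at(unsigned long va, unsigned int shift)
{
        return (va >> shift) & LEVEL_MASK;
}

int main(void)
{
        unsigned long va = 0x7f1234567000UL;

        printf("pgd=%lu pud=%lu pmd=%lu pte=%lu\n",
               index_at(va, 39), index_at(va, 30),
               index_at(va, 21), index_at(va, 12));
        return 0;
}
```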
/arch/xtensa/include/asm/
D  io.h
     57  unsigned long va = (unsigned long) addr;   in iounmap() local
     59  if (!(va >= XCHAL_KIO_CACHED_VADDR &&   in iounmap()
     60  va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&   in iounmap()
     61  !(va >= XCHAL_KIO_BYPASS_VADDR &&   in iounmap()
     62  va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))   in iounmap()
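This iounmap() only tears down mappings that lie outside the two fixed KIO windows (cached and bypass), since those windows are static and were never created by ioremap(). A small sketch of the containment test with hypothetical window constants; the `va - base < size` form also avoids wraparound when a window ends at the very top of the address space:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical window constants standing in for XCHAL_KIO_*. */
#define CACHED_BASE 0xe0000000UL
#define BYPASS_BASE 0xf0000000UL
#define KIO_SIZE    0x10000000UL

/* True if va falls in either fixed I/O window; such addresses must
 * not be handed to the dynamic unmap path. */
static bool in_kio_window(uintptr_t va)
{
        return (va >= CACHED_BASE && va - CACHED_BASE < KIO_SIZE) ||
               (va >= BYPASS_BASE && va - BYPASS_BASE < KIO_SIZE);
}

int main(void)
{
        printf("%d %d\n", in_kio_window(0xe8000000UL),
               in_kio_window(0x10000000UL));
        return 0;
}
```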
/arch/arm/mm/
D  mm.h
     25  static inline void set_top_pte(unsigned long va, pte_t pte)   in set_top_pte() argument
     27  pte_t *ptep = pte_offset_kernel(top_pmd, va);   in set_top_pte()
     29  local_flush_tlb_kernel_page(va);   in set_top_pte()
     32  static inline pte_t get_top_pte(unsigned long va)   in get_top_pte() argument
     34  pte_t *ptep = pte_offset_kernel(top_pmd, va);   in get_top_pte()
D  cache-xsc3l2.c
     58  static inline void l2_unmap_va(unsigned long va)   in l2_unmap_va() argument
     61  if (va != -1)   in l2_unmap_va()
     62  kunmap_atomic((void *)va);   in l2_unmap_va()
     69  unsigned long va = prev_va & PAGE_MASK;   in l2_map_va() local
     78  va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);   in l2_map_va()
     80  return va + (pa_offset >> (32 - PAGE_SHIFT));   in l2_map_va()
/arch/riscv/mm/
D  init.c
     38  phys_addr_t (*alloc_pte)(uintptr_t va);
     41  phys_addr_t (*alloc_pmd)(uintptr_t va);
    262  static inline phys_addr_t __init alloc_pte_early(uintptr_t va)   in alloc_pte_early() argument
    271  static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)   in alloc_pte_fixmap() argument
    276  static phys_addr_t alloc_pte_late(uintptr_t va)   in alloc_pte_late() argument
    287  uintptr_t va, phys_addr_t pa,   in create_pte_mapping() argument
    290  uintptr_t pte_idx = pte_index(va);   in create_pte_mapping()
    322  static phys_addr_t __init alloc_pmd_early(uintptr_t va)   in alloc_pmd_early() argument
    324  BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);   in alloc_pmd_early()
    329  static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)   in alloc_pmd_fixmap() argument
    [all …]
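The riscv init code keeps alloc_pte/alloc_pmd as function pointers so a single create_pte_mapping() path works at every boot stage; the pointers are retargeted from the early variants to the fixmap and late (page allocator) variants as the system comes up. A runnable sketch of that stage-switch pattern, with stand-in allocator bodies:

```c
#include <stdint.h>
#include <stdio.h>

/* Boot-stage dispatch: the mapping path calls through a function
 * pointer that is swapped as the backing allocator changes. The
 * allocator bodies below are hypothetical stand-ins. */
struct pt_ops {
        uintptr_t (*alloc_pte)(uintptr_t va);
};

static uintptr_t alloc_pte_early(uintptr_t va) { (void)va; return 0x1000; }
static uintptr_t alloc_pte_late(uintptr_t va)  { (void)va; return 0x2000; }

static struct pt_ops pt_ops = { .alloc_pte = alloc_pte_early };

int main(void)
{
        printf("early: 0x%lx\n", (unsigned long)pt_ops.alloc_pte(0));
        pt_ops.alloc_pte = alloc_pte_late; /* page allocator is up now */
        printf("late:  0x%lx\n", (unsigned long)pt_ops.alloc_pte(0));
        return 0;
}
```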
/arch/alpha/kernel/
D  traps.c
    428  unsigned long count, va, pc;   member
    437  do_entUna(void * va, unsigned long opcode, unsigned long reg,   in do_entUna() argument
    446  unaligned[0].va = (unsigned long) va;   in do_entUna()
    464  : "r"(va), "0"(0));   in do_entUna()
    480  : "r"(va), "0"(0));   in do_entUna()
    496  : "r"(va), "0"(0));   in do_entUna()
    524  : "r"(va), "r"(una_reg(reg)), "0"(0));   in do_entUna()
    548  : "r"(va), "r"(una_reg(reg)), "0"(0));   in do_entUna()
    572  : "r"(va), "r"(una_reg(reg)), "0"(0));   in do_entUna()
    579  pc, va, opcode, reg);   in do_entUna()
    [all …]
/arch/openrisc/kernel/
D  dma.c
     70  unsigned long va = (unsigned long)cpu_addr;   in arch_dma_set_uncached() local
     78  error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,   in arch_dma_set_uncached()
     89  unsigned long va = (unsigned long)cpu_addr;   in arch_dma_clear_uncached() local
     93  WARN_ON(walk_page_range(&init_mm, va, va + size,   in arch_dma_clear_uncached()
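Both helpers hand [va, va + size) to walk_page_range() with an ops table whose callbacks set or clear the no-cache bit on each PTE in the range. A toy version of the page-granular walk driving a callback; walk_range() and set_uncached() are illustrative stand-ins, not the kernel's interface:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Visit every page in [va, va + size) and hand it to fn(), stopping
 * on the first error, the shape of the walk_page_range() calls above. */
static int walk_range(unsigned long va, unsigned long size,
                      int (*fn)(unsigned long page_va))
{
        for (unsigned long p = va & ~(PAGE_SIZE - 1); p < va + size;
             p += PAGE_SIZE) {
                int err = fn(p);

                if (err)
                        return err;
        }
        return 0;
}

static int set_uncached(unsigned long page_va)
{
        printf("marking 0x%lx uncached\n", page_va);
        return 0;
}

int main(void)
{
        return walk_range(0x10000, 2 * PAGE_SIZE, set_uncached);
}
```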
/arch/x86/platform/efi/
D  efi_32.c
     40  void *va;   in efi_map_region() local
     48  va = __va(md->phys_addr);   in efi_map_region()
     51  set_memory_uc((unsigned long)va, md->num_pages);   in efi_map_region()
     53  va = ioremap_cache(md->phys_addr, size);   in efi_map_region()
     56  md->virt_addr = (unsigned long)va;   in efi_map_region()
     57  if (!va)   in efi_map_region()
/arch/powerpc/mm/book3s64/
D  hash_native.c
    153  unsigned long va;   in ___tlbie() local
    164  va = vpn << VPN_SHIFT;   in ___tlbie()
    171  va &= ~(0xffffULL << 48);   in ___tlbie()
    176  va &= ~((1ul << (64 - 52)) - 1);   in ___tlbie()
    177  va |= ssize << 8;   in ___tlbie()
    179  va |= sllp << 5;   in ___tlbie()
    181  : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)   in ___tlbie()
    187  va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);   in ___tlbie()
    188  va |= penc << 12;   in ___tlbie()
    189  va |= ssize << 8;   in ___tlbie()
    [all …]
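___tlbie() does not pass a raw address to the tlbie instruction: it masks va down to the page boundary and folds the segment-size and page-size encodings into fixed low-order bit positions. A sketch of the packing in the large-page branch above, with field positions taken from the excerpt; the values fed in by main() are arbitrary:

```c
#include <stdint.h>
#include <stdio.h>

/* Pack a tlbie-style operand: keep the page-aligned VA bits, then
 * fold the page-size encoding (bit 12 up) and segment size (bit 8 up)
 * into the freed low bits, as in lines 187-189 above. */
static uint64_t pack_tlbie_rb(uint64_t va, unsigned int ssize,
                              unsigned int penc, unsigned int page_shift)
{
        uint64_t rb = va & ~((1ULL << page_shift) - 1); /* drop in-page bits */

        rb |= (uint64_t)penc << 12; /* page-size encoding */
        rb |= (uint64_t)ssize << 8; /* segment size */
        return rb;
}

int main(void)
{
        printf("rb = 0x%016llx\n", (unsigned long long)
               pack_tlbie_rb(0xc000000012345678ULL, 1, 0, 16));
        return 0;
}
```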
D  radix_tlb.c
    152  static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,   in __tlbiel_va() argument
    157  rb = va & ~(PPC_BITMASK(52, 63));   in __tlbiel_va()
    168  static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,   in __tlbie_va() argument
    173  rb = va & ~(PPC_BITMASK(52, 63));   in __tlbie_va()
    184  static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,   in __tlbie_lpid_va() argument
    189  rb = va & ~(PPC_BITMASK(52, 63));   in __tlbie_lpid_va()
    201  static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,   in fixup_tlbie_va() argument
    206  __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);   in fixup_tlbie_va()
    211  __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);   in fixup_tlbie_va()
    215  static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,   in fixup_tlbie_va_range() argument
    [all …]
/arch/arm/mach-omap1/
D  irq.c
     60  void __iomem *va;   member
     72  return readl_relaxed(irq_banks[bank].va + offset);   in irq_bank_readl()
     76  writel_relaxed(value, irq_banks[bank].va + offset);   in irq_bank_writel()
     82  writel_relaxed(0x1, irq_banks[1].va + IRQ_CONTROL_REG_OFFSET);   in omap_ack_irq()
     84  writel_relaxed(0x1, irq_banks[0].va + IRQ_CONTROL_REG_OFFSET);   in omap_ack_irq()
    146  void __iomem *l1 = irq_banks[0].va;   in omap1_handle_irq()
    147  void __iomem *l2 = irq_banks[1].va;   in omap1_handle_irq()
    223  irq_banks[i].va = ioremap(irq_banks[i].base_reg, 0xff);   in omap1_init_irq()
    224  if (WARN_ON(!irq_banks[i].va))   in omap1_init_irq()
    267  omap_alloc_gc(irq_banks[i].va, irq_base + i * 32, 32);   in omap1_init_irq()
/arch/parisc/include/asm/
D  special_insns.h
      5  #define lpa(va) ({ \   argument
     13  : "r" (va) \
     19  #define lpa_user(va) ({ \   argument
     27  : "r" (va) \
/arch/powerpc/math-emu/
D  math_efp.c
    183  union dw_union vc, va, vb;   in do_spe_mathemu() local
    202  va.wp[0] = current->thread.evr[fa];   in do_spe_mathemu()
    203  va.wp[1] = regs->gpr[fa];   in do_spe_mathemu()
    211  pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);   in do_spe_mathemu()
    221  FP_UNPACK_SP(SA, va.wp + 1);   in do_spe_mathemu()
    226  FP_UNPACK_SP(SA, va.wp + 1);   in do_spe_mathemu()
    235  vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;   in do_spe_mathemu()
    239  vc.wp[1] = va.wp[1] | SIGN_BIT_S;   in do_spe_mathemu()
    243  vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;   in do_spe_mathemu()
    349  FP_UNPACK_DP(DA, va.dp);   in do_spe_mathemu()
    [all …]
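The three SIGN_BIT_S lines above are the absolute-value, negative-absolute, and negate cases of the emulator: on IEEE 754 values they never need real arithmetic, only clearing, setting, or flipping the sign bit of the raw word. A runnable demonstration on single-precision bit patterns:

```c
#include <stdint.h>
#include <stdio.h>

#define SIGN_BIT_S 0x80000000u

/* Sign manipulation on the raw float bits: clear, set, or flip bit 31. */
static uint32_t fp_abs(uint32_t w)  { return w & ~SIGN_BIT_S; }
static uint32_t fp_nabs(uint32_t w) { return w |  SIGN_BIT_S; }
static uint32_t fp_neg(uint32_t w)  { return w ^  SIGN_BIT_S; }

int main(void)
{
        uint32_t minus_two = 0xc0000000u; /* bit pattern of -2.0f */

        printf("abs:  %08x\n", fp_abs(minus_two));  /* 40000000 (+2.0f) */
        printf("nabs: %08x\n", fp_nabs(minus_two)); /* c0000000 (-2.0f) */
        printf("neg:  %08x\n", fp_neg(minus_two));  /* 40000000 (+2.0f) */
        return 0;
}
```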
/arch/microblaze/mm/
D  pgtable.c
    135  int map_page(unsigned long va, phys_addr_t pa, int flags)   in map_page() argument
    144  p4d = p4d_offset(pgd_offset_k(va), va);   in map_page()
    145  pud = pud_offset(p4d, va);   in map_page()
    146  pd = pmd_offset(pud, va);   in map_page()
    148  pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */   in map_page()
    153  set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,   in map_page()
    156  _tlbie(va);   in map_page()
/arch/powerpc/mm/
D  pgtable_32.c
     63  pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)   in early_pte_alloc_kernel() argument
     70  return pte_offset_kernel(pmdp, va);   in early_pte_alloc_kernel()
     74  int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)   in map_kernel_page() argument
     81  pd = pmd_off_k(va);   in map_kernel_page()
     84  pg = pte_alloc_kernel(pd, va);   in map_kernel_page()
     86  pg = early_pte_alloc_kernel(pd, va);   in map_kernel_page()
     93  set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));   in map_kernel_page()
D  ioremap.c
     83  unsigned long va;   in do_ioremap() local
     90  va = (unsigned long)area->addr;   in do_ioremap()
     92  ret = ioremap_page_range(va, va + size, pa, prot);   in do_ioremap()
     96  unmap_kernel_range(va, size);   in do_ioremap()
D  pgtable.c
    197  void unmap_kernel_page(unsigned long va)   in unmap_kernel_page() argument
    199  pmd_t *pmdp = pmd_off_k(va);   in unmap_kernel_page()
    200  pte_t *ptep = pte_offset_kernel(pmdp, va);   in unmap_kernel_page()
    202  pte_clear(&init_mm, va, ptep);   in unmap_kernel_page()
    203  flush_tlb_kernel_range(va, va + PAGE_SIZE);   in unmap_kernel_page()
    325  unsigned long vmalloc_to_phys(void *va)   in vmalloc_to_phys() argument
    327  unsigned long pfn = vmalloc_to_pfn(va);   in vmalloc_to_phys()
    330  return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);   in vmalloc_to_phys()
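vmalloc_to_phys() splits the translation in two: vmalloc_to_pfn() resolves the page frame for the page containing va, and offset_in_page(va) adds back the position within that page. A sketch of the recombination; resolve_pfn() is a hypothetical stand-in for vmalloc_to_pfn() (here a toy identity mapping rather than a page-table walk):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Hypothetical stand-in for vmalloc_to_pfn(); the real thing walks
 * the page tables, this toy uses an identity mapping. */
static uintptr_t resolve_pfn(const void *va)
{
        return (uintptr_t)va >> PAGE_SHIFT;
}

/* Page-frame base plus in-page offset, as in vmalloc_to_phys(). */
static uintptr_t to_phys(const void *va)
{
        uintptr_t pfn = resolve_pfn(va);

        return (pfn << PAGE_SHIFT) + ((uintptr_t)va & (PAGE_SIZE - 1));
}

int main(void)
{
        printf("0x%lx\n", (unsigned long)to_phys((void *)0x12345678UL));
        return 0;
}
```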
/arch/parisc/kernel/
D  entry.S
    184  va = r8 /* virtual address for which the trap occurred */   define
    197  mfctl %pcoq, va
    214  mfctl %pcoq, va
    228  mfctl %ior,va
    246  mfctl %ior,va
    260  mfctl %ior, va
    278  mfctl %ior, va
    290  mfctl %ior,va
    306  mfctl %ior,va
    320  mfctl %ior,va
    [all …]
/arch/ia64/mm/
D  tlb.c
    421  static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)   in is_tr_overlap() argument
    425  u64 va_rr = ia64_get_rr(va);   in is_tr_overlap()
    427  u64 va_end = va + (1<<log_size) - 1;   in is_tr_overlap()
    434  if (va > tr_end || p->ifa > va_end)   in is_tr_overlap()
    454  int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)   in ia64_itr_entry() argument
    475  if (is_tr_overlap(p, va, log_size)) {   in ia64_itr_entry()
    487  if (is_tr_overlap(p, va, log_size)) {   in ia64_itr_entry()
    525  ia64_itr(0x1, i, va, pte, log_size);   in ia64_itr_entry()
    528  p->ifa = va;   in ia64_itr_entry()
    531  p->rr = ia64_get_rr(va);   in ia64_itr_entry()
    [all …]
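is_tr_overlap() rejects a new translation-register entry when its range intersects an existing one, and two inclusive ranges are disjoint exactly when one starts past the other's end (the "va > tr_end || p->ifa > va_end" test above). A runnable version of that check, with ranges of 1 << log_size bytes as in the excerpt:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Inclusive ranges [start, start + (1 << log_size) - 1] overlap unless
 * one lies entirely past the other. */
static bool ranges_overlap(uint64_t a_start, uint64_t a_log_size,
                           uint64_t b_start, uint64_t b_log_size)
{
        uint64_t a_end = a_start + (1ULL << a_log_size) - 1;
        uint64_t b_end = b_start + (1ULL << b_log_size) - 1;

        return !(b_start > a_end || a_start > b_end);
}

int main(void)
{
        printf("%d\n", ranges_overlap(0x1000, 12, 0x1800, 12)); /* 1: overlap */
        printf("%d\n", ranges_overlap(0x1000, 12, 0x2000, 12)); /* 0: disjoint */
        return 0;
}
```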
/arch/sparc/kernel/
D  ioport.c
    185  void __iomem *va; /* P3 diag */   in _sparc_alloc_io() local
    208  va = _sparc_ioremap(res, busno, phys, size);   in _sparc_alloc_io()
    210  return va;   in _sparc_alloc_io()
    322  void *va;   in arch_dma_alloc() local
    328  va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));   in arch_dma_alloc()
    329  if (!va) {   in arch_dma_alloc()
    338  srmmu_mapiorange(0, virt_to_phys(va), addr, size);   in arch_dma_alloc()
    340  *dma_handle = virt_to_phys(va);   in arch_dma_alloc()
    344  free_pages((unsigned long)va, get_order(size));   in arch_dma_alloc()
/arch/x86/include/asm/
D  edac.h
      7  static inline void edac_atomic_scrub(void *va, u32 size)   in edac_atomic_scrub() argument
      9  u32 i, *virt_addr = va;   in edac_atomic_scrub()
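In the full x86 version (only the opening lines are excerpted here), the body walks the buffer one 32-bit word at a time and performs a locked add of zero on each word; the atomic read-modify-write forces ECC-corrected data to be written back to DRAM. A hedged sketch of that scrub loop, using the GCC/Clang __atomic builtin in place of the raw lock-prefixed instruction:

```c
#include <stdint.h>

/* Atomically rewrite each 32-bit word with itself (add 0); the locked
 * read-modify-write stores the corrected value back to memory. */
static void atomic_scrub(void *va, uint32_t size)
{
        uint32_t *p = va;

        for (uint32_t i = 0; i < size; i += sizeof(*p), p++)
                __atomic_fetch_add(p, 0, __ATOMIC_SEQ_CST);
}

int main(void)
{
        uint32_t buf[4] = { 1, 2, 3, 4 };

        atomic_scrub(buf, sizeof(buf));
        return buf[0] == 1 ? 0 : 1; /* contents are unchanged */
}
```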
/arch/powerpc/include/asm/
D  edac.h
     19  static __inline__ void edac_atomic_scrub(void *va, u32 size)   in edac_atomic_scrub() argument
     21  unsigned int *virt_addr = va;   in edac_atomic_scrub()
/arch/arm/mach-davinci/include/mach/
D  hardware.h
     29  #define io_v2p(va) ((va) - IO_OFFSET)   argument
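io_v2p() is the degenerate case of address translation: the whole I/O space is mapped at a constant offset, so virtual-to-physical is a single subtraction and the inverse a single addition. A sketch with an illustrative IO_OFFSET value; the paired io_p2v() is the hypothetical inverse, not shown in the excerpt:

```c
#include <stdio.h>

/* Fixed-offset virtual<->physical translation; the offset value is
 * illustrative only. */
#define IO_OFFSET  0xfd000000UL
#define io_p2v(pa) ((pa) + IO_OFFSET)
#define io_v2p(va) ((va) - IO_OFFSET)

int main(void)
{
        unsigned long pa = 0x01c00000UL;

        printf("va=0x%lx, back to pa=0x%lx\n",
               io_p2v(pa), io_v2p(io_p2v(pa)));
        return 0;
}
```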