/arch/x86/kernel/

irq_64.c
    37  void *va;  in map_irq_stack() (local)
    46  va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);  in map_irq_stack()
    47  if (!va)  in map_irq_stack()
    50  per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;  in map_irq_stack()
    60  void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);  in map_irq_stack() (local)
    62  per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;  in map_irq_stack()
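
The map_irq_stack() hits show the vmap()-backed variant of the x86-64 IRQ stack setup: backing pages are remapped into vmalloc space (which gives the stack guard pages), and the per-CPU pointer stores the top of the stack. A minimal sketch of that pattern, assuming the pages array is already populated; map_stack_example and its parameters are illustrative names, not the kernel's exact code:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Map already-allocated backing pages into vmalloc space, then report
 * the TOP of the new stack: x86-64 stacks grow down, so the initial
 * stack pointer is base + size.
 */
static int map_stack_example(struct page **pages, unsigned int nr_pages,
                             void **top)
{
        void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);

        if (!va)
                return -ENOMEM;

        *top = va + nr_pages * PAGE_SIZE;
        return 0;
}
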
ldt.c
    131  static pmd_t *pgd_to_pmd_walk(pgd_t *pgd, unsigned long va)  in pgd_to_pmd_walk() (argument)
    139  p4d = p4d_offset(pgd, va);  in pgd_to_pmd_walk()
    143  pud = pud_offset(p4d, va);  in pgd_to_pmd_walk()
    147  return pmd_offset(pud, va);  in pgd_to_pmd_walk()
    206  unsigned long va;  in map_ldt_struct() (local)
    234  va = (unsigned long)ldt_slot_va(slot) + offset;  in map_ldt_struct()
    242  ptep = get_locked_pte(mm, va, &ptl);  in map_ldt_struct()
    254  set_pte_at(mm, va, ptep, pte);  in map_ldt_struct()
    267  unsigned long va;  in unmap_ldt_struct() (local)
    284  va = (unsigned long)ldt_slot_va(ldt->slot) + offset;  in unmap_ldt_struct()
    [all …]
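
pgd_to_pmd_walk() is the standard top-down descent through the paging hierarchy: each level's *_offset() helper takes the previous level's entry plus the virtual address. A sketch of the same descent under the generic API; walk_to_pmd and the NULL-on-hole policy are illustrative, not the kernel's exact code:

#include <linux/mm.h>

/*
 * Descend pgd -> p4d -> pud and return the PMD entry covering va,
 * or NULL if any intermediate level is not populated.
 */
static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long va)
{
        pgd_t *pgd = pgd_offset(mm, va);
        p4d_t *p4d;
        pud_t *pud;

        if (pgd_none(*pgd))
                return NULL;

        p4d = p4d_offset(pgd, va);
        if (p4d_none(*p4d))
                return NULL;

        pud = pud_offset(p4d, va);
        if (pud_none(*pud))
                return NULL;

        return pmd_offset(pud, va);
}
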
/arch/riscv/mm/

init.c
    187  static phys_addr_t __init alloc_pte(uintptr_t va)  in alloc_pte() (argument)
    199  uintptr_t va, phys_addr_t pa,  in create_pte_mapping() (argument)
    202  uintptr_t pte_index = pte_index(va);  in create_pte_mapping()
    232  static phys_addr_t __init alloc_pmd(uintptr_t va)  in alloc_pmd() (argument)
    239  pmd_num = (va - PAGE_OFFSET) >> PGDIR_SHIFT;  in alloc_pmd()
    245  uintptr_t va, phys_addr_t pa,  in create_pmd_mapping() (argument)
    250  uintptr_t pmd_index = pmd_index(va);  in create_pmd_mapping()
    259  pte_phys = alloc_pte(va);  in create_pmd_mapping()
    268  create_pte_mapping(ptep, va, pa, sz, prot);  in create_pmd_mapping()
    289  uintptr_t va, phys_addr_t pa,  in create_pgd_mapping() (argument)
    [all …]
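
create_pte_mapping() ends the descent by indexing a PTE page with pte_index(va) and installing a leaf entry. A sketch of just that leaf step, relying on the pte_index()/pfn_pte() helpers the hits themselves use; set_leaf_example is a hypothetical name:

#include <linux/mm.h>
#include <linux/pfn.h>

/*
 * Pick the PTE slot that covers va within one PTE page and install a
 * mapping to physical address pa with the given protections.
 */
static void set_leaf_example(pte_t *ptep, uintptr_t va, phys_addr_t pa,
                             pgprot_t prot)
{
        ptep[pte_index(va)] = pfn_pte(PFN_DOWN(pa), prot);
}
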
/arch/openrisc/kernel/

dma.c
    90  unsigned long va;  in arch_dma_alloc() (local)
    100  va = (unsigned long)page;  in arch_dma_alloc()
    106  if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,  in arch_dma_alloc()
    112  return (void *)va;  in arch_dma_alloc()
    119  unsigned long va = (unsigned long)vaddr;  in arch_dma_free() (local)
    122  WARN_ON(walk_page_range(&init_mm, va, va + size,  in arch_dma_free()
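
arch_dma_alloc() reuses the generic page-table walker to retag a kernel range as uncacheable. A sketch of the walker plumbing, with the arch-specific PTE update left as a stub; set_nocache_pte and nocache_ops are illustrative names, not OpenRISC's real callbacks:

#include <linux/pagewalk.h>

/* Called once per PTE in the walked range. */
static int set_nocache_pte(pte_t *pte, unsigned long addr,
                           unsigned long next, struct mm_walk *walk)
{
        /* arch-specific "mark this PTE uncached" update would go here */
        return 0;
}

static const struct mm_walk_ops nocache_ops = {
        .pte_entry = set_nocache_pte,
};

/* Usage, mirroring the hit at line 106:
 *      walk_page_range(&init_mm, va, va + size, &nocache_ops, NULL);
 */
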
/arch/xtensa/include/asm/

io.h
    67  unsigned long va = (unsigned long) addr;  in iounmap() (local)
    69  if (!(va >= XCHAL_KIO_CACHED_VADDR &&  in iounmap()
    70  va - XCHAL_KIO_CACHED_VADDR < XCHAL_KIO_SIZE) &&  in iounmap()
    71  !(va >= XCHAL_KIO_BYPASS_VADDR &&  in iounmap()
    72  va - XCHAL_KIO_BYPASS_VADDR < XCHAL_KIO_SIZE))  in iounmap()
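
iounmap() here only tears down mappings that fall outside the two fixed KIO windows. The containment test can be reduced to a single unsigned comparison, since (va - base) wraps to a huge value whenever va < base; a sketch, where in_window is a hypothetical helper:

#include <linux/types.h>

/*
 * True iff va lies inside [base, base + size): the subtraction wraps
 * for va < base, so one unsigned compare covers both bounds.
 */
static inline bool in_window(unsigned long va, unsigned long base,
                             unsigned long size)
{
        return va - base < size;
}
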
/arch/alpha/kernel/

traps.c
    428  unsigned long count, va, pc;  (member)
    437  do_entUna(void * va, unsigned long opcode, unsigned long reg,  in do_entUna() (argument)
    446  unaligned[0].va = (unsigned long) va;  in do_entUna()
    464  : "r"(va), "0"(0));  in do_entUna()
    480  : "r"(va), "0"(0));  in do_entUna()
    496  : "r"(va), "0"(0));  in do_entUna()
    524  : "r"(va), "r"(una_reg(reg)), "0"(0));  in do_entUna()
    548  : "r"(va), "r"(una_reg(reg)), "0"(0));  in do_entUna()
    572  : "r"(va), "r"(una_reg(reg)), "0"(0));  in do_entUna()
    579  pc, va, opcode, reg);  in do_entUna()
    [all …]
/arch/arm/mm/

mm.h
    26  static inline void set_top_pte(unsigned long va, pte_t pte)  in set_top_pte() (argument)
    28  pte_t *ptep = pte_offset_kernel(top_pmd, va);  in set_top_pte()
    30  local_flush_tlb_kernel_page(va);  in set_top_pte()
    33  static inline pte_t get_top_pte(unsigned long va)  in get_top_pte() (argument)
    35  pte_t *ptep = pte_offset_kernel(top_pmd, va);  in get_top_pte()
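
Both helpers resolve a PTE slot under the fixed top_pmd, and the setter flushes the single affected TLB entry. A sketch of the write side, in the ARM-internal context these hits assume (top_pmd, set_pte_ext); the set_pte_ext() call is elided from the hits above and assumed here:

/*
 * Install pte at the kernel PTE slot for va under top_pmd, then shoot
 * down just that page's TLB entry on the local CPU.
 */
static inline void set_top_pte_example(unsigned long va, pte_t pte)
{
        pte_t *ptep = pte_offset_kernel(top_pmd, va);

        set_pte_ext(ptep, pte, 0);       /* assumed: not shown in the hits */
        local_flush_tlb_kernel_page(va);
}
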
cache-xsc3l2.c
    58  static inline void l2_unmap_va(unsigned long va)  in l2_unmap_va() (argument)
    61  if (va != -1)  in l2_unmap_va()
    62  kunmap_atomic((void *)va);  in l2_unmap_va()
    69  unsigned long va = prev_va & PAGE_MASK;  in l2_map_va() (local)
    78  va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);  in l2_map_va()
    80  return va + (pa_offset >> (32 - PAGE_SHIFT));  in l2_map_va()
/arch/parisc/include/asm/

special_insns.h
    5  #define lpa(va) ({ \  (argument)
    11  : "r" (va) \
    17  #define lpa_user(va) ({ \  (argument)
    23  : "r" (va) \
/arch/arm/mach-omap2/

omap_hwmod_reset.c
    54  void __iomem *va;  in omap_hwmod_aess_preprogram() (local)
    56  va = omap_hwmod_get_mpu_rt_va(oh);  in omap_hwmod_aess_preprogram()
    57  if (!va)  in omap_hwmod_aess_preprogram()
    60  aess_enable_autogating(va);  in omap_hwmod_aess_preprogram()
/arch/powerpc/mm/book3s64/

hash_native.c
    146  unsigned long va;  in ___tlbie() (local)
    157  va = vpn << VPN_SHIFT;  in ___tlbie()
    164  va &= ~(0xffffULL << 48);  in ___tlbie()
    169  va &= ~((1ul << (64 - 52)) - 1);  in ___tlbie()
    170  va |= ssize << 8;  in ___tlbie()
    172  va |= sllp << 5;  in ___tlbie()
    174  : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)  in ___tlbie()
    180  va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);  in ___tlbie()
    181  va |= penc << 12;  in ___tlbie()
    182  va |= ssize << 8;  in ___tlbie()
    [all …]
radix_tlb.c
    151  static __always_inline void __tlbiel_va(unsigned long va, unsigned long pid,  in __tlbiel_va() (argument)
    156  rb = va & ~(PPC_BITMASK(52, 63));  in __tlbiel_va()
    167  static __always_inline void __tlbie_va(unsigned long va, unsigned long pid,  in __tlbie_va() (argument)
    172  rb = va & ~(PPC_BITMASK(52, 63));  in __tlbie_va()
    183  static __always_inline void __tlbie_lpid_va(unsigned long va, unsigned long lpid,  in __tlbie_lpid_va() (argument)
    188  rb = va & ~(PPC_BITMASK(52, 63));  in __tlbie_lpid_va()
    200  static inline void fixup_tlbie_va(unsigned long va, unsigned long pid,  in fixup_tlbie_va() (argument)
    205  __tlbie_va(va, 0, ap, RIC_FLUSH_TLB);  in fixup_tlbie_va()
    210  __tlbie_va(va, pid, ap, RIC_FLUSH_TLB);  in fixup_tlbie_va()
    214  static inline void fixup_tlbie_va_range(unsigned long va, unsigned long pid,  in fixup_tlbie_va_range() (argument)
    [all …]
/arch/arm/mach-omap1/

irq.c
    60  void __iomem *va;  (member)
    72  return readl_relaxed(irq_banks[bank].va + offset);  in irq_bank_readl()
    76  writel_relaxed(value, irq_banks[bank].va + offset);  in irq_bank_writel()
    82  writel_relaxed(0x1, irq_banks[1].va + IRQ_CONTROL_REG_OFFSET);  in omap_ack_irq()
    84  writel_relaxed(0x1, irq_banks[0].va + IRQ_CONTROL_REG_OFFSET);  in omap_ack_irq()
    146  void __iomem *l1 = irq_banks[0].va;  in omap1_handle_irq()
    147  void __iomem *l2 = irq_banks[1].va;  in omap1_handle_irq()
    223  irq_banks[i].va = ioremap(irq_banks[i].base_reg, 0xff);  in omap1_init_irq()
    224  if (WARN_ON(!irq_banks[i].va))  in omap1_init_irq()
    267  omap_alloc_gc(irq_banks[i].va, irq_base + i * 32, 32);  in omap1_init_irq()
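
The va member caches an ioremap() of each interrupt bank's register block; all register traffic then goes through relaxed MMIO accessors at small offsets from it. A sketch of that idiom, with illustrative struct and function names:

#include <linux/io.h>

struct reg_bank_example {
        unsigned long base_reg;  /* physical base of the block */
        void __iomem *va;        /* filled in by ioremap() at init */
};

static u32 bank_read(struct reg_bank_example *b, unsigned int offset)
{
        return readl_relaxed(b->va + offset);
}

static void bank_write(struct reg_bank_example *b, unsigned int offset,
                       u32 value)
{
        writel_relaxed(value, b->va + offset);
}

/* init, as at line 223:
 *      b->va = ioremap(b->base_reg, 0xff);
 *      if (WARN_ON(!b->va))
 *              return;
 */
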
/arch/arm/include/asm/

kvm_mmu.h
    225  void *va = kmap_atomic_pfn(pfn);  in __clean_dcache_guest_page() (local)
    227  kvm_flush_dcache_to_poc(va, PAGE_SIZE);  in __clean_dcache_guest_page()
    232  kunmap_atomic(va);  in __clean_dcache_guest_page()
    275  void *va = kmap_atomic_pfn(pfn);  in __invalidate_icache_guest_page() (local)
    276  void *end = va + PAGE_SIZE;  in __invalidate_icache_guest_page()
    277  void *addr = va;  in __invalidate_icache_guest_page()
    290  kunmap_atomic(va);  in __invalidate_icache_guest_page()
    303  void *va = kmap_atomic(pte_page(pte));  in __kvm_flush_dcache_pte() (local)
    305  kvm_flush_dcache_to_poc(va, PAGE_SIZE);  in __kvm_flush_dcache_pte()
    307  kunmap_atomic(va);  in __kvm_flush_dcache_pte()
    [all …]
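
All three helpers follow the same map, maintain, unmap shape: a guest page gets a short-lived atomic kernel mapping just long enough for cache maintenance. A sketch of the dcache-clean case; clean_guest_page_example is a hypothetical name, and kvm_flush_dcache_to_poc() is the ARM helper the hits show:

#include <linux/highmem.h>
#include <linux/kvm_types.h>

/*
 * Temporarily map a guest pfn, clean its dcache lines to the point of
 * coherency, then drop the atomic mapping.
 */
static void clean_guest_page_example(kvm_pfn_t pfn)
{
        void *va = kmap_atomic_pfn(pfn);

        kvm_flush_dcache_to_poc(va, PAGE_SIZE);
        kunmap_atomic(va);
}
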
/arch/powerpc/mm/

ioremap_64.c
    13  unsigned long va = (unsigned long)ea;  in __ioremap_at() (local)
    29  ret = ioremap_page_range(va, va + size, pa, prot);  in __ioremap_at()
    31  unmap_kernel_range(va, size);  in __ioremap_at()
    33  ret = early_ioremap_range(va, pa, size, prot);  in __ioremap_at()
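
__ioremap_at() (and do_ioremap() below) pair ioremap_page_range() with a teardown on failure, since the range may have been partially populated before the error. A sketch of that pairing; map_io_example is a hypothetical wrapper:

#include <linux/io.h>
#include <linux/vmalloc.h>

/*
 * Populate kernel page tables for [va, va + size) -> pa; on failure,
 * tear down whatever part of the range was already mapped.
 */
static int map_io_example(unsigned long va, phys_addr_t pa,
                          unsigned long size, pgprot_t prot)
{
        int ret = ioremap_page_range(va, va + size, pa, prot);

        if (ret)
                unmap_kernel_range(va, size);
        return ret;
}
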
pgtable_32.c
    48  static pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)  in early_pte_alloc_kernel() (argument)
    55  return pte_offset_kernel(pmdp, va);  in early_pte_alloc_kernel()
    59  int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)  in map_kernel_page() (argument)
    66  pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);  in map_kernel_page()
    69  pg = pte_alloc_kernel(pd, va);  in map_kernel_page()
    71  pg = early_pte_alloc_kernel(pd, va);  in map_kernel_page()
    78  set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));  in map_kernel_page()
ioremap.c
    82  unsigned long va;  in do_ioremap() (local)
    89  va = (unsigned long)area->addr;  in do_ioremap()
    91  ret = ioremap_page_range(va, va + size, pa, prot);  in do_ioremap()
    95  unmap_kernel_range(va, size);  in do_ioremap()
/arch/powerpc/mm/nohash/

8xx.c
    27  phys_addr_t v_block_mapped(unsigned long va)  in v_block_mapped() (argument)
    33  if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)  in v_block_mapped()
    34  return p + va - VIRT_IMMR_BASE;  in v_block_mapped()
    35  if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)  in v_block_mapped()
    36  return __pa(va);  in v_block_mapped()
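
v_block_mapped() answers "which physical address backs this VA, if a block mapping covers it" by constant-offset arithmetic per window. A generic sketch of one such window; all names are illustrative, and 0 means "not block-mapped", as in the original:

#include <linux/types.h>

/*
 * Translate va through one block mapping [virt_base, virt_base + size)
 * that mirrors [phys_base, phys_base + size); 0 = not covered.
 */
static phys_addr_t block_translate_example(unsigned long va,
                                           unsigned long virt_base,
                                           phys_addr_t phys_base,
                                           unsigned long size)
{
        if (va >= virt_base && va < virt_base + size)
                return phys_base + (va - virt_base);
        return 0;
}
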
/arch/powerpc/math-emu/

math_efp.c
    183  union dw_union vc, va, vb;  in do_spe_mathemu() (local)
    202  va.wp[0] = current->thread.evr[fa];  in do_spe_mathemu()
    203  va.wp[1] = regs->gpr[fa];  in do_spe_mathemu()
    211  pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);  in do_spe_mathemu()
    221  FP_UNPACK_SP(SA, va.wp + 1);  in do_spe_mathemu()
    226  FP_UNPACK_SP(SA, va.wp + 1);  in do_spe_mathemu()
    235  vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;  in do_spe_mathemu()
    239  vc.wp[1] = va.wp[1] | SIGN_BIT_S;  in do_spe_mathemu()
    243  vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;  in do_spe_mathemu()
    349  FP_UNPACK_DP(DA, va.dp);  in do_spe_mathemu()
    [all …]
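
The hits at lines 235, 239, and 243 implement absolute value, negative absolute value, and negate directly on the raw single-precision bit image by manipulating the IEEE-754 sign bit, with no unpacking needed. A standalone sketch with illustrative names:

#include <stdint.h>

#define SIGN_BIT_SP 0x80000000u  /* IEEE-754 binary32 sign bit */

static inline uint32_t sp_abs(uint32_t x)  { return x & ~SIGN_BIT_SP; }
static inline uint32_t sp_nabs(uint32_t x) { return x | SIGN_BIT_SP; }
static inline uint32_t sp_neg(uint32_t x)  { return x ^ SIGN_BIT_SP; }
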
/arch/parisc/kernel/

entry.S
    185  va = r8 /* virtual address for which the trap occurred */  (define)
    198  mfctl %pcoq, va
    215  mfctl %pcoq, va
    229  mfctl %ior,va
    247  mfctl %ior,va
    261  mfctl %ior, va
    279  mfctl %ior, va
    291  mfctl %ior,va
    307  mfctl %ior,va
    321  mfctl %ior,va
    [all …]
/arch/arm/plat-samsung/

pm-debug.c
    36  va_list va;  in s3c_pm_dbg() (local)
    39  va_start(va, fmt);  in s3c_pm_dbg()
    40  vsnprintf(buff, sizeof(buff), fmt, va);  in s3c_pm_dbg()
    41  va_end(va);  in s3c_pm_dbg()
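
Unlike every other hit in this listing, the va here is a va_list, not a virtual address. s3c_pm_dbg() is the standard bounded printf-forwarding idiom; a self-contained sketch, where dbg_example and the stderr sink are illustrative (the real function writes to the S3C debug UART):

#include <stdarg.h>
#include <stdio.h>

static void dbg_example(const char *fmt, ...)
{
        char buff[256];
        va_list va;

        /* Capture the caller's variadic arguments after fmt, format
         * them into a bounded buffer, and release the list. */
        va_start(va, fmt);
        vsnprintf(buff, sizeof(buff), fmt, va);
        va_end(va);

        fputs(buff, stderr);
}
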
/arch/ia64/mm/

tlb.c
    422  static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)  in is_tr_overlap() (argument)
    426  u64 va_rr = ia64_get_rr(va);  in is_tr_overlap()
    428  u64 va_end = va + (1<<log_size) - 1;  in is_tr_overlap()
    435  if (va > tr_end || p->ifa > va_end)  in is_tr_overlap()
    455  int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)  in ia64_itr_entry() (argument)
    476  if (is_tr_overlap(p, va, log_size)) {  in ia64_itr_entry()
    488  if (is_tr_overlap(p, va, log_size)) {  in ia64_itr_entry()
    526  ia64_itr(0x1, i, va, pte, log_size);  in ia64_itr_entry()
    529  p->ifa = va;  in ia64_itr_entry()
    532  p->rr = ia64_get_rr(va);  in ia64_itr_entry()
    [all …]
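
Line 435 is the core of is_tr_overlap(): two closed intervals overlap unless one starts after the other ends (the real function also compares region IDs first). The test in isolation, with an illustrative name:

#include <linux/types.h>

/*
 * Closed intervals [a, a_end] and [b, b_end] overlap unless one lies
 * entirely to the right of the other.
 */
static bool ranges_overlap(u64 a, u64 a_end, u64 b, u64 b_end)
{
        return !(a > b_end || b > a_end);
}
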
/arch/sparc/kernel/

ioport.c
    185  void __iomem *va; /* P3 diag */  in _sparc_alloc_io() (local)
    208  va = _sparc_ioremap(res, busno, phys, size);  in _sparc_alloc_io()
    210  return va;  in _sparc_alloc_io()
    322  void *va;  in arch_dma_alloc() (local)
    328  va = (void *) __get_free_pages(gfp | __GFP_ZERO, get_order(size));  in arch_dma_alloc()
    329  if (!va) {  in arch_dma_alloc()
    338  srmmu_mapiorange(0, virt_to_phys(va), addr, size);  in arch_dma_alloc()
    340  *dma_handle = virt_to_phys(va);  in arch_dma_alloc()
    344  free_pages((unsigned long)va, get_order(size));  in arch_dma_alloc()
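
arch_dma_alloc() rounds the request up to a power-of-two page count with get_order() and must free with the same order. A sketch of that allocate/free pairing, with hypothetical names:

#include <linux/gfp.h>
#include <linux/mm.h>

static void *alloc_pages_example(size_t size, gfp_t gfp)
{
        /* get_order(size) = smallest n with (PAGE_SIZE << n) >= size */
        return (void *)__get_free_pages(gfp | __GFP_ZERO, get_order(size));
}

static void free_pages_example(void *va, size_t size)
{
        /* must match the order used at allocation time */
        free_pages((unsigned long)va, get_order(size));
}
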
/arch/microblaze/mm/

pgtable.c
    135  int map_page(unsigned long va, phys_addr_t pa, int flags)  in map_page() (argument)
    141  pd = pmd_offset(pgd_offset_k(va), va);  in map_page()
    143  pg = pte_alloc_kernel(pd, va); /* from powerpc - pgtable.c */  in map_page()
    148  set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,  in map_page()
    151  _tlbie(va);  in map_page()
/arch/x86/include/asm/

edac.h
    7  static inline void edac_atomic_scrub(void *va, u32 size)  in edac_atomic_scrub() (argument)
    9  u32 i, *virt_addr = va;  in edac_atomic_scrub()
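
edac_atomic_scrub() walks the buffer in 32-bit words and performs a locked read-modify-write of each one, which forces the memory controller to write back (and thereby ECC-correct) every touched line. A sketch of that loop; the "lock; addl $0" idiom is assumed from the shape of the x86 implementation:

#include <linux/types.h>

static inline void scrub_example(void *va, u32 size)
{
        u32 i, *virt_addr = va;

        /* "lock; addl $0" changes no data but is a full atomic RMW,
         * so corrected values get written back to DRAM. */
        for (i = 0; i < size / 4; i++, virt_addr++)
                asm volatile("lock; addl $0, %0" : "+m" (*virt_addr));
}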