/arch/frv/mm/ |
D | dma-alloc.c |
     50  static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)  in map_page() argument
     59  pge = pgd_offset_k(va);  in map_page()
     60  pue = pud_offset(pge, va);  in map_page()
     61  pme = pmd_offset(pue, va);  in map_page()
     64  pte = pte_alloc_kernel(pme, va);  in map_page()
     84  unsigned long page, va, pa;  in consistent_alloc() local
    107  va = VMALLOC_VMADDR(area->addr);  in consistent_alloc()
    108  ret = (void *) va;  in consistent_alloc()
    123  err = map_page(va + i, pa + i, PAGE_KERNEL_NOCACHE);  in consistent_alloc()
    126  vfree((void *) va);  in consistent_alloc()
    [all …]
|
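Note: the map_page() hits above are the classic kernel page-table walk — descend pgd to pud to pmd, allocate a PTE page on demand, then install the mapping. A minimal sketch of the pattern, assuming the pre-p4d page-table API this tree uses (pfn_pte()/set_pte_at() stand in for frv's own helpers):

    #include <linux/mm.h>

    /* Map one kernel virtual page at 'va' to physical address 'pa'. */
    static int map_page(unsigned long va, unsigned long pa, pgprot_t prot)
    {
        pgd_t *pge = pgd_offset_k(va);           /* kernel pgd slot for va */
        pud_t *pue = pud_offset(pge, va);        /* folded on 2-level MMUs */
        pmd_t *pme = pmd_offset(pue, va);
        pte_t *pte = pte_alloc_kernel(pme, va);  /* may allocate a PTE page */

        if (!pte)
            return -ENOMEM;

        set_pte_at(&init_mm, va, pte, pfn_pte(pa >> PAGE_SHIFT, prot));
        return 0;
    }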
/arch/ia64/kvm/ |
D | vtlb.c |
     36  static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)  in __is_tr_translated() argument
     39  && ((va-trp->vadr) < PSIZE(trp->ps)));  in __is_tr_translated()
     62  void machine_tlb_purge(u64 va, u64 ps)  in machine_tlb_purge() argument
     64  ia64_ptcl(va, ps << 2);  in machine_tlb_purge()
    116  struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)  in vsa_thash() argument
    121  pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);  in vsa_thash()
    130  struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)  in __vtr_lookup() argument
    137  rid = vcpu_get_rr(vcpu, va);  in __vtr_lookup()
    140  if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {  in __vtr_lookup()
    143  if (__is_tr_translated(trp, rid, va))  in __vtr_lookup()
    [all …]
|
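Note: __is_tr_translated() above checks rid equality plus a single unsigned compare, (va - trp->vadr) < PSIZE(trp->ps), to test whether va lies inside the translation's range. A sketch of that idiom, with hypothetical names:

    /* True iff base <= va < base + size.  Unsigned subtraction wraps
     * around when va < base, making the difference huge and failing
     * the compare, so one test covers both bounds. */
    static int addr_in_range(u64 va, u64 base, u64 size)
    {
        return (va - base) < size;
    }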
/arch/arm/mach-omap2/ |
D | omap_hwmod_reset.c |
     44  void __iomem *va;  in omap_hwmod_aess_preprogram() local
     46  va = omap_hwmod_get_mpu_rt_va(oh);  in omap_hwmod_aess_preprogram()
     47  if (!va)  in omap_hwmod_aess_preprogram()
     50  aess_enable_autogating(va);  in omap_hwmod_aess_preprogram()
|
/arch/arm/include/asm/ |
D | kvm_mmu.h |
    194  void *va = kmap_atomic_pfn(pfn);  in __coherent_cache_guest_page() local
    197  kvm_flush_dcache_to_poc(va, PAGE_SIZE);  in __coherent_cache_guest_page()
    200  __cpuc_coherent_user_range((unsigned long)va,  in __coherent_cache_guest_page()
    201  (unsigned long)va + PAGE_SIZE);  in __coherent_cache_guest_page()
    206  kunmap_atomic(va);  in __coherent_cache_guest_page()
    218  void *va = kmap_atomic(pte_page(pte));  in __kvm_flush_dcache_pte() local
    220  kvm_flush_dcache_to_poc(va, PAGE_SIZE);  in __kvm_flush_dcache_pte()
    222  kunmap_atomic(va);  in __kvm_flush_dcache_pte()
    231  void *va = kmap_atomic_pfn(pfn);  in __kvm_flush_dcache_pmd() local
    233  kvm_flush_dcache_to_poc(va, PAGE_SIZE);  in __kvm_flush_dcache_pmd()
    [all …]
|
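Note: the kvm_mmu.h hits share one pattern — map a page frame briefly with kmap_atomic_pfn(), clean its data cache to the point of coherency, unmap. A hedged sketch (kvm_flush_dcache_to_poc() is the ARM helper the hits show; the wrapper name is hypothetical):

    /* Clean one page's d-cache so other observers (the guest, DMA)
     * see what the host just wrote. */
    static void flush_dcache_pfn(unsigned long pfn)
    {
        void *va = kmap_atomic_pfn(pfn);   /* short-lived mapping */

        kvm_flush_dcache_to_poc(va, PAGE_SIZE);

        kunmap_atomic(va);                 /* pairs with kmap_atomic_pfn */
    }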
D | edac.h |
     25  static inline void atomic_scrub(void *va, u32 size)  in atomic_scrub() argument
     28  unsigned int *virt_addr = va;  in atomic_scrub()
|
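Note: this atomic_scrub() helper recurs below for x86, powerpc, and mips, differing only in word width and atomic primitive. All variants rewrite every word of the buffer with an atomic read-modify-write so the memory controller stores back ECC-corrected data. An arch-neutral sketch using a GCC builtin rather than any one arch's atomics:

    static inline void atomic_scrub(void *va, u32 size)
    {
        unsigned int *p = va;
        u32 i;

        /* An atomic "+= 0" is a locked read-modify-write: the read
         * returns corrected data, the write stores it back clean,
         * scrubbing any correctable ECC error in that word. */
        for (i = 0; i < size / sizeof(*p); i++, p++)
            (void)__sync_fetch_and_add(p, 0);
    }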
/arch/alpha/kernel/ |
D | traps.c |
    452  unsigned long count, va, pc;  member
    461  do_entUna(void * va, unsigned long opcode, unsigned long reg,  in do_entUna() argument
    470  unaligned[0].va = (unsigned long) va;  in do_entUna()
    492  : "r"(va), "0"(0));  in do_entUna()
    512  : "r"(va), "0"(0));  in do_entUna()
    532  : "r"(va), "0"(0));  in do_entUna()
    566  : "r"(va), "r"(una_reg(reg)), "0"(0));  in do_entUna()
    596  : "r"(va), "r"(una_reg(reg)), "0"(0));  in do_entUna()
    626  : "r"(va), "r"(una_reg(reg)), "0"(0));  in do_entUna()
    633  pc, va, opcode, reg);  in do_entUna()
    [all …]
|
/arch/powerpc/mm/ |
D | hash_native_64.c |
     50  unsigned long va;  in __tlbie() local
     61  va = vpn << VPN_SHIFT;  in __tlbie()
     67  va &= ~(0xffffULL << 48);  in __tlbie()
     72  va &= ~((1ul << (64 - 52)) - 1);  in __tlbie()
     73  va |= ssize << 8;  in __tlbie()
     76  va |= sllp << 5;  in __tlbie()
     78  : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)  in __tlbie()
     84  va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);  in __tlbie()
     85  va |= penc << 12;  in __tlbie()
     86  va |= ssize << 8;  in __tlbie()
    [all …]
|
/arch/arm/mm/ |
D | mm.h |
     23  static inline void set_top_pte(unsigned long va, pte_t pte)  in set_top_pte() argument
     25  pte_t *ptep = pte_offset_kernel(top_pmd, va);  in set_top_pte()
     27  local_flush_tlb_kernel_page(va);  in set_top_pte()
     30  static inline pte_t get_top_pte(unsigned long va)  in get_top_pte() argument
     32  pte_t *ptep = pte_offset_kernel(top_pmd, va);  in get_top_pte()
|
D | cache-xsc3l2.c |
     70  static inline void l2_unmap_va(unsigned long va)  in l2_unmap_va() argument
     73  if (va != -1)  in l2_unmap_va()
     74  kunmap_atomic((void *)va);  in l2_unmap_va()
     81  unsigned long va = prev_va & PAGE_MASK;  in l2_map_va() local
     90  va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);  in l2_map_va()
     92  return va + (pa_offset >> (32 - PAGE_SHIFT));  in l2_map_va()
|
/arch/mn10300/mm/ |
D | tlb-smp.c |
     53  unsigned long va);
     95  unsigned long va)  in flush_tlb_others() argument
    120  flush_va = va;  in flush_tlb_others()
    182  void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)  in flush_tlb_page() argument
    191  local_flush_tlb_page(mm, va);  in flush_tlb_page()
    193  flush_tlb_others(cpu_mask, mm, va);  in flush_tlb_page()
|
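Note: flush_tlb_page() above shows the usual SMP TLB-shootdown split (the m32r and tile entries below have the same shape): flush the local TLB directly, then IPI every other CPU that may cache the mm's translations. A simplified sketch, assuming the two-argument local_flush_tlb_page(mm, va) form these hits show:

    struct flush_args {
        struct mm_struct *mm;
        unsigned long va;
    };

    static void ipi_flush_one(void *info)    /* runs on each target CPU */
    {
        struct flush_args *a = info;

        local_flush_tlb_page(a->mm, a->va);
    }

    void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
    {
        struct flush_args args = { vma->vm_mm, va };

        preempt_disable();
        local_flush_tlb_page(args.mm, va);   /* this CPU, no IPI needed */
        /* smp_call_function_many() skips the calling CPU */
        smp_call_function_many(mm_cpumask(args.mm),
                               ipi_flush_one, &args, 1);
        preempt_enable();
    }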
/arch/parisc/kernel/ |
D | entry.S |
    191  va = r8	/* virtual address for which the trap occurred */  define
    204  mfctl %pcoq, va
    221  mfctl %pcoq, va
    235  mfctl %ior,va
    253  mfctl %ior,va
    267  mfctl %ior, va
    285  mfctl %ior, va
    297  mfctl %ior,va
    313  mfctl %ior,va
    327  mfctl %ior,va
    [all …]
|
/arch/openrisc/kernel/ |
D | dma.c |
     88  unsigned long va;  in or1k_dma_alloc() local
    102  va = (unsigned long)page;  in or1k_dma_alloc()
    109  if (walk_page_range(va, va + size, &walk)) {  in or1k_dma_alloc()
    115  return (void *)va;  in or1k_dma_alloc()
    122  unsigned long va = (unsigned long)vaddr;  in or1k_dma_free() local
    130  WARN_ON(walk_page_range(va, va + size, &walk));  in or1k_dma_free()
|
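Note: or1k_dma_alloc() above uses walk_page_range() to visit every PTE backing the new buffer and mark it cache-inhibited. A sketch of the callback side, assuming the pre-4.20 mm_walk API this tree uses and or1k's _PAGE_CI bit:

    /* Called once per PTE in [va, va + size): make the page uncached. */
    static int page_set_nocache(pte_t *pte, unsigned long addr,
                                unsigned long next, struct mm_walk *walk)
    {
        pte_val(*pte) |= _PAGE_CI;     /* or1k "cache inhibit" bit */
        flush_tlb_page(NULL, addr);    /* drop the stale cached entry */
        return 0;
    }

    /* Caller side, as in the hits above:
     *     struct mm_walk walk = {
     *         .pte_entry = page_set_nocache,
     *         .mm = &init_mm,
     *     };
     *     if (walk_page_range(va, va + size, &walk))
     *         goto free_and_fail;
     */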
/arch/tile/include/asm/ |
D | pgtable_64.h |
     24  #define PGD_INDEX(va) HV_L0_INDEX(va)  argument
     35  #define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)  argument
     46  #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)  argument
|
D | pgtable_32.h |
     27  #define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)  argument
     38  #define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)  argument
|
/arch/tile/mm/ |
D | highmem.c |
     67  unsigned long va;  member
     97  unsigned long va, pte_t *ptep, pte_t pteval)  in kmap_atomic_register() argument
    109  amp->va = va;  in kmap_atomic_register()
    129  static void kmap_atomic_unregister(struct page *page, unsigned long va)  in kmap_atomic_unregister() argument
    136  if (amp->page == page && amp->cpu == cpu && amp->va == va)  in kmap_atomic_unregister()
    148  pte_t *ptep = kmap_get_pte(amp->va);  in kmap_atomic_fix_one_kpte()
    151  flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,  in kmap_atomic_fix_one_kpte()
|
/arch/powerpc/math-emu/ |
D | math_efp.c |
    186  union dw_union vc, va, vb;  in do_spe_mathemu() local
    205  va.wp[0] = current->thread.evr[fa];  in do_spe_mathemu()
    206  va.wp[1] = regs->gpr[fa];  in do_spe_mathemu()
    214  pr_debug("va: %08x %08x\n", va.wp[0], va.wp[1]);  in do_spe_mathemu()
    224  FP_UNPACK_SP(SA, va.wp + 1);  in do_spe_mathemu()
    229  FP_UNPACK_SP(SA, va.wp + 1);  in do_spe_mathemu()
    238  vc.wp[1] = va.wp[1] & ~SIGN_BIT_S;  in do_spe_mathemu()
    242  vc.wp[1] = va.wp[1] | SIGN_BIT_S;  in do_spe_mathemu()
    246  vc.wp[1] = va.wp[1] ^ SIGN_BIT_S;  in do_spe_mathemu()
    352  FP_UNPACK_DP(DA, va.dp);  in do_spe_mathemu()
    [all …]
|
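Note: the vc.wp[1] assignments above are the SPE efsabs/efsnabs/efsneg cases — for IEEE-754 singles, absolute value, negated absolute value, and negation are plain sign-bit masks on the raw word, no unpacking needed. The three identities:

    #define SIGN_BIT_S 0x80000000u         /* IEEE-754 single sign bit */

    static u32 fp_abs(u32 raw)  { return raw & ~SIGN_BIT_S; }  /*  |x| */
    static u32 fp_nabs(u32 raw) { return raw |  SIGN_BIT_S; }  /* -|x| */
    static u32 fp_neg(u32 raw)  { return raw ^  SIGN_BIT_S; }  /*  -x  */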
/arch/m32r/kernel/ |
D | smp.c |
    315  void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)  in smp_flush_tlb_page() argument
    336  va &= PAGE_MASK;  in smp_flush_tlb_page()
    337  va |= (*mmc & MMU_CONTEXT_ASID_MASK);  in smp_flush_tlb_page()
    338  __flush_tlb_page(va);  in smp_flush_tlb_page()
    342  flush_tlb_others(cpu_mask, mm, vma, va);  in smp_flush_tlb_page()
    371  struct vm_area_struct *vma, unsigned long va)  in flush_tlb_others() argument
    408  flush_va = va;  in flush_tlb_others()
    463  unsigned long va = flush_va;  in smp_invalidate_interrupt() local
    466  va &= PAGE_MASK;  in smp_invalidate_interrupt()
    467  va |= (*mmc & MMU_CONTEXT_ASID_MASK);  in smp_invalidate_interrupt()
    [all …]
|
/arch/arm/plat-samsung/ |
D | pm-debug.c |
     39  va_list va;  in s3c_pm_dbg() local
     42  va_start(va, fmt);  in s3c_pm_dbg()
     43  vsnprintf(buff, sizeof(buff), fmt, va);  in s3c_pm_dbg()
     44  va_end(va);  in s3c_pm_dbg()
|
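Note: in pm-debug.c, 'va' is a va_list, not a virtual address. The hits show the standard varargs-to-buffer pattern; a self-contained sketch (buffer size and the final output call are assumptions — the real code writes to a UART):

    #include <stdarg.h>
    #include <stdio.h>

    static void pm_dbg(const char *fmt, ...)
    {
        char buff[256];          /* size is an assumption */
        va_list va;

        va_start(va, fmt);       /* must be paired with va_end() */
        vsnprintf(buff, sizeof(buff), fmt, va);
        va_end(va);

        fputs(buff, stderr);     /* stand-in for the UART output */
    }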
/arch/tile/include/hv/ |
D | hypervisor.h |
     673  int hv_store_mapping(HV_VirtAddr va, unsigned int len, HV_PhysAddr pa);
    1398  void hv_bzero_page(HV_VirtAddr va, unsigned int size);
    1655  int hv_dev_pread(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len,
    1698  int hv_dev_pwrite(int devhdl, __hv32 flags, HV_VirtAddr va, __hv32 len,
    2480  #define HV_L0_INDEX(va) \
    2481  (((va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1))
    2486  #define HV_L0_INDEX(va) \  argument
    2487  (((HV_VirtAddr)(va) >> HV_LOG2_L1_SPAN) & (HV_L0_ENTRIES - 1))
    2531  #define _HV_L1_INDEX(va, log2_page_size_large) \
    2532  (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
    [all …]
|
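Note: HV_L0_INDEX/_HV_L1_INDEX above (and the PGD/PMD/PTE_INDEX wrappers in the tile pgtable headers earlier) all extract a page-table index the same way — shift the VA down by the level's span, then mask by the table size. Generic form, with a worked example using assumed numbers:

    /* Which slot of this level's table does 'va' fall in? */
    #define LEVEL_INDEX(va, shift, entries) \
        (((va) >> (shift)) & ((entries) - 1))

    /* Example (assumed 30-bit L1 span, 512-entry L0 table):
     * va = 0x40001000 -> (0x40001000 >> 30) & 511 = 1. */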
/arch/sparc/kernel/ |
D | ioport.c |
    185  void __iomem *va;	/* P3 diag */  in _sparc_alloc_io() local
    208  va = _sparc_ioremap(res, busno, phys, size);  in _sparc_alloc_io()
    210  return va;  in _sparc_alloc_io()
    267  unsigned long va;  in sbus_alloc_coherent() local
    281  va = __get_free_pages(gfp, order);  in sbus_alloc_coherent()
    282  if (va == 0)  in sbus_alloc_coherent()
    300  if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0)  in sbus_alloc_coherent()
    312  free_pages(va, order);  in sbus_alloc_coherent()
    360  void *va = page_address(page) + offset;  in sbus_map_page() local
    370  return mmu_get_scsi_one(dev, va, len);  in sbus_map_page()
    [all …]
|
/arch/x86/include/asm/ |
D | edac.h |
      6  static inline void atomic_scrub(void *va, u32 size)  in atomic_scrub() argument
      8  u32 i, *virt_addr = va;  in atomic_scrub()
|
/arch/tile/kernel/ |
D | tlb.c |
     54  unsigned long va)  in flush_tlb_page_mm() argument
     59  va, size, size, mm_cpumask(mm), NULL, 0);  in flush_tlb_page_mm()
     62  void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)  in flush_tlb_page() argument
     64  flush_tlb_page_mm(vma, vma->vm_mm, va);  in flush_tlb_page()
|
/arch/powerpc/include/asm/ |
D | edac.h |
     19  static __inline__ void atomic_scrub(void *va, u32 size)  in atomic_scrub() argument
     21  unsigned int *virt_addr = va;  in atomic_scrub()
|
/arch/arm/mach-davinci/include/mach/ |
D | hardware.h |
     29  #define io_v2p(va) ((va) - IO_OFFSET)  argument
|
/arch/mips/include/asm/ |
D | edac.h |
      8  static inline void atomic_scrub(void *va, u32 size)  in atomic_scrub() argument
     10  unsigned long *virt_addr = va;  in atomic_scrub()
|