
Search results for refs:eaddr (showing 1 – 25 of 30), sorted by relevance


/arch/powerpc/kvm/
book3s_32_mmu.c:81 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
87 static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) in find_sr() argument
89 return kvmppc_get_sr(vcpu, (eaddr >> 28) & 0xf); in find_sr()
92 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_32_ea_to_vp() argument
98 if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data, false)) in kvmppc_mmu_book3s_32_ea_to_vp()
101 kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_book3s_32_ea_to_vp()
102 return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16); in kvmppc_mmu_book3s_32_ea_to_vp()
111 u32 sre, gva_t eaddr, in kvmppc_mmu_book3s_32_get_pteg() argument
118 page = (eaddr & 0x0FFFFFFF) >> 12; in kvmppc_mmu_book3s_32_get_pteg()
129 kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg, in kvmppc_mmu_book3s_32_get_pteg()
[all …]
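
The book3s_32_mmu.c hits above reduce to fixed bit-field arithmetic on the effective address: the top four EA bits select a segment register, and the virtual page combines the VSID with the 16-bit page index within the segment. A minimal userspace sketch of that arithmetic, assuming a plain array in place of kvmppc_get_sr() and the classic 24-bit VSID field in the segment register:

#include <stdint.h>
#include <stdio.h>

static uint32_t segment_regs[16];                   /* stand-in for kvmppc_get_sr() */

static uint32_t find_sr(uint32_t eaddr)
{
    return segment_regs[(eaddr >> 28) & 0xf];       /* top 4 EA bits pick the SR */
}

static uint64_t ea_to_vp(uint32_t eaddr)
{
    uint64_t vsid = find_sr(eaddr) & 0x00ffffffu;   /* 24-bit VSID field of the SR */
    return (((uint64_t)eaddr >> 12) & 0xffff) | (vsid << 16);
}

int main(void)
{
    segment_regs[0xc] = 0x00abcdef;                 /* arbitrary example VSID */
    uint32_t eaddr = 0xc0123456;

    printf("page within segment: 0x%x\n", (eaddr & 0x0fffffff) >> 12);
    printf("virtual page:        0x%llx\n", (unsigned long long)ea_to_vp(eaddr));
    return 0;
}
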
book3s_64_mmu.c:46 gva_t eaddr) in kvmppc_mmu_book3s_64_find_slbe() argument
49 u64 esid = GET_ESID(eaddr); in kvmppc_mmu_book3s_64_find_slbe()
50 u64 esid_1t = GET_ESID_1T(eaddr); in kvmppc_mmu_book3s_64_find_slbe()
66 eaddr, esid, esid_1t); in kvmppc_mmu_book3s_64_find_slbe()
90 static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr) in kvmppc_slb_calc_vpn() argument
92 eaddr &= kvmppc_slb_offset_mask(slb); in kvmppc_slb_calc_vpn()
94 return (eaddr >> VPN_SHIFT) | in kvmppc_slb_calc_vpn()
98 static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_book3s_64_ea_to_vp() argument
103 slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); in kvmppc_mmu_book3s_64_ea_to_vp()
107 return kvmppc_slb_calc_vpn(slb, eaddr); in kvmppc_mmu_book3s_64_ea_to_vp()
[all …]
trace_pr.h:39 __field( unsigned long, eaddr )
49 __entry->eaddr = orig_pte->eaddr;
57 __entry->flag_w, __entry->flag_x, __entry->eaddr,
70 __field( ulong, eaddr )
79 __entry->eaddr = pte->pte.eaddr;
88 __entry->host_vpn, __entry->pfn, __entry->eaddr,
99 __field( ulong, eaddr )
108 __entry->eaddr = pte->pte.eaddr;
117 __entry->host_vpn, __entry->pfn, __entry->eaddr,
book3s_32_mmu_host.c:71 asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory"); in kvmppc_mmu_invalidate_pte()
118 static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr, in kvmppc_mmu_get_pteg() argument
124 page = (eaddr & ~ESID_MASK) >> 12; in kvmppc_mmu_get_pteg()
150 u32 eaddr = orig_pte->eaddr; in kvmppc_mmu_map_page() local
170 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page()
173 kvmppc_mmu_map_segment(vcpu, eaddr); in kvmppc_mmu_map_page()
180 ((eaddr & ~ESID_MASK) >> VPN_SHIFT); in kvmppc_mmu_map_page()
188 pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary); in kvmppc_mmu_map_page()
206 pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V | in kvmppc_mmu_map_page()
255 orig_pte->eaddr, (ulong)pteg, vpn, in kvmppc_mmu_map_page()
[all …]
e500_mmu.c:84 gva_t eaddr, int tlbsel, unsigned int pid, int as) in kvmppc_e500_tlb_index() argument
91 set_base = gtlb0_set_base(vcpu_e500, eaddr); in kvmppc_e500_tlb_index()
94 if (eaddr < vcpu_e500->tlb1_min_eaddr || in kvmppc_e500_tlb_index()
95 eaddr > vcpu_e500->tlb1_max_eaddr) in kvmppc_e500_tlb_index()
107 if (eaddr < get_tlb_eaddr(tlbe)) in kvmppc_e500_tlb_index()
110 if (eaddr > get_tlb_end(tlbe)) in kvmppc_e500_tlb_index()
130 gva_t eaddr, int as) in kvmppc_e500_deliver_tlb_miss() argument
146 vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) in kvmppc_e500_deliver_tlb_miss()
158 gva_t eaddr; in kvmppc_recalc_tlb1map_range() local
172 eaddr = get_tlb_eaddr(tlbe); in kvmppc_recalc_tlb1map_range()
[all …]
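
The e500_mmu.c matches above implement a simple range test: a guest TLB entry covers eaddr when the address is neither below the entry's start nor above its end. A hedged sketch of that check, with the MAS-register plumbing behind get_tlb_eaddr()/get_tlb_end() collapsed to a start/size pair:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gtlb_entry {
    uint64_t eaddr;     /* start of the mapping */
    uint64_t bytes;     /* mapping size */
    bool     valid;
};

static bool tlb_entry_covers(const struct gtlb_entry *e, uint64_t eaddr)
{
    if (!e->valid)
        return false;
    if (eaddr < e->eaddr)                   /* below the mapping start */
        return false;
    if (eaddr > e->eaddr + e->bytes - 1)    /* above the mapping end */
        return false;
    return true;
}

int main(void)
{
    struct gtlb_entry e = { .eaddr = 0x10000000, .bytes = 1u << 24, .valid = true };

    printf("%d %d\n", tlb_entry_covers(&e, 0x10ffffff), tlb_entry_covers(&e, 0x11000000));
    return 0;
}
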
book3s_mmu_hpte.c:38 static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) in kvmppc_mmu_hash_pte() argument
40 return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); in kvmppc_mmu_hash_pte()
43 static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr) in kvmppc_mmu_hash_pte_long() argument
45 return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE, in kvmppc_mmu_hash_pte_long()
78 index = kvmppc_mmu_hash_pte(pte->pte.eaddr); in kvmppc_mmu_hpte_cache_map()
82 index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr); in kvmppc_mmu_hpte_cache_map()
175 if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) in kvmppc_mmu_pte_flush_page()
195 if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) in kvmppc_mmu_pte_flush_long()
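book3s_mmu_hpte.c above files each shadow PTE into hash buckets keyed on its effective address, once by exact page and once by the wider 0x0ffff000 mask used for long flushes. A sketch of the per-page bucket, with a golden-ratio multiplicative hash standing in for the kernel's hash_64() and assumed values for PTE_SIZE (page shift 12) and HPTEG_HASH_BITS_PTE:

#include <stdint.h>
#include <stdio.h>

#define PTE_SHIFT      12       /* assumed stand-in for PTE_SIZE */
#define HASH_BITS_PTE  13       /* assumed stand-in for HPTEG_HASH_BITS_PTE */

static uint64_t hash_64_sketch(uint64_t val, unsigned int bits)
{
    return (val * 0x61C8864680B583EBull) >> (64 - bits);    /* golden-ratio mix */
}

static uint64_t hash_pte(uint64_t eaddr)
{
    return hash_64_sketch(eaddr >> PTE_SHIFT, HASH_BITS_PTE);
}

int main(void)
{
    uint64_t ea = 0xc000000000123456ull;

    printf("bucket: %llu\n", (unsigned long long)hash_pte(ea));
    /* the flush-by-page test from kvmppc_mmu_pte_flush_page() */
    printf("page match: %d\n", (ea & ~0xfffULL) == 0xc000000000123000ull);
    return 0;
}
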
book3s_64_mmu_host.c:118 vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_map_page()
121 ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr); in kvmppc_mmu_map_page()
127 vsid, orig_pte->eaddr); in kvmppc_mmu_map_page()
133 vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M); in kvmppc_mmu_map_page()
229 vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid); in kvmppc_mmu_unmap_page()
322 int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) in kvmppc_mmu_map_segment() argument
325 u64 esid = eaddr >> SID_SHIFT; in kvmppc_mmu_map_segment()
326 u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V; in kvmppc_mmu_map_segment()
333 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); in kvmppc_mmu_map_segment()
e500_mmu_host.c:108 static u32 get_host_mas0(unsigned long eaddr) in get_host_mas0() argument
118 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET)); in get_host_mas0()
589 void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr, in kvmppc_mmu_map() argument
609 &priv->ref, eaddr, &stlbe); in kvmppc_mmu_map()
616 kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe, in kvmppc_mmu_map()
634 hva_t eaddr; in kvmppc_load_last_inst() local
711 eaddr = (unsigned long)kmap_atomic(page); in kvmppc_load_last_inst()
712 *instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK)); in kvmppc_load_last_inst()
713 kunmap_atomic((u32 *)eaddr); in kvmppc_load_last_inst()
booke.c:1237 unsigned long eaddr = vcpu->arch.fault_dear; in kvmppc_handle_exit() local
1244 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { in kvmppc_handle_exit()
1254 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); in kvmppc_handle_exit()
1268 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); in kvmppc_handle_exit()
1278 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); in kvmppc_handle_exit()
1285 vcpu->arch.vaddr_accessed = eaddr; in kvmppc_handle_exit()
1295 unsigned long eaddr = vcpu->arch.pc; in kvmppc_handle_exit() local
1303 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); in kvmppc_handle_exit()
1316 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); in kvmppc_handle_exit()
1326 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); in kvmppc_handle_exit()
[all …]
e500mc.c:63 gva_t eaddr; in kvmppc_e500_tlbil_one() local
72 eaddr = get_tlb_eaddr(gtlbe); in kvmppc_e500_tlbil_one()
79 asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr)); in kvmppc_e500_tlbil_one()
book3s_64_mmu_radix.c:32 int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, in kvmppc_mmu_radix_xlate() argument
46 switch (eaddr >> 62) { in kvmppc_mmu_radix_xlate()
87 index = (eaddr >> offset) & ((1UL << bits) - 1); in kvmppc_mmu_radix_xlate()
111 gpa += eaddr & ((1ul << offset) - 1); in kvmppc_mmu_radix_xlate()
117 gpte->eaddr = eaddr; in kvmppc_mmu_radix_xlate()
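kvmppc_mmu_radix_xlate() above walks the guest radix tree: the top two EA bits pick the quadrant, each level strips a slice of index bits at the current offset, and whatever remains below the final offset is the byte offset into the page. A sketch of just that index extraction, using an illustrative 13/9/9/9-bit level geometry over 4 KiB pages (the real geometry comes from the guest's partition and process tables):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t eaddr = 0xc0000000deadbeefull;

    printf("quadrant: %llu\n", (unsigned long long)(eaddr >> 62));

    /* assumed geometry: 13 + 9 + 9 + 9 index bits above a 12-bit page offset */
    static const unsigned level_bits[] = { 13, 9, 9, 9 };
    unsigned offset = 52;

    for (unsigned i = 0; i < 4; i++) {
        offset -= level_bits[i];
        uint64_t index = (eaddr >> offset) & ((1ull << level_bits[i]) - 1);
        printf("level %u index: 0x%llx\n", i, (unsigned long long)index);
    }
    printf("page offset: 0x%llx\n", (unsigned long long)(eaddr & ((1ull << offset) - 1)));
    return 0;
}
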
e500.c:242 u32 val, eaddr; in kvmppc_e500_tlbil_one() local
274 eaddr = get_tlb_eaddr(gtlbe); in kvmppc_e500_tlbil_one()
279 asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); in kvmppc_e500_tlbil_one()
book3s.c:426 int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, in kvmppc_xlate() argument
435 r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite); in kvmppc_xlate()
437 pte->eaddr = eaddr; in kvmppc_xlate()
438 pte->raddr = eaddr & KVM_PAM; in kvmppc_xlate()
439 pte->vpage = VSID_REAL | eaddr >> 12; in kvmppc_xlate()
448 ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)) in kvmppc_xlate()
book3s_hv_rm_mmu.c:946 unsigned long eaddr, unsigned long slb_v, long mmio_update) in mmio_cache_search() argument
956 if ((entry->eaddr >> pshift) == (eaddr >> pshift) && in mmio_cache_search()
980 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, in kvmppc_hv_find_lock_hpte() argument
1009 hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt); in kvmppc_hv_find_lock_hpte()
1011 avpn |= (eaddr & somask) >> 16; in kvmppc_hv_find_lock_hpte()
1168 cache_entry->eaddr = addr; in kvmppc_hpte_hv_fault()
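
The hash computed in kvmppc_hv_find_lock_hpte() above is the classic HPT primary hash: the VSID xor'ed with the page number within the segment, masked to the table size. A small worked sketch, assuming a 256 MiB segment (somask = 0x0fffffff), 4 KiB pages (pshift = 12), and an illustrative value for the kvmppc_hpt_mask() result:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t eaddr    = 0x0000000010ab3000ull;
    uint64_t vsid     = 0x123456ull;
    uint64_t somask   = 0x0fffffffull;      /* offset mask for a 256 MiB segment */
    unsigned pshift   = 12;                 /* 4 KiB pages */
    uint64_t hpt_mask = (1ull << 18) - 1;   /* illustrative table mask */

    uint64_t hash = (vsid ^ ((eaddr & somask) >> pshift)) & hpt_mask;
    printf("primary PTEG index: 0x%llx\n", (unsigned long long)hash);
    return 0;
}
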
/arch/sh/mm/
cache-sh5.c:34 sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid, in sh64_setup_dtlb_cache_slot() argument
38 sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr); in sh64_setup_dtlb_cache_slot()
87 static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr) in sh64_icache_inv_user_page() argument
95 addr = eaddr; in sh64_icache_inv_user_page()
159 unsigned long eaddr; in sh64_icache_inv_user_page_range() local
188 eaddr = aligned_start; in sh64_icache_inv_user_page_range()
189 while (eaddr < vma_end) { in sh64_icache_inv_user_page_range()
190 sh64_icache_inv_user_page(vma, eaddr); in sh64_icache_inv_user_page_range()
191 eaddr += PAGE_SIZE; in sh64_icache_inv_user_page_range()
244 unsigned long long eaddr, eaddr0, eaddr1; in sh64_dcache_purge_sets() local
[all …]
tlb-sh5.c:120 void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr, in sh64_setup_tlb_slot() argument
125 pteh = neff_sign_extend(eaddr); in sh64_setup_tlb_slot()
/arch/blackfin/kernel/cplb-nompu/
cplbinit.c:135 dcplb_bounds[i_d].eaddr = uncached_end; in generate_cplb_tables_all()
137 dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1); in generate_cplb_tables_all()
141 dcplb_bounds[i_d].eaddr = _ramend; in generate_cplb_tables_all()
146 dcplb_bounds[i_d].eaddr = physical_mem_end; in generate_cplb_tables_all()
151 dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE; in generate_cplb_tables_all()
154 dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE; in generate_cplb_tables_all()
157 dcplb_bounds[i_d].eaddr = BOOT_ROM_START; in generate_cplb_tables_all()
160 dcplb_bounds[i_d].eaddr = BOOT_ROM_START + BOOT_ROM_LENGTH; in generate_cplb_tables_all()
164 dcplb_bounds[i_d].eaddr = L2_START; in generate_cplb_tables_all()
167 dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH; in generate_cplb_tables_all()
[all …]
cplbmgr.c:101 unsigned long i_data, base, addr1, eaddr; in icplb_miss() local
110 eaddr = icplb_bounds[idx].eaddr; in icplb_miss()
111 if (addr < eaddr) in icplb_miss()
113 base = eaddr; in icplb_miss()
126 if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) { in icplb_miss()
148 unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags; in dcplb_miss() local
157 eaddr = dcplb_bounds[idx].eaddr; in dcplb_miss()
158 if (addr < eaddr) in dcplb_miss()
160 base = eaddr; in dcplb_miss()
184 if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) { in dcplb_miss()
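
The Blackfin CPLB miss handlers above walk a bounds table in which each entry records only the end of a region (see cplbinit.h further down), so a region's start is the previous entry's end. A sketch of that lookup with purely illustrative table contents:

#include <stdio.h>

struct cplb_boundary {
    unsigned long eaddr;    /* end of this region */
    unsigned long data;     /* CPLB flags for the region (illustrative) */
};

static const struct cplb_boundary dcplb_bounds[] = {
    { 0x00100000, 0x1 },    /* first MiB */
    { 0x08000000, 0x2 },    /* rest of SDRAM */
    { 0x20000000, 0x0 },    /* hole: no valid CPLB */
};

static int find_region(unsigned long addr, unsigned long *base_out)
{
    unsigned long base = 0;

    for (unsigned i = 0; i < sizeof(dcplb_bounds) / sizeof(dcplb_bounds[0]); i++) {
        unsigned long eaddr = dcplb_bounds[i].eaddr;

        if (addr < eaddr) {
            *base_out = base;       /* region is [base, eaddr) */
            return (int)i;
        }
        base = eaddr;               /* next region starts where this one ends */
    }
    return -1;                      /* address beyond the table */
}

int main(void)
{
    unsigned long base;
    int idx = find_region(0x00200000, &base);

    printf("region %d starting at 0x%lx\n", idx, base);
    return 0;
}
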
/arch/unicore32/mm/
alignment.c:293 unsigned long eaddr, newaddr; in do_alignment_ldmstm() local
304 newaddr = eaddr = regs->uregs[rn]; in do_alignment_ldmstm()
310 eaddr = newaddr; in do_alignment_ldmstm()
313 eaddr += 4; in do_alignment_ldmstm()
319 if (addr != eaddr) { in do_alignment_ldmstm()
322 instruction_pointer(regs), instr, addr, eaddr); in do_alignment_ldmstm()
336 uregs[rd + reg_correction], eaddr); in do_alignment_ldmstm()
339 uregs[rd + reg_correction], eaddr); in do_alignment_ldmstm()
340 eaddr += 4; in do_alignment_ldmstm()
/arch/arm/mm/
alignment.c:507 unsigned long eaddr, newaddr; in do_alignment_ldmstm() local
521 newaddr = eaddr = regs->uregs[rn]; in do_alignment_ldmstm()
527 eaddr = newaddr; in do_alignment_ldmstm()
530 eaddr += 4; in do_alignment_ldmstm()
544 if (addr != eaddr) { in do_alignment_ldmstm()
547 instruction_pointer(regs), instr, addr, eaddr); in do_alignment_ldmstm()
559 get32t_unaligned_check(val, eaddr); in do_alignment_ldmstm()
562 put32t_unaligned_check(regs->uregs[rd], eaddr); in do_alignment_ldmstm()
563 eaddr += 4; in do_alignment_ldmstm()
572 get32_unaligned_check(val, eaddr); in do_alignment_ldmstm()
[all …]
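
In both alignment.c fixups above, eaddr is the running address of an LDM/STM block transfer: it starts at the base register and advances by four for every register in the transfer list. A sketch of that stepping for the load case, with a plain buffer standing in for user memory and no writeback or abort handling:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t uregs[16];                  /* stand-in for regs->uregs[] */

/* Emulate LDM rn, {reglist} with reglist given as a 16-bit register bitmask. */
static void emulate_ldm(int rn, uint16_t reglist, const uint8_t *mem)
{
    uint32_t eaddr = uregs[rn];

    for (int rd = 0; rd < 16; rd++) {
        if (!(reglist & (1u << rd)))
            continue;
        uint32_t val;
        memcpy(&val, mem + eaddr, sizeof(val));     /* unaligned-safe word load */
        uregs[rd] = val;
        eaddr += 4;                                 /* next word of the block */
    }
}

int main(void)
{
    uint8_t mem[64] = { 0 };

    mem[5] = 0xaa;                  /* lands in the second transferred word */
    uregs[1] = 1;                   /* deliberately misaligned base address */
    emulate_ldm(1, 0x0005, mem);    /* LDM r1, {r0, r2} */

    printf("r0=0x%08x r2=0x%08x\n", uregs[0], uregs[2]);
    return 0;
}
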
/arch/powerpc/include/asm/
kvm_book3s.h:164 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
165 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
170 extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
189 extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
206 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
kvm_host.h:354 ulong eaddr; member
374 int (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
379 u64 (*ea_to_vp)(struct kvm_vcpu *vcpu, gva_t eaddr, bool data);
446 unsigned long eaddr; member
kvm_ppc.h:94 extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
96 extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
117 extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
118 extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
120 gva_t eaddr);
123 extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
/arch/blackfin/include/asm/
cplbinit.h:27 unsigned long eaddr; /* End of this region. */ member
/arch/sh/include/asm/
tlb_64.h:59 void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
