/arch/x86/kvm/
D | x86.h |
    142  gva_t gva, gfn_t gfn, unsigned access)  in vcpu_cache_mmio_info() argument
    153  vcpu->arch.mmio_gva = mmu_is_nested(vcpu) ? 0 : gva & PAGE_MASK;  in vcpu_cache_mmio_info()
    170  static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)  in vcpu_clear_mmio_info() argument
    172  if (gva != MMIO_GVA_ANY && vcpu->arch.mmio_gva != (gva & PAGE_MASK))  in vcpu_clear_mmio_info()
    178  static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)  in vcpu_match_mmio_gva() argument
    181  vcpu->arch.mmio_gva == (gva & PAGE_MASK))  in vcpu_match_mmio_gva()

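These x86.h helpers cache the guest virtual address of the last MMIO fault at page granularity, so a repeated access to the same page can skip a full page walk. Below is a minimal userspace sketch of that match/clear logic; struct vcpu_stub, main(), and the constant values are stand-ins invented here, and the real helpers additionally track the GPA, a generation number, and the nested-MMU case.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT   12
#define PAGE_MASK    (~((uint64_t)(1 << PAGE_SHIFT) - 1))
#define MMIO_GVA_ANY (~(uint64_t)0)   /* sentinel: clear whatever is cached */

struct vcpu_stub {
        uint64_t mmio_gva;            /* page-aligned GVA of last MMIO hit, 0 if none */
};

/* Remember the page containing a GVA that just faulted as MMIO. */
static void cache_mmio_info(struct vcpu_stub *vcpu, uint64_t gva)
{
        vcpu->mmio_gva = gva & PAGE_MASK;
}

/* Drop the cached info, but only if it covers this GVA (or unconditionally). */
static void clear_mmio_info(struct vcpu_stub *vcpu, uint64_t gva)
{
        if (gva != MMIO_GVA_ANY && vcpu->mmio_gva != (gva & PAGE_MASK))
                return;
        vcpu->mmio_gva = 0;
}

/* Fast path: does this access land in the cached MMIO page? */
static bool match_mmio_gva(const struct vcpu_stub *vcpu, uint64_t gva)
{
        return vcpu->mmio_gva && vcpu->mmio_gva == (gva & PAGE_MASK);
}

int main(void)
{
        struct vcpu_stub vcpu = { 0 };

        cache_mmio_info(&vcpu, 0xfee00030);          /* e.g. an identity-mapped APIC page */
        printf("same page:   %d\n", match_mmio_gva(&vcpu, 0xfee00ff0));  /* 1 */
        printf("other page:  %d\n", match_mmio_gva(&vcpu, 0xfee01000));  /* 0 */
        clear_mmio_info(&vcpu, MMIO_GVA_ANY);
        printf("after clear: %d\n", match_mmio_gva(&vcpu, 0xfee00030)); /* 0 */
        return 0;
}
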
D | mmutrace.h |
    252  TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
    254  TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry),
    258  __field(gva_t, gva)
    268  __entry->gva = gva;
    278  __entry->gva, __print_flags(__entry->error_code, "|",

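The fast_page_fault tracepoint above records the GVA together with the raw error code, which __print_flags() renders symbolically ("P|W|...") in the trace output. A standalone sketch of that decoding idea for the architectural x86 #PF error-code bits; the table and helper names are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Architectural x86 page-fault error-code bits. */
static const struct { uint32_t bit; const char *name; } pf_bits[] = {
        { 1u << 0, "P"    },  /* page was present        */
        { 1u << 1, "W"    },  /* access was a write      */
        { 1u << 2, "U"    },  /* access from user mode   */
        { 1u << 3, "RSVD" },  /* reserved bit set in PTE */
        { 1u << 4, "F"    },  /* instruction fetch       */
};

/* Mimic __print_flags(): join the names of all set bits with '|'. */
static void print_error_code(uint32_t error_code)
{
        char buf[64] = "";

        for (size_t i = 0; i < sizeof(pf_bits) / sizeof(pf_bits[0]); i++) {
                if (!(error_code & pf_bits[i].bit))
                        continue;
                if (buf[0])
                        strcat(buf, "|");
                strcat(buf, pf_bits[i].name);
        }
        printf("error_code=%#x [%s]\n", error_code, buf[0] ? buf : "-");
}

int main(void)
{
        print_error_code(0x3);   /* P|W: write to a present, write-protected page */
        print_error_code(0x14);  /* U|F: user-mode fetch from a non-present page  */
        return 0;
}
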
D | mmu.c |
    2094  static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)  in nonpaging_invlpg() argument
    3205  static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,  in handle_abnormal_pfn() argument
    3215  vcpu_cache_mmio_info(vcpu, gva, gfn, access);  in handle_abnormal_pfn()
    3309  static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,  in fast_page_fault() argument
    3329  for_each_shadow_entry_lockless(vcpu, gva, iterator, spte)  in fast_page_fault()
    3407  trace_fast_page_fault(vcpu, gva, error_code, iterator.sptep,  in fast_page_fault()
    3415  gva_t gva, kvm_pfn_t *pfn, bool write, bool *writable);
    3888  static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,  in nonpaging_page_fault() argument
    3891  gfn_t gfn = gva >> PAGE_SHIFT;  in nonpaging_page_fault()
    3894  pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);  in nonpaging_page_fault()
    [all …]

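Both fast_page_fault() and the invlpg paths locate the SPTE for a GVA by walking the shadow page table, peeling off 9 index bits per level. A sketch of that index math, assuming 4-level x86-64 paging with 4 KiB pages; pt_index() is a made-up name for what the shadow-walk iterator computes:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PT64_LEVEL_BITS 9        /* 512 entries per table */

/* Index into the page table at 'level' (4 = PML4 ... 1 = PTE) for a GVA. */
static unsigned int pt_index(uint64_t gva, int level)
{
        unsigned int shift = PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS;

        return (gva >> shift) & ((1u << PT64_LEVEL_BITS) - 1);
}

int main(void)
{
        uint64_t gva = 0x00007f1234567000ull;

        for (int level = 4; level >= 1; level--)
                printf("level %d index: %u\n", level, pt_index(gva, level));
        return 0;
}
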
D | trace.h |
    769  TP_PROTO(gva_t gva, gpa_t gpa, bool write, bool gpa_match),
    770  TP_ARGS(gva, gpa, write, gpa_match),
    773  __field(gva_t, gva)
    780  __entry->gva = gva;
    786  TP_printk("gva %#lx gpa %#llx %s %s", __entry->gva, __entry->gpa,

D | paging_tmpl.h |
    874  static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)  in FNAME()
    881  vcpu_clear_mmio_info(vcpu, gva);  in FNAME()
    895  for_each_shadow_entry(vcpu, gva, iterator) {  in FNAME()

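FNAME(invlpg) strings the two mechanisms together: first drop any cached MMIO info covering the GVA, then walk to the leaf SPTE and zap it. A heavily reduced single-level model of that order of operations, reusing the stand-in names from the sketches above; the real code iterates the multi-level walk and also syncs or frees shadow pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_MASK (~((uint64_t)0xfff))

struct vcpu_stub {
        uint64_t mmio_gva;       /* cached MMIO page, 0 if none */
        uint64_t spte[512];      /* single-level stand-in for the shadow table */
};

/* Modeled invlpg: forget cached MMIO info, then zap the leaf entry. */
static void invlpg_model(struct vcpu_stub *vcpu, uint64_t gva)
{
        if (vcpu->mmio_gva == (gva & PAGE_MASK))
                vcpu->mmio_gva = 0;                /* vcpu_clear_mmio_info() step */
        vcpu->spte[(gva >> 12) & 511] = 0;         /* zap the leaf SPTE */
}

int main(void)
{
        struct vcpu_stub v = { .mmio_gva = 0x1000 };

        v.spte[1] = 0xabc;
        invlpg_model(&v, 0x1234);
        printf("mmio_gva=%#lx spte[1]=%#lx\n",     /* both zero afterwards */
               (unsigned long)v.mmio_gva, (unsigned long)v.spte[1]);
        return 0;
}
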
D | x86.c |
    4606  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_read() argument
    4610  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  in kvm_mmu_gva_to_gpa_read()
    4613  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_fetch() argument
    4618  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  in kvm_mmu_gva_to_gpa_fetch()
    4621  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_write() argument
    4626  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);  in kvm_mmu_gva_to_gpa_write()
    4630  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,  in kvm_mmu_gva_to_gpa_system() argument
    4633  return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);  in kvm_mmu_gva_to_gpa_system()
    4797  static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva,  in vcpu_is_mmio_gpa() argument
    4805  trace_vcpu_match_mmio(gva, gpa, write, true);  in vcpu_is_mmio_gpa()
    [all …]

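The four wrappers differ only in the access mask they pass to walk_mmu->gva_to_gpa(): read supplies just the CPL-derived user bit, fetch and write OR in the corresponding permission bit, and the _system variant passes 0 so the walk behaves as a supervisor access. A sketch of that composition; the PFERR_* values mirror the architectural #PF error-code bits, and get_cpl() is reduced to a plain parameter:

#include <stdint.h>
#include <stdio.h>

#define PFERR_WRITE_MASK (1u << 1)
#define PFERR_USER_MASK  (1u << 2)
#define PFERR_FETCH_MASK (1u << 4)

/* Base access for the current privilege level: user bit iff CPL == 3. */
static uint32_t base_access(int cpl)
{
        return cpl == 3 ? PFERR_USER_MASK : 0;
}

static uint32_t access_read(int cpl)  { return base_access(cpl); }
static uint32_t access_fetch(int cpl) { return base_access(cpl) | PFERR_FETCH_MASK; }
static uint32_t access_write(int cpl) { return base_access(cpl) | PFERR_WRITE_MASK; }
static uint32_t access_system(void)   { return 0; }  /* always walks as supervisor */

int main(void)
{
        printf("read  @CPL3: %#x\n", access_read(3));
        printf("fetch @CPL3: %#x\n", access_fetch(3));
        printf("write @CPL0: %#x\n", access_write(0));
        printf("system:      %#x\n", access_system());
        return 0;
}
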
D | vmx.c |
    1605  static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)  in __invvpid() argument
    1610  u64 gva;  in __invvpid() member
    1611  } operand = { vpid, 0, gva };  in __invvpid()
    7542  gva_t gva;  in nested_vmx_get_vmptr() local
    7546  vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))  in nested_vmx_get_vmptr()
    7549  if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {  in nested_vmx_get_vmptr()
    8014  gva_t gva = 0;  in handle_vmread() local
    8040  vmx_instruction_info, true, &gva))  in handle_vmread()
    8043  if (kvm_write_guest_virt_system(vcpu, gva, &field_value,  in handle_vmread()
    8059  gva_t gva;  in handle_vmwrite() local
    [all …]

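__invvpid() builds the 128-bit descriptor INVVPID expects in memory: a 16-bit VPID, 48 reserved-zero bits, then the 64-bit linear address. A layout-only sketch, with no INVVPID actually issued; on little-endian x86-64 the bitfields pack into the low quadword the way the SDM's descriptor diagram requires:

#include <stdint.h>
#include <stdio.h>

/* 128-bit in-memory descriptor consumed by INVVPID (Intel SDM Vol. 3). */
struct invvpid_operand {
        uint64_t vpid : 16;   /* virtual processor identifier */
        uint64_t rsvd : 48;   /* must be zero */
        uint64_t gva;         /* linear address, for single-address invalidation */
};

int main(void)
{
        struct invvpid_operand op = { .vpid = 5, .rsvd = 0, .gva = 0x7f0000001000ull };

        /* In the kernel, the address of this operand is handed to the
         * "invvpid" instruction together with the invalidation type. */
        printf("sizeof(operand) = %zu bytes\n", sizeof(op));   /* 16 */
        printf("vpid=%u gva=%#llx\n", (unsigned)op.vpid,
               (unsigned long long)op.gva);
        return 0;
}
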
/arch/s390/kvm/
D | gaccess.c |
    493  static int trans_exc(struct kvm_vcpu *vcpu, int code, unsigned long gva,  in trans_exc() argument
    534  tec->addr = gva >> PAGE_SHIFT;  in trans_exc()
    614  static unsigned long guest_translate(struct kvm_vcpu *vcpu, unsigned long gva,  in guest_translate() argument
    618  union vaddress vaddr = {.addr = gva};  in guest_translate()
    619  union raddress raddr = {.addr = gva};  in guest_translate()
    904  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,  in guest_translate_address() argument
    912  gva = kvm_s390_logical_to_effective(vcpu, gva);  in guest_translate_address()
    913  rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);  in guest_translate_address()
    916  if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {  in guest_translate_address()
    918  return trans_exc(vcpu, PGM_PROTECTION, gva, 0,  in guest_translate_address()
    [all …]

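guest_translate() overlays the GVA with union vaddress so the s390 DAT index fields get names (region-first/second/third, segment, page, and byte index), while trans_exc() reports only the failing page frame in the translation-exception data. A portable sketch of that 11+11+11+11+8+12-bit split using shifts rather than the kernel's big-endian bitfields:

#include <stdint.h>
#include <stdio.h>

/* Hedged model of the kernel's union vaddress field layout. */
static void split_vaddr(uint64_t gva)
{
        unsigned rfx = (gva >> 53) & 0x7ff;   /* region-first index  */
        unsigned rsx = (gva >> 42) & 0x7ff;   /* region-second index */
        unsigned rtx = (gva >> 31) & 0x7ff;   /* region-third index  */
        unsigned sx  = (gva >> 20) & 0x7ff;   /* segment index       */
        unsigned px  = (gva >> 12) & 0xff;    /* page index          */
        unsigned bx  =  gva        & 0xfff;   /* byte index          */

        printf("rfx=%u rsx=%u rtx=%u sx=%u px=%u bx=%#x\n",
               rfx, rsx, rtx, sx, px, bx);
        /* trans_exc() reports only the page frame of the failing GVA: */
        printf("tec addr field: %#llx\n", (unsigned long long)(gva >> 12));
}

int main(void)
{
        split_vaddr(0x0000123456789abcULL);
        return 0;
}
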
D | gaccess.h |
    164  int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva,
    166  int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,

/arch/mips/kvm/
D | mmu.c |
    1051  unsigned long gva,  in kvm_mips_handle_mapped_seg_tlb_fault() argument
    1057  unsigned int idx = TLB_LO_IDX(*tlb, gva);  in kvm_mips_handle_mapped_seg_tlb_fault()
    1067  if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))  in kvm_mips_handle_mapped_seg_tlb_fault()
    1087  ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);  in kvm_mips_handle_mapped_seg_tlb_fault()
    1089  kvm_err("No ptep for gva %lx\n", gva);  in kvm_mips_handle_mapped_seg_tlb_fault()
    1098  kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);  in kvm_mips_handle_mapped_seg_tlb_fault()
    1206  unsigned long gva,  in kvm_trap_emul_gva_fault() argument
    1213  if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {  in kvm_trap_emul_gva_fault()
    1214  if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)  in kvm_trap_emul_gva_fault()
    1216  } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||  in kvm_trap_emul_gva_fault()
    [all …]

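A MIPS TLB entry maps an even/odd pair of pages through EntryLo0/EntryLo1, so the fault handler first works out which half of the pair the GVA selects and then fills PTEs starting from the even page. A sketch of that selection, assuming 4 KiB pages; the kernel's TLB_LO_IDX() also honours the entry's page mask:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)

/* Which half of the even/odd pair a GVA selects: bit just above the offset. */
static int tlb_lo_idx(unsigned long gva)
{
        return (gva >> PAGE_SHIFT) & 1;
}

int main(void)
{
        unsigned long gva  = 0x00401234;
        unsigned long page = gva & ~(PAGE_SIZE - 1);   /* page-align */
        unsigned long even = page & ~PAGE_SIZE;        /* clear the pair-select bit */

        printf("gva %#lx selects EntryLo%d\n", gva, tlb_lo_idx(gva));
        printf("even page of pair: %#lx\n", even);
        printf("odd  page of pair: %#lx\n", even | PAGE_SIZE);
        return 0;
}
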
D | tlb.c |
    304  int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,  in kvm_vz_guest_tlb_lookup() argument
    322  write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl));  in kvm_vz_guest_tlb_lookup()
    364  pa = entrylo[!!(gva & pagemaskbit)];  in kvm_vz_guest_tlb_lookup()
    378  pa |= gva & ~(pagemask | pagemaskbit);  in kvm_vz_guest_tlb_lookup()

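After kvm_vz_guest_tlb_lookup() probes the guest TLB, it finishes the translation the way the hardware would: pick EntryLo0 or EntryLo1 with the page-mask select bit, then splice the untranslated low GVA bits onto the output frame. A simplified model for 4 KiB pages; the kernel derives pagemaskbit from the guest's PageMask register and decodes the frame from the raw EntryLo value:

#include <stdio.h>

int main(void)
{
        /* Modeled guest TLB entry after a successful probe (4 KiB pages). */
        unsigned long pagemaskbit = 0x1000;                  /* even/odd select bit */
        unsigned long frame[2] = { 0x82000000, 0x82005000 }; /* EntryLo0/1 output frames */
        unsigned long gva = 0x00401234;

        int idx = !!(gva & pagemaskbit);                 /* which half of the pair */
        unsigned long offset = gva & (pagemaskbit - 1);  /* untranslated low bits */
        unsigned long pa = frame[idx] | offset;

        printf("gva %#lx -> EntryLo%d, pa %#lx\n", gva, idx, pa);  /* pa 0x82005234 */
        return 0;
}
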
D | trap_emul.c |
    23  static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)  in kvm_trap_emul_gva_to_gpa_cb() argument
    26  gva_t kseg = KSEGX(gva);  in kvm_trap_emul_gva_to_gpa_cb()
    27  gva_t gkseg = KVM_GUEST_KSEGX(gva);  in kvm_trap_emul_gva_to_gpa_cb()
    30  gpa = CPHYSADDR(gva);  in kvm_trap_emul_gva_to_gpa_cb()
    32  gpa = KVM_GUEST_CPHYSADDR(gva);  in kvm_trap_emul_gva_to_gpa_cb()
    34  kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);  in kvm_trap_emul_gva_to_gpa_cb()
    39  kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);  in kvm_trap_emul_gva_to_gpa_cb()

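In the trap-and-emulate flavour, unmapped kernel segments translate arithmetically: a KSEG0/KSEG1 address simply drops its segment bits (CPHYSADDR), and the KVM_GUEST_* variants do the same relative to the guest map. A sketch of the 32-bit KSEG0/KSEG1 case; the macro values match the standard MIPS memory map:

#include <stdint.h>
#include <stdio.h>

#define KSEG0        0x80000000u
#define KSEG1        0xa0000000u
#define KSEGX(a)     ((a) & 0xe0000000u)   /* which 512 MiB kernel segment */
#define CPHYSADDR(a) ((a) & 0x1fffffffu)   /* strip the segment bits */

int main(void)
{
        uint32_t gva = 0x80123456;         /* cached, unmapped KSEG0 address */

        if (KSEGX(gva) == KSEG0 || KSEGX(gva) == KSEG1)
                printf("gva %#x -> gpa %#x\n", gva, CPHYSADDR(gva));
        else
                printf("gva %#x: mapped segment, needs a TLB lookup\n", gva);
        return 0;
}
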
D | vz.c |
    183  static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)  in kvm_vz_gva_to_gpa_cb() argument
    186  return gva;  in kvm_vz_gva_to_gpa_cb()
    714  static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,  in kvm_vz_gva_to_gpa() argument
    717  u32 gva32 = gva;  in kvm_vz_gva_to_gpa()
    720  if ((long)gva == (s32)gva32) {  in kvm_vz_gva_to_gpa()
    775  } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {  in kvm_vz_gva_to_gpa()
    783  if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {  in kvm_vz_gva_to_gpa()
    797  *gpa = gva & 0x07ffffffffffffff;  in kvm_vz_gva_to_gpa()
    803  return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);  in kvm_vz_gva_to_gpa()

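kvm_vz_gva_to_gpa() first tests whether a 64-bit GVA is really a sign-extended 32-bit compatibility address, and otherwise recognizes XKPHYS (top two bits 10) as an unmapped window whose GPA is just the low 59 bits; anything else falls through to the guest TLB lookup above. A sketch of the two address tests; the real code also validates the cache-coherency attribute in bits 61:59 against SegCtl2:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t gvas[] = { 0xffffffff80123456ull,   /* sign-extended 32-bit (compat) */
                            0x9000000012345678ull }; /* XKPHYS direct window          */

        for (int i = 0; i < 2; i++) {
                uint64_t gva = gvas[i];
                uint32_t gva32 = (uint32_t)gva;

                if ((int64_t)gva == (int32_t)gva32) {
                        /* 32-bit compatibility segments (KSEG0 etc.) apply. */
                        printf("%#llx: compat segment, low 32 bits %#x\n",
                               (unsigned long long)gva, gva32);
                } else if ((gva & 0xc000000000000000ull) == 0x8000000000000000ull) {
                        /* XKPHYS: unmapped window onto physical memory. */
                        printf("%#llx: xkphys, gpa %#llx\n",
                               (unsigned long long)gva,
                               (unsigned long long)(gva & 0x07ffffffffffffffull));
                }
        }
        return 0;
}
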
/arch/x86/include/asm/
D | kvm_host.h |
    338  int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err,
    342  gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
    348  void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
    1261  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
    1268  gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
    1270  gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
    1272  gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
    1274  gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
    1281  int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u64 error_code,
    1283  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);

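These kvm_host.h lines show the shape of the x86 MMU abstraction: each paging mode (shadow, TDP, nested) fills in the same table of GVA-centric callbacks. A stripped-down model of that ops-table pattern, using the trivial nonpaging mode, where GPA == GVA, as the example implementation; all type and function names here are stand-ins:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gva_t;
typedef uint64_t gpa_t;

struct mmu_stub;

/* Per-MMU-mode operation table, modeled on struct kvm_mmu's callbacks. */
struct mmu_ops {
        gpa_t (*gva_to_gpa)(struct mmu_stub *mmu, gva_t gva, uint32_t access);
        void  (*invlpg)(struct mmu_stub *mmu, gva_t gva);
};

struct mmu_stub {
        const struct mmu_ops *ops;
};

/* "Nonpaging" mode: the guest has paging off, so GPA == GVA. */
static gpa_t nonpaging_gva_to_gpa(struct mmu_stub *mmu, gva_t gva, uint32_t access)
{
        (void)mmu; (void)access;
        return gva;
}

static void nonpaging_invlpg(struct mmu_stub *mmu, gva_t gva)
{
        (void)mmu; (void)gva;    /* nothing to invalidate without paging */
}

static const struct mmu_ops nonpaging_ops = {
        .gva_to_gpa = nonpaging_gva_to_gpa,
        .invlpg     = nonpaging_invlpg,
};

int main(void)
{
        struct mmu_stub mmu = { .ops = &nonpaging_ops };
        gva_t gva = 0x1234000;

        printf("gpa = %#llx\n",
               (unsigned long long)mmu.ops->gva_to_gpa(&mmu, gva, 0));
        mmu.ops->invlpg(&mmu, gva);
        return 0;
}
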
/arch/mips/include/asm/
D | kvm_host.h |
    793  gpa_t (*gva_to_gpa)(gva_t gva);
    863  unsigned long gva,
    882  int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
    930  unsigned long gva,