/arch/powerpc/kvm/
44x_emulate.c
    43  static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
    45  vcpu->arch.pc = vcpu->arch.srr0;
    46  kvmppc_set_msr(vcpu, vcpu->arch.srr1);
    49  int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
    65  kvmppc_emul_rfi(vcpu);
    66  kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
    81  vcpu->arch.gpr[rt] = vcpu->arch.msr;
    82  kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
    87  kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
    88  kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
    [all …]
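The rfi emulation above restores the guest's next PC and MSR from its save/restore registers. A minimal user-space sketch of that idea, assuming a simplified stand-in structure rather than the kernel's kvm_vcpu:

/* Illustrative only: toy_vcpu and its fields are invented for this sketch. */
#include <stdio.h>
#include <stdint.h>

struct toy_vcpu {
        uint32_t pc, msr;
        uint32_t srr0, srr1;   /* PC/MSR captured when the interrupt was taken */
};

static void emul_rfi(struct toy_vcpu *v)
{
        v->pc  = v->srr0;      /* resume where the interrupt hit */
        v->msr = v->srr1;      /* restore the pre-interrupt machine state */
}

int main(void)
{
        struct toy_vcpu v = { .pc = 0x100, .msr = 0, .srr0 = 0xc0000200, .srr1 = 0x8000 };
        emul_rfi(&v);
        printf("pc=0x%x msr=0x%x\n", (unsigned)v.pc, (unsigned)v.msr);
        return 0;
}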
emulate.c
    33  void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
    35  if (vcpu->arch.tcr & TCR_DIE) {
    41  nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
    42  mod_timer(&vcpu->arch.dec_timer,
    45  del_timer(&vcpu->arch.dec_timer);
    65  int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
    67  u32 inst = vcpu->arch.last_inst;
    78  kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
    82  vcpu->arch.esr |= ESR_PTR;
    83  kvmppc_core_queue_program(vcpu);
    [all …]
booke.c
    62  void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
    66  printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
    67  printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
    68  printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);
    70  printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
    74  vcpu->arch.gpr[i],
    75  vcpu->arch.gpr[i+1],
    76  vcpu->arch.gpr[i+2],
    77  vcpu->arch.gpr[i+3]);
    81  static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
    [all …]
timing.h
    27  void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
    28  void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
    29  void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
    30  void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
    32  static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type)
    34  vcpu->arch.last_exit_type = type;
    39  static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
    40  static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {}
    41  static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
    43  static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {}
    [all …]
powerpc.c
    50  int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
    55  er = kvmppc_emulate_instruction(run, vcpu);
    73  vcpu->arch.last_inst);
    175  struct kvm_vcpu *vcpu;
    176  vcpu = kvmppc_core_vcpu_create(kvm, id);
    177  kvmppc_create_vcpu_debugfs(vcpu, id);
    178  return vcpu;
    181  void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
    183  kvmppc_remove_vcpu_debugfs(vcpu);
    184  kvmppc_core_vcpu_free(vcpu);
    [all …]
timing.c
    32  void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu)
    38  mutex_lock(&vcpu->mutex);
    40  vcpu->arch.last_exit_type = 0xDEAD;
    42  vcpu->arch.timing_count_type[i] = 0;
    43  vcpu->arch.timing_max_duration[i] = 0;
    44  vcpu->arch.timing_min_duration[i] = 0xFFFFFFFF;
    45  vcpu->arch.timing_sum_duration[i] = 0;
    46  vcpu->arch.timing_sum_quad_duration[i] = 0;
    48  vcpu->arch.timing_last_exit = 0;
    49  vcpu->arch.timing_exit.tv64 = 0;
    [all …]
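The timing.c fields above (count, max, min seeded to 0xFFFFFFFF, sum, sum of squares) suggest per-exit-type duration statistics. A hedged, self-contained sketch of that bookkeeping, with invented names and sample data, deriving a mean and a crude variance from the same accumulators:

#include <stdio.h>
#include <stdint.h>

#define NR_TYPES 4

struct exit_timing {
        uint64_t count[NR_TYPES];
        uint64_t min[NR_TYPES], max[NR_TYPES];
        uint64_t sum[NR_TYPES], sum_sq[NR_TYPES];
};

static void timing_init(struct exit_timing *t)
{
        for (int i = 0; i < NR_TYPES; i++) {
                t->count[i] = t->max[i] = t->sum[i] = t->sum_sq[i] = 0;
                t->min[i] = UINT64_MAX;          /* analogous to the 0xFFFFFFFF seed */
        }
}

static void timing_account(struct exit_timing *t, int type, uint64_t duration)
{
        t->count[type]++;
        t->sum[type]    += duration;
        t->sum_sq[type] += duration * duration;  /* cf. timing_sum_quad_duration */
        if (duration < t->min[type]) t->min[type] = duration;
        if (duration > t->max[type]) t->max[type] = duration;
}

int main(void)
{
        struct exit_timing t;
        uint64_t samples[] = { 120, 80, 200, 95 };

        timing_init(&t);
        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                timing_account(&t, 0, samples[i]);

        double mean = (double)t.sum[0] / t.count[0];
        double var  = (double)t.sum_sq[0] / t.count[0] - mean * mean;
        printf("n=%llu min=%llu max=%llu mean=%.1f var=%.1f\n",
               (unsigned long long)t.count[0], (unsigned long long)t.min[0],
               (unsigned long long)t.max[0], mean, var);
        return 0;
}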
44x.c
    42  void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
    46  mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
    47  mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
    48  mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
    49  mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
    50  mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
    51  mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
    52  mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
    53  mtmsr(vcpu->arch.host_msr);
    56  void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
    [all …]
44x_tlb.c
    47  void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
    52  printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
    140  void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
    142  struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
    160  void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
    162  struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
    178  int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
    181  struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
    211  int kvmppc_44x_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
    213  unsigned int as = !!(vcpu->arch.msr & MSR_IS);
    [all …]
/arch/s390/kvm/
interrupt.c
    20  static int psw_extint_disabled(struct kvm_vcpu *vcpu)
    22  return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
    25  static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
    27  if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
    28  (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
    29  (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
    34  static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
    39  if (psw_extint_disabled(vcpu))
    41  if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
    45  if (psw_extint_disabled(vcpu))
    [all …]
intercept.c
    23  static int handle_lctlg(struct kvm_vcpu *vcpu)
    25  int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
    26  int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
    27  int base2 = vcpu->arch.sie_block->ipb >> 28;
    28  int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
    29  ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
    33  vcpu->stat.instruction_lctlg++;
    34  if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
    39  useraddr += vcpu->arch.guest_gprs[base2];
    42  return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
    [all …]
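handle_lctlg() above pulls register numbers, a base register, and a 20-bit long displacement out of the intercepted instruction words (ipa/ipb). A self-contained sketch of the same bit extraction, using the masks and shifts shown above on made-up sample values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t ipa = 0x0bf2;                 /* hypothetical intercepted instruction bits */
        uint32_t ipb = 0x5123002f;

        int reg1  = (ipa & 0x00f0) >> 4;
        int reg3  =  ipa & 0x000f;
        int base2 =  ipb >> 28;
        int disp2 = ((ipb & 0x0fff0000) >> 16) + /* DL: low 12 displacement bits          */
                    ((ipb & 0x0000ff00) << 4);   /* DH: high 8 bits, placed above the DL  */

        printf("reg1=%d reg3=%d base2=%d disp2=0x%x\n", reg1, reg3, base2, disp2);
        return 0;
}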
priv.c
    23  static int handle_set_prefix(struct kvm_vcpu *vcpu)
    25  int base2 = vcpu->arch.sie_block->ipb >> 28;
    26  int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
    31  vcpu->stat.instruction_spx++;
    35  operand2 += vcpu->arch.guest_gprs[base2];
    39  kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
    44  if (get_guest_u32(vcpu, operand2, &address)) {
    45  kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
    52  if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
    53  (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
    [all …]
kvm-s390.c
    195  void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
    197  VCPU_EVENT(vcpu, 3, "%s", "free cpu");
    198  free_page((unsigned long)(vcpu->arch.sie_block));
    199  kvm_vcpu_uninit(vcpu);
    200  kfree(vcpu);
    229  int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
    234  void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
    239  void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    241  save_fp_regs(&vcpu->arch.host_fpregs);
    242  save_access_regs(vcpu->arch.host_acrs);
    [all …]
diag.c
    18  static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
    20  VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
    21  vcpu->stat.diagnose_44++;
    22  vcpu_put(vcpu);
    24  vcpu_load(vcpu);
    28  static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
    30  unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
    31  unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;
    33  VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
    36  vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
    [all …]
sigp.c
    46  static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
    49  struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
    69  VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
    73  static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
    75  struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
    106  VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
    110  static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
    112  struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
    146  VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
    150  static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
    [all …]
/arch/x86/kvm/
x86.c
    144  u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
    146  if (irqchip_in_kernel(vcpu->kvm))
    147  return vcpu->arch.apic_base;
    149  return vcpu->arch.apic_base;
    153  void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
    156  if (irqchip_in_kernel(vcpu->kvm))
    157  kvm_lapic_set_base(vcpu, data);
    159  vcpu->arch.apic_base = data;
    163  void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
    165  WARN_ON(vcpu->arch.exception.pending);
    [all …]
vmx.c
    63  struct kvm_vcpu vcpu;
    101  static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    103  return container_of(vcpu, struct vcpu_vmx, vcpu);
    350  if (vmx->vcpu.cpu == cpu)
    354  rdtscll(vmx->vcpu.arch.host_tsc);
    356  vmx->vcpu.cpu = -1;
    362  if (vmx->vcpu.cpu == -1)
    364  smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
    476  static void update_exception_bitmap(struct kvm_vcpu *vcpu)
    481  if (!vcpu->fpu_active)
    [all …]
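to_vmx() above recovers the vendor-specific vcpu_vmx from a pointer to its embedded kvm_vcpu member via container_of(). A hedged user-space sketch of that idiom, with stand-in struct names rather than the kernel's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_vcpu { int cpu; };
struct toy_vmx  { unsigned long vmcs_addr; struct toy_vcpu vcpu; };

static struct toy_vmx *to_vmx(struct toy_vcpu *vcpu)
{
        return container_of(vcpu, struct toy_vmx, vcpu);
}

int main(void)
{
        struct toy_vmx vmx = { .vmcs_addr = 0x1000, .vcpu = { .cpu = 2 } };
        struct toy_vcpu *v = &vmx.vcpu;          /* generic code sees only this pointer */

        printf("recovered vmcs_addr = 0x%lx\n", to_vmx(v)->vmcs_addr);
        return 0;
}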
lapic.h
    17  struct kvm_vcpu *vcpu;
    23  int kvm_create_lapic(struct kvm_vcpu *vcpu);
    24  void kvm_free_lapic(struct kvm_vcpu *vcpu);
    26  int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu);
    27  int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu);
    28  int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu);
    29  void kvm_lapic_reset(struct kvm_vcpu *vcpu);
    30  u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu);
    31  void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8);
    32  void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value);
    [all …]
mmu.c
    51  static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
    53  static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
    149  int (*entry)(struct kvm_shadow_walk *walk, struct kvm_vcpu *vcpu,
    157  typedef int (*mmu_parent_walk_fn) (struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp);
    198  static int is_write_protection(struct kvm_vcpu *vcpu)
    200  return vcpu->arch.cr0 & X86_CR0_WP;
    208  static int is_nx(struct kvm_vcpu *vcpu)
    210  return vcpu->arch.shadow_efer & EFER_NX;
    310  static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
    314  r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
    [all …]
svm.c
    64  static void svm_flush_tlb(struct kvm_vcpu *vcpu);
    66  static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
    68  return container_of(vcpu, struct vcpu_svm, vcpu);
    114  static inline u8 pop_irq(struct kvm_vcpu *vcpu)
    116  int word_index = __ffs(vcpu->arch.irq_summary);
    117  int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
    120  clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
    121  if (!vcpu->arch.irq_pending[word_index])
    122  clear_bit(word_index, &vcpu->arch.irq_summary);
    126  static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
    [all …]
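pop_irq()/push_irq() above use a two-level pending bitmap: a summary word marks which irq_pending[] word is non-empty, and the lowest set bit in that word is the next vector. A self-contained sketch of that scheme under simplified sizes and invented names; it uses the POSIX ffs(), which is 1-based, where the kernel uses the 0-based __ffs():

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define NR_WORDS 8
#define BITS_PER_WORD 32

struct irq_state {
        unsigned int summary;                 /* bit n set => pending[n] != 0 */
        unsigned int pending[NR_WORDS];
};

static void push_irq(struct irq_state *s, unsigned char irq)
{
        s->pending[irq / BITS_PER_WORD] |= 1u << (irq % BITS_PER_WORD);
        s->summary |= 1u << (irq / BITS_PER_WORD);
}

static int pop_irq(struct irq_state *s)
{
        if (!s->summary)
                return -1;                            /* nothing pending */

        int word = ffs(s->summary) - 1;               /* ffs() is 1-based */
        int bit  = ffs(s->pending[word]) - 1;

        s->pending[word] &= ~(1u << bit);
        if (!s->pending[word])
                s->summary &= ~(1u << word);          /* word drained: clear summary bit */
        return word * BITS_PER_WORD + bit;
}

int main(void)
{
        struct irq_state s = { 0 };

        push_irq(&s, 3);
        push_irq(&s, 40);
        printf("%d %d %d\n", pop_irq(&s), pop_irq(&s), pop_irq(&s));  /* 3 40 -1 */
        return 0;
}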
kvm_cache_regs.h
    4  static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu,
    7  if (!test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail))
    8  kvm_x86_ops->cache_reg(vcpu, reg);
    10  return vcpu->arch.regs[reg];
    13  static inline void kvm_register_write(struct kvm_vcpu *vcpu,
    17  vcpu->arch.regs[reg] = val;
    18  __set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
    19  __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
    22  static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
    24  return kvm_register_read(vcpu, VCPU_REGS_RIP);
    [all …]
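kvm_cache_regs.h above implements a lazy register cache: a read pulls the value from hardware only when its regs_avail bit is clear, and a write marks the slot both available and dirty for later write-back. A minimal sketch of the same pattern, assuming invented names and a stand-in fetch callback in place of kvm_x86_ops->cache_reg():

#include <stdio.h>

enum { REG_AX, REG_CX, REG_IP, NR_REGS };

struct reg_cache {
        unsigned long regs[NR_REGS];
        unsigned long avail;                  /* bit set: regs[i] is up to date   */
        unsigned long dirty;                  /* bit set: regs[i] must be flushed */
        unsigned long (*fetch)(int reg);      /* expensive "read from hardware"   */
};

static unsigned long reg_read(struct reg_cache *c, int reg)
{
        if (!(c->avail & (1ul << reg))) {     /* miss: pull from hardware once */
                c->regs[reg] = c->fetch(reg);
                c->avail |= 1ul << reg;
        }
        return c->regs[reg];
}

static void reg_write(struct reg_cache *c, int reg, unsigned long val)
{
        c->regs[reg] = val;
        c->avail |= 1ul << reg;
        c->dirty |= 1ul << reg;               /* remember to write it back */
}

static unsigned long fake_hw_fetch(int reg)
{
        printf("  (hardware read of reg %d)\n", reg);
        return 0x1000 + (unsigned long)reg;
}

int main(void)
{
        struct reg_cache c = { .fetch = fake_hw_fetch };

        printf("ip = 0x%lx\n", reg_read(&c, REG_IP));   /* triggers one fetch */
        printf("ip = 0x%lx\n", reg_read(&c, REG_IP));   /* served from cache  */
        reg_write(&c, REG_AX, 42);
        printf("dirty mask = 0x%lx\n", c.dirty);
        return 0;
}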
/arch/ia64/kvm/
vcpu.c
    95  void physical_mode_init(struct kvm_vcpu *vcpu)
    97  vcpu->arch.mode_flags = GUEST_IN_PHY;
    100  void switch_to_physical_rid(struct kvm_vcpu *vcpu)
    106  ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
    108  ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
    116  void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
    121  ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
    123  ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
    134  void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
    143  switch_to_physical_rid(vcpu);
    [all …]
process.c
    81  static void collect_interruption(struct kvm_vcpu *vcpu)
    87  struct kvm_pt_regs *regs = vcpu_regs(vcpu);
    89  vpsr = vcpu_get_psr(vcpu);
    90  vcpu_bsw0(vcpu);
    102  vcpu_set_ipsr(vcpu, vpsr);
    109  vcpu_set_iip(vcpu , regs->cr_iip);
    112  vifs = VCPU(vcpu, ifs);
    114  vcpu_set_ifs(vcpu, vifs);
    116  vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
    119  vdcr = VCPU(vcpu, dcr);
    [all …]
kvm-ia64.c
    201  static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
    206  dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr, len, is_write);
    211  static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
    218  static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
    223  p = kvm_get_vcpu_ioreq(vcpu);
    227  vcpu->mmio_needed = 1;
    228  vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
    229  vcpu->mmio_size = kvm_run->mmio.len = p->size;
    230  vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
    232  if (vcpu->mmio_is_write)
    [all …]
/arch/powerpc/include/asm/
kvm_ppc.h
    39  extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
    43  extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
    44  extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
    47  extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
    51  struct kvm_vcpu *vcpu);
    52  extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
    53  extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
    55  extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
    58  extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
    59  extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
    [all …]
/arch/x86/include/asm/
kvm_host.h
    227  void (*new_cr3)(struct kvm_vcpu *vcpu);
    228  int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
    229  void (*free)(struct kvm_vcpu *vcpu);
    230  gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
    231  void (*prefetch_page)(struct kvm_vcpu *vcpu,
    233  int (*sync_page)(struct kvm_vcpu *vcpu,
    235  void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva);
    441  void (*vcpu_free)(struct kvm_vcpu *vcpu);
    442  int (*vcpu_reset)(struct kvm_vcpu *vcpu);
    444  void (*prepare_guest_switch)(struct kvm_vcpu *vcpu);
    [all …]
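The kvm_host.h members above are function pointers: per-backend behaviour (MMU flavour, vendor module) is reached through an ops table rather than compile-time branches. A hedged, self-contained sketch of that pattern with toy names, not the kernel's actual types:

#include <stdio.h>

typedef unsigned long gva_t;
typedef unsigned long gpa_t;

struct toy_vcpu;

struct toy_mmu_ops {
        gpa_t (*gva_to_gpa)(struct toy_vcpu *vcpu, gva_t gva);
        void  (*invlpg)(struct toy_vcpu *vcpu, gva_t gva);
};

struct toy_vcpu {
        const struct toy_mmu_ops *mmu;        /* selected once, called indirectly */
};

static gpa_t identity_gva_to_gpa(struct toy_vcpu *vcpu, gva_t gva) { (void)vcpu; return gva; }
static gpa_t offset_gva_to_gpa(struct toy_vcpu *vcpu, gva_t gva)   { (void)vcpu; return gva + 0x100000; }
static void  noop_invlpg(struct toy_vcpu *vcpu, gva_t gva)          { (void)vcpu; (void)gva; }

static const struct toy_mmu_ops identity_mmu = { identity_gva_to_gpa, noop_invlpg };
static const struct toy_mmu_ops offset_mmu   = { offset_gva_to_gpa,   noop_invlpg };

int main(void)
{
        struct toy_vcpu a = { .mmu = &identity_mmu };
        struct toy_vcpu b = { .mmu = &offset_mmu };

        printf("0x%lx 0x%lx\n", a.mmu->gva_to_gpa(&a, 0x2000),
                                b.mmu->gva_to_gpa(&b, 0x2000));
        return 0;
}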