
Searched refs:vcpu (Results 1 – 25 of 208) sorted by relevance


/arch/arm64/include/asm/
kvm_emulate.h
36 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
37 void kvm_skip_instr32(struct kvm_vcpu *vcpu);
39 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
40 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
41 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
42 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
44 static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) in vcpu_el1_is_32bit() argument
46 return !(vcpu->arch.hcr_el2 & HCR_RW); in vcpu_el1_is_32bit()
49 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) in vcpu_reset_hcr() argument
51 vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; in vcpu_reset_hcr()
[all …]
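
For orientation, a minimal sketch (not part of the tree; the handler name and return convention are assumptions) of how an exit handler uses the injection helpers declared above:

	/* Reject an access the hypervisor does not emulate by making the
	 * guest take an Undefined Instruction exception; uses only the
	 * kvm_inject_undefined() declaration shown above. */
	static int handle_unsupported_trap(struct kvm_vcpu *vcpu)
	{
		kvm_inject_undefined(vcpu);
		return 1;	/* non-zero: resume the guest */
	}
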
/arch/powerpc/kvm/
booke_emulate.c
24 static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) in kvmppc_emul_rfi() argument
26 vcpu->arch.regs.nip = vcpu->arch.shared->srr0; in kvmppc_emul_rfi()
27 kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); in kvmppc_emul_rfi()
30 static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu) in kvmppc_emul_rfdi() argument
32 vcpu->arch.regs.nip = vcpu->arch.dsrr0; in kvmppc_emul_rfdi()
33 kvmppc_set_msr(vcpu, vcpu->arch.dsrr1); in kvmppc_emul_rfdi()
36 static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu) in kvmppc_emul_rfci() argument
38 vcpu->arch.regs.nip = vcpu->arch.csrr0; in kvmppc_emul_rfci()
39 kvmppc_set_msr(vcpu, vcpu->arch.csrr1); in kvmppc_emul_rfci()
42 int kvmppc_booke_emulate_op(struct kvm_vcpu *vcpu, in kvmppc_booke_emulate_op() argument
[all …]
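
The three rf*i emulations above share one shape: the guest PC (nip) is restored from the saved-PC half of a save/restore register pair, and the MSR from its partner via kvmppc_set_msr(). A condensed sketch (the helper name is invented for illustration):

	/* Generic "return from interrupt" step, as in kvmppc_emul_rfi(),
	 * _rfdi() and _rfci() above; only the register pair differs. */
	static void emul_return_from_intr(struct kvm_vcpu *vcpu,
					  ulong saved_pc, ulong saved_msr)
	{
		vcpu->arch.regs.nip = saved_pc;
		kvmppc_set_msr(vcpu, saved_msr);
	}
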
book3s_emulate.c
74 static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level) in spr_allowed() argument
77 if (vcpu->arch.papr_enabled && (level > PRIV_SUPER)) in spr_allowed()
81 if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM) in spr_allowed()
88 static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu) in kvmppc_copyto_vcpu_tm() argument
90 memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0], in kvmppc_copyto_vcpu_tm()
91 sizeof(vcpu->arch.gpr_tm)); in kvmppc_copyto_vcpu_tm()
92 memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp, in kvmppc_copyto_vcpu_tm()
94 memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr, in kvmppc_copyto_vcpu_tm()
96 vcpu->arch.ppr_tm = vcpu->arch.ppr; in kvmppc_copyto_vcpu_tm()
97 vcpu->arch.dscr_tm = vcpu->arch.dscr; in kvmppc_copyto_vcpu_tm()
[all …]
booke.c
64 void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) in kvmppc_dump_vcpu() argument
68 printk("pc: %08lx msr: %08llx\n", vcpu->arch.regs.nip, in kvmppc_dump_vcpu()
69 vcpu->arch.shared->msr); in kvmppc_dump_vcpu()
70 printk("lr: %08lx ctr: %08lx\n", vcpu->arch.regs.link, in kvmppc_dump_vcpu()
71 vcpu->arch.regs.ctr); in kvmppc_dump_vcpu()
72 printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, in kvmppc_dump_vcpu()
73 vcpu->arch.shared->srr1); in kvmppc_dump_vcpu()
75 printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); in kvmppc_dump_vcpu()
79 kvmppc_get_gpr(vcpu, i), in kvmppc_dump_vcpu()
80 kvmppc_get_gpr(vcpu, i+1), in kvmppc_dump_vcpu()
[all …]
emulate_loadstore.c
28 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu) in kvmppc_check_fp_disabled() argument
30 if (!(kvmppc_get_msr(vcpu) & MSR_FP)) { in kvmppc_check_fp_disabled()
31 kvmppc_core_queue_fpunavail(vcpu); in kvmppc_check_fp_disabled()
40 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu) in kvmppc_check_vsx_disabled() argument
42 if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) { in kvmppc_check_vsx_disabled()
43 kvmppc_core_queue_vsx_unavail(vcpu); in kvmppc_check_vsx_disabled()
52 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu) in kvmppc_check_altivec_disabled() argument
54 if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) { in kvmppc_check_altivec_disabled()
55 kvmppc_core_queue_vec_unavail(vcpu); in kvmppc_check_altivec_disabled()
72 int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) in kvmppc_emulate_loadstore() argument
[all …]
book3s_pr.c
52 static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
55 static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
66 static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu) in kvmppc_is_split_real() argument
68 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_is_split_real()
72 static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu) in kvmppc_fixup_split_real() argument
74 ulong msr = kvmppc_get_msr(vcpu); in kvmppc_fixup_split_real()
75 ulong pc = kvmppc_get_pc(vcpu); in kvmppc_fixup_split_real()
82 if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) in kvmppc_fixup_split_real()
89 vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK; in kvmppc_fixup_split_real()
90 kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS); in kvmppc_fixup_split_real()
[all …]
book3s_hv_tm.c
16 static void emulate_tx_failure(struct kvm_vcpu *vcpu, u64 failure_cause) in emulate_tx_failure() argument
19 u64 msr = vcpu->arch.shregs.msr; in emulate_tx_failure()
21 tfiar = vcpu->arch.regs.nip & ~0x3ull; in emulate_tx_failure()
23 if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) in emulate_tx_failure()
29 vcpu->arch.tfiar = tfiar; in emulate_tx_failure()
31 vcpu->arch.texasr = (vcpu->arch.texasr & 0x3ffffff) | texasr; in emulate_tx_failure()
42 int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu) in kvmhv_p9_tm_emulation() argument
44 u32 instr = vcpu->arch.emul_inst; in kvmhv_p9_tm_emulation()
45 u64 msr = vcpu->arch.shregs.msr; in kvmhv_p9_tm_emulation()
63 newmsr = vcpu->arch.shregs.srr1; in kvmhv_p9_tm_emulation()
[all …]
emulate.c
26 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) in kvmppc_emulate_dec() argument
31 pr_debug("mtDEC: %lx\n", vcpu->arch.dec); in kvmppc_emulate_dec()
32 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_emulate_dec()
36 kvmppc_core_dequeue_dec(vcpu); in kvmppc_emulate_dec()
41 if (vcpu->arch.dec == 0) in kvmppc_emulate_dec()
51 dec_time = vcpu->arch.dec; in kvmppc_emulate_dec()
58 hrtimer_start(&vcpu->arch.dec_timer, in kvmppc_emulate_dec()
60 vcpu->arch.dec_jiffies = get_tb(); in kvmppc_emulate_dec()
63 u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) in kvmppc_get_dec() argument
65 u64 jd = tb - vcpu->arch.dec_jiffies; in kvmppc_get_dec()
[all …]
timing.h
15 void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
16 void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu);
17 void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, unsigned int id);
18 void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu);
20 static inline void kvmppc_set_exit_type(struct kvm_vcpu *vcpu, int type) in kvmppc_set_exit_type() argument
22 vcpu->arch.last_exit_type = type; in kvmppc_set_exit_type()
27 static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {} in kvmppc_init_timing_stats() argument
28 static inline void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) {} in kvmppc_update_timing_stats() argument
29 static inline void kvmppc_create_vcpu_debugfs(struct kvm_vcpu *vcpu, in kvmppc_create_vcpu_debugfs() argument
31 static inline void kvmppc_remove_vcpu_debugfs(struct kvm_vcpu *vcpu) {} in kvmppc_remove_vcpu_debugfs() argument
[all …]
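
The header above pairs real declarations with empty static-inline stubs; this is the usual config-gated hook pattern. A sketch of the structure, assuming the guard is CONFIG_KVM_EXIT_TIMING (the option that controls PPC exit timing):

	#ifdef CONFIG_KVM_EXIT_TIMING
	void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);
	#else
	/* compiled out: calls collapse to nothing, no #ifdef at call sites */
	static inline void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) {}
	#endif
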
book3s.c
74 static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu, in kvmppc_update_int_pending() argument
77 if (is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_update_int_pending()
80 kvmppc_set_int_pending(vcpu, 1); in kvmppc_update_int_pending()
82 kvmppc_set_int_pending(vcpu, 0); in kvmppc_update_int_pending()
85 static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) in kvmppc_critical_section() argument
91 if (is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_critical_section()
94 crit_raw = kvmppc_get_critical(vcpu); in kvmppc_critical_section()
95 crit_r1 = kvmppc_get_gpr(vcpu, 1); in kvmppc_critical_section()
98 if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { in kvmppc_critical_section()
106 crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR); in kvmppc_critical_section()
[all …]
book3s_paired_singles.c
150 static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) in kvmppc_sync_qpr() argument
152 kvm_cvt_df(&VCPU_FPR(vcpu, rt), &vcpu->arch.qpr[rt]); in kvmppc_sync_qpr()
155 static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) in kvmppc_inject_pf() argument
158 u64 msr = kvmppc_get_msr(vcpu); in kvmppc_inject_pf()
162 kvmppc_set_msr(vcpu, msr); in kvmppc_inject_pf()
163 kvmppc_set_dar(vcpu, eaddr); in kvmppc_inject_pf()
168 kvmppc_set_dsisr(vcpu, dsisr); in kvmppc_inject_pf()
169 kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); in kvmppc_inject_pf()
172 static int kvmppc_emulate_fpr_load(struct kvm_vcpu *vcpu, in kvmppc_emulate_fpr_load() argument
184 r = kvmppc_ld(vcpu, &addr, len, tmp, true); in kvmppc_emulate_fpr_load()
[all …]
book3s_pr_papr.c
23 static unsigned long get_pteg_addr(struct kvm_vcpu *vcpu, long pte_index) in get_pteg_addr() argument
25 struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); in get_pteg_addr()
36 static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) in kvmppc_h_pr_enter() argument
38 long flags = kvmppc_get_gpr(vcpu, 4); in kvmppc_h_pr_enter()
39 long pte_index = kvmppc_get_gpr(vcpu, 5); in kvmppc_h_pr_enter()
47 pteg_addr = get_pteg_addr(vcpu, pte_index); in kvmppc_h_pr_enter()
49 mutex_lock(&vcpu->kvm->arch.hpt_mutex); in kvmppc_h_pr_enter()
70 hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6)); in kvmppc_h_pr_enter()
71 hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7)); in kvmppc_h_pr_enter()
76 kvmppc_set_gpr(vcpu, 4, pte_index | i); in kvmppc_h_pr_enter()
[all …]
/arch/arm64/kvm/
debug.c
36 static void save_guest_debug_regs(struct kvm_vcpu *vcpu) in save_guest_debug_regs() argument
38 u64 val = vcpu_read_sys_reg(vcpu, MDSCR_EL1); in save_guest_debug_regs()
40 vcpu->arch.guest_debug_preserved.mdscr_el1 = val; in save_guest_debug_regs()
43 vcpu->arch.guest_debug_preserved.mdscr_el1); in save_guest_debug_regs()
46 static void restore_guest_debug_regs(struct kvm_vcpu *vcpu) in restore_guest_debug_regs() argument
48 u64 val = vcpu->arch.guest_debug_preserved.mdscr_el1; in restore_guest_debug_regs()
50 vcpu_write_sys_reg(vcpu, val, MDSCR_EL1); in restore_guest_debug_regs()
53 vcpu_read_sys_reg(vcpu, MDSCR_EL1)); in restore_guest_debug_regs()
83 static void kvm_arm_setup_mdcr_el2(struct kvm_vcpu *vcpu) in kvm_arm_setup_mdcr_el2() argument
89 vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK; in kvm_arm_setup_mdcr_el2()
[all …]
handle_exit.c
29 static void kvm_handle_guest_serror(struct kvm_vcpu *vcpu, u32 esr) in kvm_handle_guest_serror() argument
32 kvm_inject_vabt(vcpu); in kvm_handle_guest_serror()
35 static int handle_hvc(struct kvm_vcpu *vcpu) in handle_hvc() argument
39 trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0), in handle_hvc()
40 kvm_vcpu_hvc_get_imm(vcpu)); in handle_hvc()
41 vcpu->stat.hvc_exit_stat++; in handle_hvc()
43 ret = kvm_hvc_call_handler(vcpu); in handle_hvc()
45 vcpu_set_reg(vcpu, 0, ~0UL); in handle_hvc()
52 static int handle_smc(struct kvm_vcpu *vcpu) in handle_smc() argument
62 vcpu_set_reg(vcpu, 0, ~0UL); in handle_smc()
[all …]
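
Both handlers above report an unrecognized call the same way: writing all-ones to the guest's x0, matching SMCCC's "not supported" value of -1. Schematically (the wrapper name is assumed):

	/* Tell the guest its HVC/SMC function ID was not recognized. */
	static void reject_guest_call(struct kvm_vcpu *vcpu)
	{
		vcpu_set_reg(vcpu, 0, ~0UL);	/* SMCCC NOT_SUPPORTED */
	}
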
reset.c
72 static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) in kvm_vcpu_enable_sve() argument
77 vcpu->arch.sve_max_vl = kvm_sve_max_vl; in kvm_vcpu_enable_sve()
84 vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE; in kvm_vcpu_enable_sve()
93 static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu) in kvm_vcpu_finalize_sve() argument
98 vl = vcpu->arch.sve_max_vl; in kvm_vcpu_finalize_sve()
113 vcpu->arch.sve_state = buf; in kvm_vcpu_finalize_sve()
114 vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED; in kvm_vcpu_finalize_sve()
118 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature) in kvm_arm_vcpu_finalize() argument
122 if (!vcpu_has_sve(vcpu)) in kvm_arm_vcpu_finalize()
125 if (kvm_arm_vcpu_sve_finalized(vcpu)) in kvm_arm_vcpu_finalize()
[all …]
inject_fault.c
17 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr) in inject_abt64() argument
19 unsigned long cpsr = *vcpu_cpsr(vcpu); in inject_abt64()
20 bool is_aarch32 = vcpu_mode_is_32bit(vcpu); in inject_abt64()
23 vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | in inject_abt64()
27 vcpu_write_sys_reg(vcpu, addr, FAR_EL1); in inject_abt64()
33 if (kvm_vcpu_trap_il_is32bit(vcpu)) in inject_abt64()
48 vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1); in inject_abt64()
51 static void inject_undef64(struct kvm_vcpu *vcpu) in inject_undef64() argument
55 vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 | in inject_undef64()
63 if (kvm_vcpu_trap_il_is32bit(vcpu)) in inject_undef64()
[all …]
/arch/s390/kvm/
priv.c
34 static int handle_ri(struct kvm_vcpu *vcpu) in handle_ri() argument
36 vcpu->stat.instruction_ri++; in handle_ri()
38 if (test_kvm_facility(vcpu->kvm, 64)) { in handle_ri()
39 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)"); in handle_ri()
40 vcpu->arch.sie_block->ecb3 |= ECB3_RI; in handle_ri()
41 kvm_s390_retry_instr(vcpu); in handle_ri()
44 return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); in handle_ri()
47 int kvm_s390_handle_aa(struct kvm_vcpu *vcpu) in kvm_s390_handle_aa() argument
49 if ((vcpu->arch.sie_block->ipa & 0xf) <= 4) in kvm_s390_handle_aa()
50 return handle_ri(vcpu); in kvm_s390_handle_aa()
[all …]
intercept.c
25 u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu) in kvm_s390_get_ilen() argument
27 struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block; in kvm_s390_get_ilen()
30 switch (vcpu->arch.sie_block->icptcode) { in kvm_s390_get_ilen()
37 ilen = insn_length(vcpu->arch.sie_block->ipa >> 8); in kvm_s390_get_ilen()
47 ilen = vcpu->arch.sie_block->pgmilc & 0x6; in kvm_s390_get_ilen()
53 static int handle_stop(struct kvm_vcpu *vcpu) in handle_stop() argument
55 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; in handle_stop()
59 vcpu->stat.exit_stop_request++; in handle_stop()
62 if (kvm_s390_vcpu_has_irq(vcpu, 1)) in handle_stop()
68 stop_pending = kvm_s390_is_stop_irq_pending(vcpu); in handle_stop()
[all …]
diag.c
20 static int diag_release_pages(struct kvm_vcpu *vcpu) in diag_release_pages() argument
23 unsigned long prefix = kvm_s390_get_prefix(vcpu); in diag_release_pages()
25 start = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4]; in diag_release_pages()
26 end = vcpu->run->s.regs.gprs[vcpu->arch.sie_block->ipa & 0xf] + PAGE_SIZE; in diag_release_pages()
27 vcpu->stat.diagnose_10++; in diag_release_pages()
31 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); in diag_release_pages()
33 VCPU_EVENT(vcpu, 5, "diag release pages %lX %lX", start, end); in diag_release_pages()
40 gmap_discard(vcpu->arch.gmap, start, end); in diag_release_pages()
48 gmap_discard(vcpu->arch.gmap, start, prefix); in diag_release_pages()
50 gmap_discard(vcpu->arch.gmap, 0, PAGE_SIZE); in diag_release_pages()
[all …]
kvm-s390.h
24 #define IS_TE_ENABLED(vcpu) ((vcpu->arch.sie_block->ecb & ECB_TE)) argument
26 #define IS_ITDB_VALID(vcpu) ((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1)) argument
60 static inline void kvm_s390_set_cpuflags(struct kvm_vcpu *vcpu, u32 flags) in kvm_s390_set_cpuflags() argument
62 atomic_or(flags, &vcpu->arch.sie_block->cpuflags); in kvm_s390_set_cpuflags()
65 static inline void kvm_s390_clear_cpuflags(struct kvm_vcpu *vcpu, u32 flags) in kvm_s390_clear_cpuflags() argument
67 atomic_andnot(flags, &vcpu->arch.sie_block->cpuflags); in kvm_s390_clear_cpuflags()
70 static inline bool kvm_s390_test_cpuflags(struct kvm_vcpu *vcpu, u32 flags) in kvm_s390_test_cpuflags() argument
72 return (atomic_read(&vcpu->arch.sie_block->cpuflags) & flags) == flags; in kvm_s390_test_cpuflags()
75 static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) in is_vcpu_stopped() argument
77 return kvm_s390_test_cpuflags(vcpu, CPUSTAT_STOPPED); in is_vcpu_stopped()
[all …]
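
The cpuflags helpers above are thin atomic wrappers around the SIE control block. An illustrative pairing (the function is invented; CPUSTAT_STOPPED is a real cpuflags bit):

	/* Mark a vCPU stopped, then confirm via the same flag word. */
	static void example_mark_stopped(struct kvm_vcpu *vcpu)
	{
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
		WARN_ON(!is_vcpu_stopped(vcpu));
	}
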
guestdbg.c
59 static void enable_all_hw_bp(struct kvm_vcpu *vcpu) in enable_all_hw_bp() argument
62 u64 *cr9 = &vcpu->arch.sie_block->gcr[9]; in enable_all_hw_bp()
63 u64 *cr10 = &vcpu->arch.sie_block->gcr[10]; in enable_all_hw_bp()
64 u64 *cr11 = &vcpu->arch.sie_block->gcr[11]; in enable_all_hw_bp()
67 if (vcpu->arch.guestdbg.nr_hw_bp <= 0 || in enable_all_hw_bp()
68 vcpu->arch.guestdbg.hw_bp_info == NULL) in enable_all_hw_bp()
79 for (i = 0; i < vcpu->arch.guestdbg.nr_hw_bp; i++) { in enable_all_hw_bp()
80 start = vcpu->arch.guestdbg.hw_bp_info[i].addr; in enable_all_hw_bp()
81 len = vcpu->arch.guestdbg.hw_bp_info[i].len; in enable_all_hw_bp()
99 static void enable_all_hw_wp(struct kvm_vcpu *vcpu) in enable_all_hw_wp() argument
[all …]
/arch/x86/kvm/
kvm_cache_regs.h
13 static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
15 return vcpu->arch.regs[VCPU_REGS_##uname]; \
17 static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu, \
20 vcpu->arch.regs[VCPU_REGS_##uname] = val; \
40 static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu, in BUILD_KVM_GPR_ACCESSORS()
43 return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in BUILD_KVM_GPR_ACCESSORS()
46 static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu, in kvm_register_is_dirty() argument
49 return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty); in kvm_register_is_dirty()
52 static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu, in kvm_register_mark_available() argument
55 __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail); in kvm_register_mark_available()
[all …]
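
The first lines above are a macro template: each BUILD_KVM_GPR_ACCESSORS(lname, uname) invocation stamps out a read/write pair over vcpu->arch.regs[]. Expanded by hand for RAX (this is the expansion the template implies, not additional API):

	static __always_inline unsigned long kvm_rax_read(struct kvm_vcpu *vcpu)
	{
		return vcpu->arch.regs[VCPU_REGS_RAX];
	}

	static __always_inline void kvm_rax_write(struct kvm_vcpu *vcpu,
						  unsigned long val)
	{
		vcpu->arch.regs[VCPU_REGS_RAX] = val;
	}
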
x86.h
50 static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu) in kvm_clear_exception_queue() argument
52 vcpu->arch.exception.pending = false; in kvm_clear_exception_queue()
53 vcpu->arch.exception.injected = false; in kvm_clear_exception_queue()
56 static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector, in kvm_queue_interrupt() argument
59 vcpu->arch.interrupt.injected = true; in kvm_queue_interrupt()
60 vcpu->arch.interrupt.soft = soft; in kvm_queue_interrupt()
61 vcpu->arch.interrupt.nr = vector; in kvm_queue_interrupt()
64 static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu) in kvm_clear_interrupt_queue() argument
66 vcpu->arch.interrupt.injected = false; in kvm_clear_interrupt_queue()
69 static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu) in kvm_event_needs_reinjection() argument
[all …]
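
A hedged usage sketch of the queueing helper above (the wrapper name and vector value are arbitrary):

	/* Queue hardware interrupt vector 0x20 for injection on the next
	 * guest entry; soft=false marks it as a hardware interrupt rather
	 * than a software (INTn) one. */
	static void example_queue_timer_irq(struct kvm_vcpu *vcpu)
	{
		kvm_queue_interrupt(vcpu, 0x20, false);
	}
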
x86.c
88 ((struct kvm_vcpu *)(ctxt)->vcpu)
106 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
107 static void process_nmi(struct kvm_vcpu *vcpu);
108 static void process_smi(struct kvm_vcpu *vcpu);
109 static void enter_smm(struct kvm_vcpu *vcpu);
110 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
111 static void store_regs(struct kvm_vcpu *vcpu);
112 static int sync_regs(struct kvm_vcpu *vcpu);
262 static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr, in kvm_msr_ignored_check() argument
293 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
[all …]
/arch/mips/kvm/
emulate.c
41 static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc, in kvm_compute_return_epc() argument
46 struct kvm_vcpu_arch *arch = &vcpu->arch; in kvm_compute_return_epc()
57 err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word); in kvm_compute_return_epc()
244 enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause) in update_pc() argument
249 err = kvm_compute_return_epc(vcpu, vcpu->arch.pc, in update_pc()
250 &vcpu->arch.pc); in update_pc()
254 vcpu->arch.pc += 4; in update_pc()
257 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); in update_pc()
273 int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) in kvm_get_badinstr() argument
276 *out = vcpu->arch.host_cp0_badinstr; in kvm_get_badinstr()
[all …]
