Lines matching full:vcpu in arch/x86/kvm/x86.c
103 ((struct kvm_vcpu *)(ctxt)->vcpu)
125 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
126 static void process_nmi(struct kvm_vcpu *vcpu);
127 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
128 static void store_regs(struct kvm_vcpu *vcpu);
129 static int sync_regs(struct kvm_vcpu *vcpu);
130 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
132 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
133 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
272 STATS_DESC_COUNTER(VCPU, pf_taken),
273 STATS_DESC_COUNTER(VCPU, pf_fixed),
274 STATS_DESC_COUNTER(VCPU, pf_emulate),
275 STATS_DESC_COUNTER(VCPU, pf_spurious),
276 STATS_DESC_COUNTER(VCPU, pf_fast),
277 STATS_DESC_COUNTER(VCPU, pf_mmio_spte_created),
278 STATS_DESC_COUNTER(VCPU, pf_guest),
279 STATS_DESC_COUNTER(VCPU, tlb_flush),
280 STATS_DESC_COUNTER(VCPU, invlpg),
281 STATS_DESC_COUNTER(VCPU, exits),
282 STATS_DESC_COUNTER(VCPU, io_exits),
283 STATS_DESC_COUNTER(VCPU, mmio_exits),
284 STATS_DESC_COUNTER(VCPU, signal_exits),
285 STATS_DESC_COUNTER(VCPU, irq_window_exits),
286 STATS_DESC_COUNTER(VCPU, nmi_window_exits),
287 STATS_DESC_COUNTER(VCPU, l1d_flush),
288 STATS_DESC_COUNTER(VCPU, halt_exits),
289 STATS_DESC_COUNTER(VCPU, request_irq_exits),
290 STATS_DESC_COUNTER(VCPU, irq_exits),
291 STATS_DESC_COUNTER(VCPU, host_state_reload),
292 STATS_DESC_COUNTER(VCPU, fpu_reload),
293 STATS_DESC_COUNTER(VCPU, insn_emulation),
294 STATS_DESC_COUNTER(VCPU, insn_emulation_fail),
295 STATS_DESC_COUNTER(VCPU, hypercalls),
296 STATS_DESC_COUNTER(VCPU, irq_injections),
297 STATS_DESC_COUNTER(VCPU, nmi_injections),
298 STATS_DESC_COUNTER(VCPU, req_event),
299 STATS_DESC_COUNTER(VCPU, nested_run),
300 STATS_DESC_COUNTER(VCPU, directed_yield_attempted),
301 STATS_DESC_COUNTER(VCPU, directed_yield_successful),
302 STATS_DESC_COUNTER(VCPU, preemption_reported),
303 STATS_DESC_COUNTER(VCPU, preemption_other),
304 STATS_DESC_IBOOLEAN(VCPU, guest_mode),
305 STATS_DESC_COUNTER(VCPU, notify_window_exits),
355 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
359 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
474 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
476 return vcpu->arch.apic_base; in kvm_get_apic_base()
479 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) in kvm_get_apic_mode() argument
481 return kvm_apic_mode(kvm_get_apic_base(vcpu)); in kvm_get_apic_mode()
485 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
487 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); in kvm_set_apic_base()
489 u64 reserved_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu) | 0x2ff | in kvm_set_apic_base()
490 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); in kvm_set_apic_base()
501 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
502 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
573 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu, in kvm_deliver_exception_payload() argument
586 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
603 vcpu->arch.dr6 |= DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
604 vcpu->arch.dr6 |= ex->payload; in kvm_deliver_exception_payload()
605 vcpu->arch.dr6 ^= ex->payload & DR6_ACTIVE_LOW; in kvm_deliver_exception_payload()
613 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
616 vcpu->arch.cr2 = ex->payload; in kvm_deliver_exception_payload()
625 static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vector, in kvm_queue_exception_vmexit() argument
629 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit; in kvm_queue_exception_vmexit()
640 /* Forcibly leave the nested mode in cases like a vCPU reset */
641 static void kvm_leave_nested(struct kvm_vcpu *vcpu) in kvm_leave_nested() argument
643 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_leave_nested()
646 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, in kvm_multiple_exception() argument
653 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
662 if (!reinject && is_guest_mode(vcpu) && in kvm_multiple_exception()
663 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, nr, error_code)) { in kvm_multiple_exception()
664 kvm_queue_exception_vmexit(vcpu, nr, has_error, error_code, in kvm_multiple_exception()
669 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
679 WARN_ON_ONCE(kvm_is_exception_pending(vcpu)); in kvm_multiple_exception()
680 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
690 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
691 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
693 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
694 vcpu->arch.exception.vector = nr; in kvm_multiple_exception()
695 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
696 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
697 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
698 if (!is_guest_mode(vcpu)) in kvm_multiple_exception()
699 kvm_deliver_exception_payload(vcpu, in kvm_multiple_exception()
700 &vcpu->arch.exception); in kvm_multiple_exception()
705 prev_nr = vcpu->arch.exception.vector; in kvm_multiple_exception()
708 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
719 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
720 vcpu->arch.exception.pending = false; in kvm_multiple_exception()
722 kvm_queue_exception_e(vcpu, DF_VECTOR, 0); in kvm_multiple_exception()
731 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
733 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); in kvm_queue_exception()
737 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_requeue_exception() argument
739 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); in kvm_requeue_exception()
743 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_p() argument
746 kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); in kvm_queue_exception_p()
750 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_e_p() argument
753 kvm_multiple_exception(vcpu, nr, true, error_code, in kvm_queue_exception_e_p()
757 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
760 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
762 return kvm_skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
768 static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err) in complete_emulated_insn_gp() argument
771 kvm_inject_gp(vcpu, 0); in complete_emulated_insn_gp()
775 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP | in complete_emulated_insn_gp()
779 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
781 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
787 if (is_guest_mode(vcpu) && fault->async_page_fault) in kvm_inject_page_fault()
788 kvm_queue_exception_vmexit(vcpu, PF_VECTOR, in kvm_inject_page_fault()
792 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
796 void kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, in kvm_inject_emulated_page_fault() argument
802 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
803 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
811 kvm_mmu_invalidate_addr(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
814 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
818 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
820 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
821 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
824 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
826 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); in kvm_queue_exception_e()
830 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_requeue_exception_e() argument
832 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); in kvm_requeue_exception_e()
840 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
842 if (static_call(kvm_x86_get_cpl)(vcpu) <= required_cpl) in kvm_require_cpl()
844 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
848 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
850 if ((dr != 4 && dr != 5) || !kvm_is_cr4_bit_set(vcpu, X86_CR4_DE)) in kvm_require_dr()
853 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
858 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) in pdptr_rsvd_bits() argument
860 return vcpu->arch.reserved_gpa_bits | rsvd_bits(5, 8) | rsvd_bits(1, 2); in pdptr_rsvd_bits()
866 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) in load_pdptrs() argument
868 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in load_pdptrs()
879 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn), in load_pdptrs()
885 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(real_gpa), pdpte, in load_pdptrs()
892 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { in load_pdptrs()
902 kvm_mmu_free_roots(vcpu->kvm, mmu, KVM_MMU_ROOT_CURRENT); in load_pdptrs()
905 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in load_pdptrs()
906 kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu); in load_pdptrs()
907 vcpu->arch.pdptrs_from_userspace = false; in load_pdptrs()
913 static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_is_valid_cr0() argument
926 return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0); in kvm_is_valid_cr0()
929 void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) in kvm_post_set_cr0() argument
944 kvm_init_mmu(vcpu); in kvm_post_set_cr0()
950 kvm_clear_async_pf_completion_queue(vcpu); in kvm_post_set_cr0()
951 kvm_async_pf_hash_reset(vcpu); in kvm_post_set_cr0()
958 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_post_set_cr0()
962 kvm_mmu_reset_context(vcpu); in kvm_post_set_cr0()
965 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_post_set_cr0()
966 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_post_set_cr0()
967 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_post_set_cr0()
971 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
973 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
975 if (!kvm_is_valid_cr0(vcpu, cr0)) in kvm_set_cr0()
984 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
988 if (!is_pae(vcpu)) in kvm_set_cr0()
990 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
995 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
996 is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) && in kvm_set_cr0()
997 !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
1001 (is_64_bit_mode(vcpu) || kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE))) in kvm_set_cr0()
1004 static_call(kvm_x86_set_cr0)(vcpu, cr0); in kvm_set_cr0()
1006 kvm_post_set_cr0(vcpu, old_cr0, cr0); in kvm_set_cr0()
1012 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
1014 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
1018 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_guest_xsave_state() argument
1020 if (vcpu->arch.guest_state_protected) in kvm_load_guest_xsave_state()
1023 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_guest_xsave_state()
1025 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
1026 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
1028 if (guest_can_use(vcpu, X86_FEATURE_XSAVES) && in kvm_load_guest_xsave_state()
1029 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
1030 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
1034 vcpu->arch.pkru != vcpu->arch.host_pkru && in kvm_load_guest_xsave_state()
1035 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_guest_xsave_state()
1036 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) in kvm_load_guest_xsave_state()
1037 write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
1041 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_host_xsave_state() argument
1043 if (vcpu->arch.guest_state_protected) in kvm_load_host_xsave_state()
1047 ((vcpu->arch.xcr0 & XFEATURE_MASK_PKRU) || in kvm_load_host_xsave_state()
1048 kvm_is_cr4_bit_set(vcpu, X86_CR4_PKE))) { in kvm_load_host_xsave_state()
1049 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
1050 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
1051 write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
1054 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_host_xsave_state()
1056 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
1059 if (guest_can_use(vcpu, X86_FEATURE_XSAVES) && in kvm_load_host_xsave_state()
1060 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
1068 static inline u64 kvm_guest_supported_xfd(struct kvm_vcpu *vcpu) in kvm_guest_supported_xfd() argument
1070 return vcpu->arch.guest_supported_xcr0 & XFEATURE_MASK_USER_DYNAMIC; in kvm_guest_supported_xfd()
1074 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
1077 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
1093 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
1112 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
1115 kvm_update_cpuid_runtime(vcpu); in __kvm_set_xcr()
1119 int kvm_emulate_xsetbv(struct kvm_vcpu *vcpu) in kvm_emulate_xsetbv() argument
1122 if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || in kvm_emulate_xsetbv()
1123 __kvm_set_xcr(vcpu, kvm_rcx_read(vcpu), kvm_read_edx_eax(vcpu))) { in kvm_emulate_xsetbv()
1124 kvm_inject_gp(vcpu, 0); in kvm_emulate_xsetbv()
1128 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_xsetbv()
1132 bool __kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in __kvm_is_valid_cr4() argument
1137 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in __kvm_is_valid_cr4()
1144 static bool kvm_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_is_valid_cr4() argument
1146 return __kvm_is_valid_cr4(vcpu, cr4) && in kvm_is_valid_cr4()
1147 static_call(kvm_x86_is_valid_cr4)(vcpu, cr4); in kvm_is_valid_cr4()
1150 void kvm_post_set_cr4(struct kvm_vcpu *vcpu, unsigned long old_cr4, unsigned long cr4) in kvm_post_set_cr4() argument
1153 kvm_mmu_reset_context(vcpu); in kvm_post_set_cr4()
1165 kvm_mmu_unload(vcpu); in kvm_post_set_cr4()
1177 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_post_set_cr4()
1187 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_post_set_cr4()
1192 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
1194 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
1196 if (!kvm_is_valid_cr4(vcpu, cr4)) in kvm_set_cr4()
1199 if (is_long_mode(vcpu)) { in kvm_set_cr4()
1204 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
1206 && !load_pdptrs(vcpu, kvm_read_cr3(vcpu))) in kvm_set_cr4()
1211 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
1215 static_call(kvm_x86_set_cr4)(vcpu, cr4); in kvm_set_cr4()
1217 kvm_post_set_cr4(vcpu, old_cr4, cr4); in kvm_set_cr4()
1223 static void kvm_invalidate_pcid(struct kvm_vcpu *vcpu, unsigned long pcid) in kvm_invalidate_pcid() argument
1225 struct kvm_mmu *mmu = vcpu->arch.mmu; in kvm_invalidate_pcid()
1237 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_invalidate_pcid()
1246 if (kvm_get_active_pcid(vcpu) == pcid) { in kvm_invalidate_pcid()
1247 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu); in kvm_invalidate_pcid()
1248 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_invalidate_pcid()
1256 if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) in kvm_invalidate_pcid()
1260 if (kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd) == pcid) in kvm_invalidate_pcid()
1263 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free); in kvm_invalidate_pcid()
1266 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
1271 if (kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)) { in kvm_set_cr3()
1279 if (cr3 == kvm_read_cr3(vcpu) && !is_pae_paging(vcpu)) in kvm_set_cr3()
1285 * the current vCPU mode is accurate. in kvm_set_cr3()
1287 if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) in kvm_set_cr3()
1290 if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) in kvm_set_cr3()
1293 if (cr3 != kvm_read_cr3(vcpu)) in kvm_set_cr3()
1294 kvm_mmu_new_pgd(vcpu, cr3); in kvm_set_cr3()
1296 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1297 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in kvm_set_cr3()
1309 kvm_invalidate_pcid(vcpu, pcid); in kvm_set_cr3()
1315 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
1319 if (lapic_in_kernel(vcpu)) in kvm_set_cr8()
1320 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
1322 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1327 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
1329 if (lapic_in_kernel(vcpu)) in kvm_get_cr8()
1330 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
1332 return vcpu->arch.cr8; in kvm_get_cr8()
1336 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
1340 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1342 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1346 void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
1350 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1351 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1353 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1354 static_call(kvm_x86_set_dr7)(vcpu, dr7); in kvm_update_dr7()
1355 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1357 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1361 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
1365 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) in kvm_dr6_fixed()
1368 if (!guest_cpuid_has(vcpu, X86_FEATURE_BUS_LOCK_DETECT)) in kvm_dr6_fixed()
1373 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
1375 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_set_dr()
1379 vcpu->arch.db[array_index_nospec(dr, size)] = val; in kvm_set_dr()
1380 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_set_dr()
1381 vcpu->arch.eff_db[dr] = val; in kvm_set_dr()
1387 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in kvm_set_dr()
1393 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in kvm_set_dr()
1394 kvm_update_dr7(vcpu); in kvm_set_dr()
1402 void kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) in kvm_get_dr() argument
1404 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1408 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1412 *val = vcpu->arch.dr6; in kvm_get_dr()
1416 *val = vcpu->arch.dr7; in kvm_get_dr()
1422 int kvm_emulate_rdpmc(struct kvm_vcpu *vcpu) in kvm_emulate_rdpmc() argument
1424 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdpmc()
1427 if (kvm_pmu_rdpmc(vcpu, ecx, &data)) { in kvm_emulate_rdpmc()
1428 kvm_inject_gp(vcpu, 0); in kvm_emulate_rdpmc()
1432 kvm_rax_write(vcpu, (u32)data); in kvm_emulate_rdpmc()
1433 kvm_rdx_write(vcpu, data >> 32); in kvm_emulate_rdpmc()
1434 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_rdpmc()
1590 * patch, are immutable once the vCPU model is defined.
1702 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr_feature() argument
1725 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in __kvm_valid_efer() argument
1727 if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS)) in __kvm_valid_efer()
1730 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) in __kvm_valid_efer()
1733 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) in __kvm_valid_efer()
1737 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) in __kvm_valid_efer()
1740 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) in __kvm_valid_efer()
1746 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
1751 return __kvm_valid_efer(vcpu, efer); in kvm_valid_efer()
1755 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_efer() argument
1757 u64 old_efer = vcpu->arch.efer; in set_efer()
1765 if (!__kvm_valid_efer(vcpu, efer)) in set_efer()
1768 if (is_paging(vcpu) && in set_efer()
1769 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1774 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1776 r = static_call(kvm_x86_set_efer)(vcpu, efer); in set_efer()
1783 kvm_mmu_reset_context(vcpu); in set_efer()
1794 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) in kvm_msr_allowed() argument
1798 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1843 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, in __kvm_set_msr() argument
1854 if (is_noncanonical_address(data, vcpu)) in __kvm_set_msr()
1871 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu)); in __kvm_set_msr()
1878 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && in __kvm_set_msr()
1879 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) in __kvm_set_msr()
1891 if (guest_cpuid_is_intel(vcpu) && (data >> 32) != 0) in __kvm_set_msr()
1902 return static_call(kvm_x86_set_msr)(vcpu, &msr); in __kvm_set_msr()
1905 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_set_msr_ignored_check() argument
1908 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); in kvm_set_msr_ignored_check()
1923 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, in __kvm_get_msr() argument
1935 !guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) && in __kvm_get_msr()
1936 !guest_cpuid_has(vcpu, X86_FEATURE_RDPID)) in __kvm_get_msr()
1944 ret = static_call(kvm_x86_get_msr)(vcpu, &msr); in __kvm_get_msr()
1950 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_get_msr_ignored_check() argument
1953 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); in kvm_get_msr_ignored_check()
1965 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data) in kvm_get_msr_with_filter() argument
1967 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) in kvm_get_msr_with_filter()
1969 return kvm_get_msr_ignored_check(vcpu, index, data, false); in kvm_get_msr_with_filter()
1972 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data) in kvm_set_msr_with_filter() argument
1974 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) in kvm_set_msr_with_filter()
1976 return kvm_set_msr_ignored_check(vcpu, index, data, false); in kvm_set_msr_with_filter()
1979 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) in kvm_get_msr() argument
1981 return kvm_get_msr_ignored_check(vcpu, index, data, false); in kvm_get_msr()
1985 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) in kvm_set_msr() argument
1987 return kvm_set_msr_ignored_check(vcpu, index, data, false); in kvm_set_msr()
1991 static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu) in complete_userspace_rdmsr() argument
1993 if (!vcpu->run->msr.error) { in complete_userspace_rdmsr()
1994 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_userspace_rdmsr()
1995 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_userspace_rdmsr()
1999 static int complete_emulated_msr_access(struct kvm_vcpu *vcpu) in complete_emulated_msr_access() argument
2001 return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error); in complete_emulated_msr_access()
2004 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) in complete_emulated_rdmsr() argument
2006 complete_userspace_rdmsr(vcpu); in complete_emulated_rdmsr()
2007 return complete_emulated_msr_access(vcpu); in complete_emulated_rdmsr()
2010 static int complete_fast_msr_access(struct kvm_vcpu *vcpu) in complete_fast_msr_access() argument
2012 return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error); in complete_fast_msr_access()
2015 static int complete_fast_rdmsr(struct kvm_vcpu *vcpu) in complete_fast_rdmsr() argument
2017 complete_userspace_rdmsr(vcpu); in complete_fast_rdmsr()
2018 return complete_fast_msr_access(vcpu); in complete_fast_rdmsr()
2033 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, in kvm_msr_user_space() argument
2035 int (*completion)(struct kvm_vcpu *vcpu), in kvm_msr_user_space() argument
2041 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
2044 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
2045 vcpu->run->msr.error = 0; in kvm_msr_user_space()
2046 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
2047 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
2048 vcpu->run->msr.index = index; in kvm_msr_user_space()
2049 vcpu->run->msr.data = data; in kvm_msr_user_space()
2050 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
2055 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) in kvm_emulate_rdmsr() argument
2057 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdmsr()
2061 r = kvm_get_msr_with_filter(vcpu, ecx, &data); in kvm_emulate_rdmsr()
2066 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
2067 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
2070 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0, in kvm_emulate_rdmsr()
2076 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); in kvm_emulate_rdmsr()
2080 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) in kvm_emulate_wrmsr() argument
2082 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_wrmsr()
2083 u64 data = kvm_read_edx_eax(vcpu); in kvm_emulate_wrmsr()
2086 r = kvm_set_msr_with_filter(vcpu, ecx, data); in kvm_emulate_wrmsr()
2092 if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data, in kvm_emulate_wrmsr()
2101 return static_call(kvm_x86_complete_emulated_msr)(vcpu, r); in kvm_emulate_wrmsr()
2105 int kvm_emulate_as_nop(struct kvm_vcpu *vcpu) in kvm_emulate_as_nop() argument
2107 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_as_nop()
2110 int kvm_emulate_invd(struct kvm_vcpu *vcpu) in kvm_emulate_invd() argument
2113 return kvm_emulate_as_nop(vcpu); in kvm_emulate_invd()
2117 int kvm_handle_invalid_op(struct kvm_vcpu *vcpu) in kvm_handle_invalid_op() argument
2119 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_handle_invalid_op()
2125 static int kvm_emulate_monitor_mwait(struct kvm_vcpu *vcpu, const char *insn) in kvm_emulate_monitor_mwait() argument
2127 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS) && in kvm_emulate_monitor_mwait()
2128 !guest_cpuid_has(vcpu, X86_FEATURE_MWAIT)) in kvm_emulate_monitor_mwait()
2129 return kvm_handle_invalid_op(vcpu); in kvm_emulate_monitor_mwait()
2132 return kvm_emulate_as_nop(vcpu); in kvm_emulate_monitor_mwait()
2134 int kvm_emulate_mwait(struct kvm_vcpu *vcpu) in kvm_emulate_mwait() argument
2136 return kvm_emulate_monitor_mwait(vcpu, "MWAIT"); in kvm_emulate_mwait()
2140 int kvm_emulate_monitor(struct kvm_vcpu *vcpu) in kvm_emulate_monitor() argument
2142 return kvm_emulate_monitor_mwait(vcpu, "MONITOR"); in kvm_emulate_monitor()
2146 static inline bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) in kvm_vcpu_exit_request() argument
2149 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
2160 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_x2apic_icr_irqoff() argument
2162 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
2169 return kvm_x2apic_icr_write(vcpu->arch.apic, data); in handle_fastpath_set_x2apic_icr_irqoff()
2174 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_tscdeadline() argument
2176 if (!kvm_can_use_hv_timer(vcpu)) in handle_fastpath_set_tscdeadline()
2179 kvm_set_lapic_tscdeadline_msr(vcpu, data); in handle_fastpath_set_tscdeadline()
2183 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) in handle_fastpath_set_msr_irqoff() argument
2185 u32 msr = kvm_rcx_read(vcpu); in handle_fastpath_set_msr_irqoff()
2189 kvm_vcpu_srcu_read_lock(vcpu); in handle_fastpath_set_msr_irqoff()
2193 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
2194 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { in handle_fastpath_set_msr_irqoff()
2195 kvm_skip_emulated_instruction(vcpu); in handle_fastpath_set_msr_irqoff()
2200 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
2201 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { in handle_fastpath_set_msr_irqoff()
2202 kvm_skip_emulated_instruction(vcpu); in handle_fastpath_set_msr_irqoff()
2213 kvm_vcpu_srcu_read_unlock(vcpu); in handle_fastpath_set_msr_irqoff()
2222 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr() argument
2224 return kvm_get_msr_ignored_check(vcpu, index, data, true); in do_get_msr()
2227 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
2233 * not support modifying the guest vCPU model on the fly, e.g. changing in do_set_msr()
2238 if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) { in do_set_msr()
2239 if (do_get_msr(vcpu, index, &val) || *data != val) in do_set_msr()
2245 return kvm_set_msr_ignored_check(vcpu, index, *data, true); in do_set_msr()
2360 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, in kvm_write_system_time() argument
2363 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2365 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2367 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_write_system_time()
2372 vcpu->arch.time = system_time; in kvm_write_system_time()
2373 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_write_system_time()
2377 kvm_gpc_activate(&vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2380 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvm_write_system_time()
2433 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier);
2435 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in set_tsc_khz() argument
2441 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); in set_tsc_khz()
2448 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2449 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2467 kvm_vcpu_write_tsc_multiplier(vcpu, ratio); in set_tsc_khz()
2471 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) in kvm_set_tsc_khz() argument
2479 kvm_vcpu_write_tsc_multiplier(vcpu, kvm_caps.default_tsc_scaling_ratio); in kvm_set_tsc_khz()
2485 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2486 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2487 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2502 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); in kvm_set_tsc_khz()
2505 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
2507 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2508 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2509 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2510 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2521 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) in kvm_track_tsc_matching() argument
2525 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2529 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2541 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
2543 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2544 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2574 static u64 kvm_compute_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in kvm_compute_l1_tsc_offset() argument
2578 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio); in kvm_compute_l1_tsc_offset()
2583 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in kvm_read_l1_tsc() argument
2585 return vcpu->arch.l1_tsc_offset + in kvm_read_l1_tsc()
2586 kvm_scale_tsc(host_tsc, vcpu->arch.l1_tsc_scaling_ratio); in kvm_read_l1_tsc()
2615 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset) in kvm_vcpu_write_tsc_offset() argument
2617 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in kvm_vcpu_write_tsc_offset()
2618 vcpu->arch.l1_tsc_offset, in kvm_vcpu_write_tsc_offset()
2621 vcpu->arch.l1_tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2628 if (is_guest_mode(vcpu)) in kvm_vcpu_write_tsc_offset()
2629 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset( in kvm_vcpu_write_tsc_offset()
2631 static_call(kvm_x86_get_l2_tsc_offset)(vcpu), in kvm_vcpu_write_tsc_offset()
2632 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); in kvm_vcpu_write_tsc_offset()
2634 vcpu->arch.tsc_offset = l1_offset; in kvm_vcpu_write_tsc_offset()
2636 static_call(kvm_x86_write_tsc_offset)(vcpu); in kvm_vcpu_write_tsc_offset()
2639 static void kvm_vcpu_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 l1_multiplier) in kvm_vcpu_write_tsc_multiplier() argument
2641 vcpu->arch.l1_tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2644 if (is_guest_mode(vcpu)) in kvm_vcpu_write_tsc_multiplier()
2645 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier( in kvm_vcpu_write_tsc_multiplier()
2647 static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu)); in kvm_vcpu_write_tsc_multiplier()
2649 vcpu->arch.tsc_scaling_ratio = l1_multiplier; in kvm_vcpu_write_tsc_multiplier()
2652 static_call(kvm_x86_write_tsc_multiplier)(vcpu); in kvm_vcpu_write_tsc_multiplier()
2670 * offset for the vcpu and tracks the TSC matching generation that the vcpu
2673 static void __kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 offset, u64 tsc, in __kvm_synchronize_tsc() argument
2676 struct kvm *kvm = vcpu->kvm; in __kvm_synchronize_tsc()
2686 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in __kvm_synchronize_tsc()
2689 vcpu->arch.last_guest_tsc = tsc; in __kvm_synchronize_tsc()
2691 kvm_vcpu_write_tsc_offset(vcpu, offset); in __kvm_synchronize_tsc()
2708 } else if (vcpu->arch.this_tsc_generation != kvm->arch.cur_tsc_generation) { in __kvm_synchronize_tsc()
2712 /* Keep track of which generation this VCPU has synchronized to */ in __kvm_synchronize_tsc()
2713 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in __kvm_synchronize_tsc()
2714 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in __kvm_synchronize_tsc()
2715 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in __kvm_synchronize_tsc()
2717 kvm_track_tsc_matching(vcpu); in __kvm_synchronize_tsc()
2720 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) in kvm_synchronize_tsc() argument
2722 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2729 offset = kvm_compute_l1_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2733 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2736 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2743 nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2744 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2762 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2766 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2768 offset = kvm_compute_l1_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2773 __kvm_synchronize_tsc(vcpu, offset, data, ns, matched); in kvm_synchronize_tsc()
2777 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, in adjust_tsc_offset_guest() argument
2780 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2781 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); in adjust_tsc_offset_guest()
2784 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) in adjust_tsc_offset_host() argument
2786 if (vcpu->arch.l1_tsc_scaling_ratio != kvm_caps.default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2789 vcpu->arch.l1_tsc_scaling_ratio); in adjust_tsc_offset_host()
2790 adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
3008 struct kvm_vcpu *vcpu; in kvm_end_pvclock_update() local
3013 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_end_pvclock_update()
3014 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_end_pvclock_update()
3017 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_end_pvclock_update()
3018 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_end_pvclock_update()
3104 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_guest_pvclock() local
3121 * This VCPU is paused, but it's legal for a guest to read another in kvm_setup_guest_pvclock()
3122 * VCPU's kvmclock, so we really have to follow the specification where in kvm_setup_guest_pvclock()
3127 guest_hv_clock->version = vcpu->hv_clock.version = (guest_hv_clock->version + 1) | 1; in kvm_setup_guest_pvclock()
3131 vcpu->hv_clock.flags |= (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_guest_pvclock()
3133 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_guest_pvclock()
3134 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_guest_pvclock()
3135 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_guest_pvclock()
3138 memcpy(guest_hv_clock, &vcpu->hv_clock, sizeof(*guest_hv_clock)); in kvm_setup_guest_pvclock()
3141 guest_hv_clock->version = ++vcpu->hv_clock.version; in kvm_setup_guest_pvclock()
3146 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_guest_pvclock()
3153 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
3195 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
3201 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
3217 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
3219 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
3220 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
3221 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
3225 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
3226 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
3227 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
3234 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
3236 if (vcpu->pv_time.active) in kvm_guest_time_update()
3237 kvm_setup_guest_pvclock(v, &vcpu->pv_time, 0); in kvm_guest_time_update()
3238 if (vcpu->xen.vcpu_info_cache.active) in kvm_guest_time_update()
3239 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_info_cache, in kvm_guest_time_update()
3241 if (vcpu->xen.vcpu_time_info_cache.active) in kvm_guest_time_update()
3242 kvm_setup_guest_pvclock(v, &vcpu->xen.vcpu_time_info_cache, 0); in kvm_guest_time_update()
3243 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
3248 * kvmclock updates which are isolated to a given vcpu, such as
3249 * vcpu->cpu migration, should not allow system_timestamp from
3251 * correction applies to one vcpu's system_timestamp but not
3257 * The time for a remote vcpu to update its kvmclock is bound
3270 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
3272 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
3273 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
3274 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
3317 static bool can_set_mci_status(struct kvm_vcpu *vcpu) in can_set_mci_status() argument
3320 if (guest_cpuid_is_amd_compatible(vcpu)) in can_set_mci_status()
3321 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
3326 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_msr_mce() argument
3328 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
3336 vcpu->arch.mcg_status = data; in set_msr_mce()
3344 vcpu->arch.mcg_ctl = data; in set_msr_mce()
3358 vcpu->arch.mci_ctl2_banks[offset] = data; in set_msr_mce()
3385 data != 0 && !can_set_mci_status(vcpu)) in set_msr_mce()
3390 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
3398 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) in kvm_pv_async_pf_enabled() argument
3402 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
3405 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
3413 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && in kvm_pv_enable_async_pf()
3417 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && in kvm_pv_enable_async_pf()
3421 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf()
3424 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
3426 if (!kvm_pv_async_pf_enabled(vcpu)) { in kvm_pv_enable_async_pf()
3427 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
3428 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
3432 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
3436 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
3437 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
3439 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
3444 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf_int() argument
3450 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf_int()
3453 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3455 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3460 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
3462 kvm_gpc_deactivate(&vcpu->arch.pv_time); in kvmclock_reset()
3463 vcpu->arch.time = 0; in kvmclock_reset()
3466 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_all() argument
3468 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3469 static_call(kvm_x86_flush_tlb_all)(vcpu); in kvm_vcpu_flush_tlb_all()
3472 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_vcpu_flush_tlb_all()
3475 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_guest() argument
3477 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3486 kvm_mmu_sync_roots(vcpu); in kvm_vcpu_flush_tlb_guest()
3487 kvm_mmu_sync_prev_roots(vcpu); in kvm_vcpu_flush_tlb_guest()
3490 static_call(kvm_x86_flush_tlb_guest)(vcpu); in kvm_vcpu_flush_tlb_guest()
3496 kvm_hv_vcpu_purge_flush_tlb(vcpu); in kvm_vcpu_flush_tlb_guest()
3500 static inline void kvm_vcpu_flush_tlb_current(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_current() argument
3502 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_current()
3503 static_call(kvm_x86_flush_tlb_current)(vcpu); in kvm_vcpu_flush_tlb_current()
3512 void kvm_service_local_tlb_flush_requests(struct kvm_vcpu *vcpu) in kvm_service_local_tlb_flush_requests() argument
3514 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) in kvm_service_local_tlb_flush_requests()
3515 kvm_vcpu_flush_tlb_current(vcpu); in kvm_service_local_tlb_flush_requests()
3517 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) in kvm_service_local_tlb_flush_requests()
3518 kvm_vcpu_flush_tlb_guest(vcpu); in kvm_service_local_tlb_flush_requests()
3522 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
3524 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3527 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3531 if (kvm_xen_msr_enabled(vcpu->kvm)) { in record_steal_time()
3532 kvm_xen_runstate_set_running(vcpu); in record_steal_time()
3536 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3539 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) in record_steal_time()
3542 slots = kvm_memslots(vcpu->kvm); in record_steal_time()
3550 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || in record_steal_time()
3560 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { in record_steal_time()
3579 vcpu->arch.st.preempted = 0; in record_steal_time()
3581 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3584 kvm_vcpu_flush_tlb_guest(vcpu); in record_steal_time()
3593 vcpu->arch.st.preempted = 0; in record_steal_time()
3607 vcpu->arch.st.last_steal; in record_steal_time()
3608 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3617 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in record_steal_time()
3632 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
3637 if (msr && msr == vcpu->kvm->arch.xen_hvm_config.msr) in kvm_set_msr_common()
3638 return kvm_xen_write_hypercall_page(vcpu, data); in kvm_set_msr_common()
3653 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3658 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3668 * disallows changing feature MSRs after the vCPU has run; PMU in kvm_set_msr_common()
3669 * refresh will bug the VM if called after the vCPU has run. in kvm_set_msr_common()
3671 if (vcpu->arch.perf_capabilities == data) in kvm_set_msr_common()
3674 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3675 kvm_pmu_refresh(vcpu); in kvm_set_msr_common()
3678 if (!msr_info->host_initiated && !guest_has_pred_cmd_msr(vcpu)) in kvm_set_msr_common()
3690 !guest_cpuid_has(vcpu, X86_FEATURE_FLUSH_L1D)) in kvm_set_msr_common()
3701 return set_efer(vcpu, msr_info); in kvm_set_msr_common()
3709 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3711 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
3717 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
3725 vcpu->arch.pat = data; in kvm_set_msr_common()
3729 return kvm_mtrr_set_msr(vcpu, msr, data); in kvm_set_msr_common()
3731 return kvm_set_apic_base(vcpu, msr_info); in kvm_set_msr_common()
3733 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
3735 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
3738 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { in kvm_set_msr_common()
3740 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3741 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3745 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
3747 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3751 u64 old_val = vcpu->arch.ia32_misc_enable_msr; in kvm_set_msr_common()
3763 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3765 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) in kvm_set_msr_common()
3767 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3768 kvm_update_cpuid_runtime(vcpu); in kvm_set_msr_common()
3770 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3777 vcpu->arch.smbase = data; in kvm_set_msr_common()
3780 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3784 kvm_synchronize_tsc(vcpu, data); in kvm_set_msr_common()
3786 u64 adj = kvm_compute_l1_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3787 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3788 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3793 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_set_msr_common()
3802 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3803 kvm_update_cpuid_runtime(vcpu); in kvm_set_msr_common()
3808 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3811 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3814 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3815 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3818 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3821 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
3822 kvm_write_wall_clock(vcpu->kvm, data, 0); in kvm_set_msr_common()
3825 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3828 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3831 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3834 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3837 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_set_msr_common()
3840 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
3844 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3847 if (kvm_pv_enable_async_pf_int(vcpu, data)) in kvm_set_msr_common()
3851 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3854 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3855 kvm_check_async_pf_completion(vcpu); in kvm_set_msr_common()
3859 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_set_msr_common()
3868 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3873 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
3877 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_set_msr_common()
3880 if (kvm_lapic_set_pv_eoi(vcpu, data, sizeof(u8))) in kvm_set_msr_common()
3885 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_set_msr_common()
3892 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3899 return set_msr_mce(vcpu, msr_info); in kvm_set_msr_common()
3905 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
3906 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
3909 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
3931 return kvm_hv_set_msr_common(vcpu, msr, data, in kvm_set_msr_common()
3937 kvm_pr_unimpl_wrmsr(vcpu, msr, data); in kvm_set_msr_common()
3940 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
3942 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3945 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
3947 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3952 cpuid_fault_enabled(vcpu))) in kvm_set_msr_common()
3954 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3959 !supports_cpuid_fault(vcpu))) in kvm_set_msr_common()
3961 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3966 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) in kvm_set_msr_common()
3969 if (data & ~kvm_guest_supported_xfd(vcpu)) in kvm_set_msr_common()
3972 fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data); in kvm_set_msr_common()
3976 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) in kvm_set_msr_common()
3979 if (data & ~kvm_guest_supported_xfd(vcpu)) in kvm_set_msr_common()
3982 vcpu->arch.guest_fpu.xfd_err = data; in kvm_set_msr_common()
3986 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
3987 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
4003 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) in get_msr_mce() argument
4006 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
4016 data = vcpu->arch.mcg_cap; in get_msr_mce()
4021 data = vcpu->arch.mcg_ctl; in get_msr_mce()
4024 data = vcpu->arch.mcg_status; in get_msr_mce()
4035 data = vcpu->arch.mci_ctl2_banks[offset]; in get_msr_mce()
4044 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
4053 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_get_msr_common() argument
4091 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4092 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
4096 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
4100 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) in kvm_get_msr_common()
4102 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
4106 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) in kvm_get_msr_common()
4108 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
4111 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
4126 offset = vcpu->arch.l1_tsc_offset; in kvm_get_msr_common()
4127 ratio = vcpu->arch.l1_tsc_scaling_ratio; in kvm_get_msr_common()
4129 offset = vcpu->arch.tsc_offset; in kvm_get_msr_common()
4130 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_get_msr_common()
4137 msr_info->data = vcpu->arch.pat; in kvm_get_msr_common()
4142 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4161 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
4164 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
4166 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
4169 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
4172 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
4177 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
4180 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
4189 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
4192 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
4195 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4198 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
4201 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
4204 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
4207 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4210 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
4213 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
4216 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_get_msr_common()
4219 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
4222 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
4225 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
4228 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
4234 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_get_msr_common()
4237 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
4240 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_get_msr_common()
4243 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
4246 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_get_msr_common()
4249 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
4258 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
4262 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_get_msr_common()
4264 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
4288 return kvm_hv_get_msr_common(vcpu, in kvm_get_msr_common()
4305 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
4307 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
4310 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
4312 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
4316 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
4318 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
4321 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
4324 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
4329 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) in kvm_get_msr_common()
4332 msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd; in kvm_get_msr_common()
4336 !guest_cpuid_has(vcpu, X86_FEATURE_XFD)) in kvm_get_msr_common()
4339 msr_info->data = vcpu->arch.guest_fpu.xfd_err; in kvm_get_msr_common()
4343 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
4344 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
4367 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
4369 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
4375 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
4386 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
4387 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
4411 r = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
4428 static int kvm_ioctl_get_supported_hv_cpuid(struct kvm_vcpu *vcpu, in kvm_ioctl_get_supported_hv_cpuid() argument
4438 r = kvm_get_hv_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_ioctl_get_supported_hv_cpuid()
4804 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
4806 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4809 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
4812 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
4814 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4815 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4816 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4820 static_call(kvm_x86_vcpu_load)(vcpu, cpu); in kvm_arch_vcpu_load()
4823 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4826 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4827 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4828 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4829 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
4832 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
4833 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4834 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4839 u64 offset = kvm_compute_l1_tsc_offset(vcpu, in kvm_arch_vcpu_load()
4840 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4841 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
4842 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4845 if (kvm_lapic_hv_timer_in_use(vcpu)) in kvm_arch_vcpu_load()
4846 kvm_lapic_restart_hv_timer(vcpu); in kvm_arch_vcpu_load()
4850 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
4852 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4853 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
4854 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4855 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); in kvm_arch_vcpu_load()
4856 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4859 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
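When the vCPU lands on a different host CPU with an unstable TSC, the block above recomputes the L1 TSC offset from the last guest-visible TSC so the guest clock never appears to run backwards. A sketch of that arithmetic, taking the already-scaled host TSC as an input (the kernel's fixed-point scaling helper is not reproduced here):

#include <stdint.h>

/* Conceptually: guest_tsc = scale(host_tsc) + offset, so pick the offset
 * that makes the scaled current host TSC line up with the last TSC value
 * the guest saw before it was descheduled. */
static inline int64_t compute_l1_tsc_offset(uint64_t last_guest_tsc,
					    uint64_t scaled_host_tsc)
{
	return (int64_t)(last_guest_tsc - scaled_host_tsc);
}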
4862 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) in kvm_steal_time_set_preempted() argument
4864 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
4868 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in kvm_steal_time_set_preempted()
4871 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
4874 * when this is true, for example allowing the vCPU to be marked in kvm_steal_time_set_preempted()
4877 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
4878 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
4882 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
4883 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4886 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4890 if (unlikely(current->mm != vcpu->kvm->mm)) in kvm_steal_time_set_preempted()
4893 slots = kvm_memslots(vcpu->kvm); in kvm_steal_time_set_preempted()
4904 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4906 mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); in kvm_steal_time_set_preempted()
4909 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
4913 if (vcpu->preempted) { in kvm_arch_vcpu_put()
4914 if (!vcpu->arch.guest_state_protected) in kvm_arch_vcpu_put()
4915 vcpu->arch.preempted_in_kernel = !static_call(kvm_x86_get_cpl)(vcpu); in kvm_arch_vcpu_put()
4921 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4922 if (kvm_xen_msr_enabled(vcpu->kvm)) in kvm_arch_vcpu_put()
4923 kvm_xen_runstate_set_preempted(vcpu); in kvm_arch_vcpu_put()
4925 kvm_steal_time_set_preempted(vcpu); in kvm_arch_vcpu_put()
4926 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4929 static_call(kvm_x86_vcpu_put)(vcpu); in kvm_arch_vcpu_put()
4930 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
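kvm_steal_time_set_preempted() above flips the preempted byte in the guest's steal-time record; the guest's paravirtualized vcpu_is_preempted() reads it back. A guest-side sketch of that check, using a local copy of the x86 steal-time layout (field order mirrors the uapi record; the _sketch suffix marks these names as illustrative):

#include <stdint.h>

struct kvm_steal_time_sketch {
	uint64_t steal;
	uint32_t version;
	uint32_t flags;
	uint8_t  preempted;	/* host sets KVM_VCPU_PREEMPTED (bit 0) here */
	uint8_t  u8_pad[3];
	uint32_t pad[11];
};

static inline int vcpu_is_preempted_sketch(const volatile struct kvm_steal_time_sketch *st)
{
	return st->preempted & 0x1;	/* KVM_VCPU_PREEMPTED */
}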
4933 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
4936 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in kvm_vcpu_ioctl_get_lapic()
4938 return kvm_apic_get_state(vcpu, s); in kvm_vcpu_ioctl_get_lapic()
4941 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
4946 r = kvm_apic_set_state(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
4949 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
4954 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) in kvm_cpu_accept_dm_intr() argument
4962 if (kvm_cpu_has_extint(vcpu)) in kvm_cpu_accept_dm_intr()
4966 return (!lapic_in_kernel(vcpu) || in kvm_cpu_accept_dm_intr()
4967 kvm_apic_accept_pic_intr(vcpu)); in kvm_cpu_accept_dm_intr()
4970 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) in kvm_vcpu_ready_for_interrupt_injection() argument
4979 return (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4980 kvm_cpu_accept_dm_intr(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4981 !kvm_event_needs_reinjection(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4982 !kvm_is_exception_pending(vcpu)); in kvm_vcpu_ready_for_interrupt_injection()
4985 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
4991 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4992 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4993 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
5001 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
5004 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
5007 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
5008 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
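kvm_vcpu_ioctl_interrupt() is the kernel side of the KVM_INTERRUPT ioctl, used when the interrupt controller is emulated in userspace. A minimal caller sketch; vcpu_fd and the vector value are assumptions of the example:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Queue one external interrupt vector for injection on the next entry. */
static int inject_extint(int vcpu_fd, unsigned int vector)
{
	struct kvm_interrupt irq = { .irq = vector };

	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}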
5012 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
5014 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
5019 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
5024 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
5028 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
5040 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
5043 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5046 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
5048 vcpu->arch.mci_ctl2_banks[bank] = 0; in kvm_vcpu_ioctl_x86_setup_mce()
5051 kvm_apic_after_set_mcg_cap(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
5053 static_call(kvm_x86_setup_mce)(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
5075 static int kvm_vcpu_x86_set_ucna(struct kvm_vcpu *vcpu, struct kvm_x86_mce *mce, u64* banks) in kvm_vcpu_x86_set_ucna() argument
5077 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_x86_set_ucna()
5082 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_x86_set_ucna()
5085 !(vcpu->arch.mci_ctl2_banks[mce->bank] & MCI_CTL2_CMCI_EN)) in kvm_vcpu_x86_set_ucna()
5088 if (lapic_in_kernel(vcpu)) in kvm_vcpu_x86_set_ucna()
5089 kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTCMCI); in kvm_vcpu_x86_set_ucna()
5094 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
5097 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
5099 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
5107 return kvm_vcpu_x86_set_ucna(vcpu, mce, banks); in kvm_vcpu_ioctl_x86_set_mce()
5114 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
5123 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
5124 !kvm_is_cr4_bit_set(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
5125 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
5132 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
5134 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
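kvm_vcpu_ioctl_x86_setup_mce() and kvm_vcpu_ioctl_x86_set_mce() back the KVM_X86_SETUP_MCE and KVM_X86_SET_MCE ioctls. A hedged userspace sketch of injecting an uncorrected error into one bank; the exact status/mcg_status bit choice is illustrative, not a recommended recipe:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int inject_mce(int vcpu_fd, unsigned char bank, unsigned long long addr)
{
	struct kvm_x86_mce mce = {
		.status = (1ULL << 63) |	/* MCi_STATUS.VAL   */
			  (1ULL << 61) |	/* MCi_STATUS.UC    */
			  (1ULL << 60) |	/* MCi_STATUS.EN    */
			  (1ULL << 58),		/* MCi_STATUS.ADDRV */
		.addr = addr,
		.mcg_status = 1ULL << 2,	/* MCG_STATUS.MCIP  */
		.bank = bank,
	};

	return ioctl(vcpu_fd, KVM_X86_SET_MCE, &mce);
}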
5147 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
5152 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5155 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5156 process_smi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5166 if (vcpu->arch.exception_vmexit.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5167 !vcpu->arch.exception.pending && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5168 !vcpu->arch.exception.injected) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5169 ex = &vcpu->arch.exception_vmexit; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5171 ex = &vcpu->arch.exception; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5181 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
5183 kvm_deliver_exception_payload(vcpu, ex); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5201 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5211 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5212 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5213 events->interrupt.shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5215 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5216 events->nmi.pending = kvm_get_nr_pending_nmis(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5217 events->nmi.masked = static_call(kvm_x86_get_nmi_mask)(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5222 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5223 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
5225 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5227 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5232 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
5234 if (vcpu->kvm->arch.triple_fault_event) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
5235 events->triple_fault.pending = kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
5240 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
5252 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5270 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5273 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5283 vcpu->arch.exception_from_userspace = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5285 vcpu->arch.exception_vmexit.pending = false; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5287 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5288 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5289 vcpu->arch.exception.vector = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5290 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5291 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5292 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5293 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5295 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5296 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5297 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5299 static_call(kvm_x86_set_interrupt_shadow)(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
5302 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5304 vcpu->arch.nmi_pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5305 atomic_set(&vcpu->arch.nmi_queued, events->nmi.pending); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5307 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5309 static_call(kvm_x86_set_nmi_mask)(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5312 lapic_in_kernel(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5313 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5317 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5318 kvm_leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5319 kvm_smm_changed(vcpu, events->smi.smm); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5322 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5326 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5328 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
5337 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
5339 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5341 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5346 if (!vcpu->kvm->arch.triple_fault_event) in kvm_vcpu_ioctl_x86_set_vcpu_events()
5349 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5351 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
5354 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
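The get/set pair above serves KVM_GET_VCPU_EVENTS and KVM_SET_VCPU_EVENTS, which is how a VMM snapshots pending exception/NMI/SMI state, e.g. for live migration. A round-trip sketch under the assumption that both file descriptors are valid vCPU fds:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int copy_vcpu_events(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_vcpu_events events;

	memset(&events, 0, sizeof(events));
	if (ioctl(src_vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;
	return ioctl(dst_vcpu_fd, KVM_SET_VCPU_EVENTS, &events);
}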
5359 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
5365 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
5366 kvm_get_dr(vcpu, 6, &val); in kvm_vcpu_ioctl_x86_get_debugregs()
5368 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
5371 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
5382 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
5383 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
5384 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
5385 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
5386 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
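Likewise, the debugreg handlers above sit behind KVM_GET_DEBUGREGS/KVM_SET_DEBUGREGS, which carry DR0-DR3, DR6 and DR7 as a single blob. A companion sketch in the same style as the events copy above:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int copy_debugregs(int src_vcpu_fd, int dst_vcpu_fd)
{
	struct kvm_debugregs dbg;

	memset(&dbg, 0, sizeof(dbg));
	if (ioctl(src_vcpu_fd, KVM_GET_DEBUGREGS, &dbg) < 0)
		return -1;
	return ioctl(dst_vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
}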
5392 static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave2() argument
5407 u64 supported_xcr0 = vcpu->arch.guest_supported_xcr0 | in kvm_vcpu_ioctl_x86_get_xsave2()
5410 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_get_xsave2()
5413 fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu, state, size, in kvm_vcpu_ioctl_x86_get_xsave2()
5414 supported_xcr0, vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_get_xsave2()
5417 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
5420 return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
5424 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
5427 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_vcpu_ioctl_x86_set_xsave()
5430 return fpu_copy_uabi_to_guest_fpstate(&vcpu->arch.guest_fpu, in kvm_vcpu_ioctl_x86_set_xsave()
5433 &vcpu->arch.pkru); in kvm_vcpu_ioctl_x86_set_xsave()
5436 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
5447 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
5450 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
5464 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
5479 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
5481 if (!vcpu->arch.pv_time.active) in kvm_set_guest_paused()
5483 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
5484 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
5488 static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_has_attr() argument
5504 static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_get_attr() argument
5516 if (put_user(vcpu->arch.l1_tsc_offset, uaddr)) in kvm_arch_tsc_get_attr()
5527 static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu, in kvm_arch_tsc_set_attr() argument
5531 struct kvm *kvm = vcpu->kvm; in kvm_arch_tsc_set_attr()
5549 matched = (vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5550 kvm->arch.last_tsc_khz == vcpu->arch.virtual_tsc_khz && in kvm_arch_tsc_set_attr()
5553 tsc = kvm_scale_tsc(rdtsc(), vcpu->arch.l1_tsc_scaling_ratio) + offset; in kvm_arch_tsc_set_attr()
5556 __kvm_synchronize_tsc(vcpu, offset, tsc, ns, matched); in kvm_arch_tsc_set_attr()
5569 static int kvm_vcpu_ioctl_device_attr(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_device_attr() argument
5584 r = kvm_arch_tsc_has_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5587 r = kvm_arch_tsc_get_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5590 r = kvm_arch_tsc_set_attr(vcpu, &attr); in kvm_vcpu_ioctl_device_attr()
5597 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
5614 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
5616 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
5621 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
5633 return static_call(kvm_x86_enable_l2_tlb_flush)(vcpu); in kvm_vcpu_ioctl_enable_cap()
5636 return kvm_hv_set_enforce_cpuid(vcpu, cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
5639 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
5640 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
5641 kvm_update_pv_runtime(vcpu); in kvm_vcpu_ioctl_enable_cap()
5652 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
5663 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
5669 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5677 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
5688 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5696 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
5705 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
5709 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
5713 r = kvm_inject_smi(vcpu); in kvm_arch_vcpu_ioctl()
5723 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5733 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5744 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5755 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5756 r = msr_io(vcpu, argp, do_get_msr, 1); in kvm_arch_vcpu_ioctl()
5757 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5761 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5762 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
5763 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5772 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
5786 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
5791 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5792 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
5793 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5802 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
5811 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
5817 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
5832 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl()
5833 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
5834 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl()
5840 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
5857 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
5862 if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave)) in kvm_arch_vcpu_ioctl()
5870 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
5879 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
5887 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
5892 int size = vcpu->arch.guest_fpu.uabi_size; in kvm_arch_vcpu_ioctl()
5899 kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size); in kvm_arch_vcpu_ioctl()
5915 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
5931 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
5947 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) in kvm_arch_vcpu_ioctl()
5953 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
5957 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
5966 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
5982 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
6026 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
6027 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
6028 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
6032 r = kvm_ioctl_get_supported_hv_cpuid(vcpu, argp); in kvm_arch_vcpu_ioctl()
6041 r = kvm_xen_vcpu_get_attr(vcpu, &xva); in kvm_arch_vcpu_ioctl()
6052 r = kvm_xen_vcpu_set_attr(vcpu, &xva); in kvm_arch_vcpu_ioctl()
6061 __get_sregs2(vcpu, u.sregs2); in kvm_arch_vcpu_ioctl()
6075 r = __set_sregs2(vcpu, u.sregs2); in kvm_arch_vcpu_ioctl()
6081 r = kvm_vcpu_ioctl_device_attr(vcpu, ioctl, argp); in kvm_arch_vcpu_ioctl()
6089 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
6093 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
6267 struct kvm_vcpu *vcpu; in kvm_arch_sync_dirty_log() local
6270 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_sync_dirty_log()
6271 kvm_vcpu_kick(vcpu); in kvm_arch_sync_dirty_log()
6685 struct kvm_vcpu *vcpu; in kvm_arch_suspend_notifier() local
6690 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_suspend_notifier()
6691 if (!vcpu->arch.pv_time.active) in kvm_arch_suspend_notifier()
6694 ret = kvm_set_guest_paused(vcpu); in kvm_arch_suspend_notifier()
6696 kvm_err("Failed to pause guest VCPU%d: %d\n", in kvm_arch_suspend_notifier()
6697 vcpu->vcpu_id, ret); in kvm_arch_suspend_notifier()
7247 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
7255 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_write()
7256 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
7257 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
7268 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
7275 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_read()
7276 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
7278 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
7290 void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
7293 static_call(kvm_x86_set_segment)(vcpu, var, seg); in kvm_set_segment()
7296 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
7299 static_call(kvm_x86_get_segment)(vcpu, var, seg); in kvm_get_segment()
7302 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access, in translate_nested_gpa() argument
7305 struct kvm_mmu *mmu = vcpu->arch.mmu; in translate_nested_gpa()
7308 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
7312 t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception); in translate_nested_gpa()
7317 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
7320 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_read()
7322 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
7323 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
7327 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
7330 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_write()
7332 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
7334 return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
7339 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
7342 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_mmu_gva_to_gpa_system()
7344 return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
7348 struct kvm_vcpu *vcpu, u64 access, in kvm_read_guest_virt_helper() argument
7351 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_read_guest_virt_helper()
7356 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_read_guest_virt_helper()
7363 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
7383 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
7384 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fetch_guest_virt()
7385 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
7390 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
7398 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
7406 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, in kvm_read_guest_virt() argument
7410 u64 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
7419 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
7428 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_std() local
7433 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) in emulator_read_std()
7436 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); in emulator_read_std()
7440 struct kvm_vcpu *vcpu, u64 access, in kvm_write_guest_virt_helper() argument
7443 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_write_guest_virt_helper()
7448 gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception); in kvm_write_guest_virt_helper()
7455 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); in kvm_write_guest_virt_helper()
7473 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_write_std() local
7478 else if (static_call(kvm_x86_get_cpl)(vcpu) == 3) in emulator_write_std()
7481 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in emulator_write_std()
7485 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, in kvm_write_guest_virt_system() argument
7489 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
7491 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in kvm_write_guest_virt_system()
7496 static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type, in kvm_can_emulate_insn() argument
7499 return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type, in kvm_can_emulate_insn()
7503 int handle_ud(struct kvm_vcpu *vcpu) in handle_ud() argument
7511 if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0))) in handle_ud()
7515 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), in handle_ud()
7519 kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) & ~X86_EFLAGS_RF); in handle_ud()
7520 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); in handle_ud()
7524 return kvm_emulate_instruction(vcpu, emul_type); in handle_ud()
7528 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_is_mmio_gpa() argument
7535 if (vcpu_match_mmio_gpa(vcpu, gpa)) { in vcpu_is_mmio_gpa()
7543 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
7547 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vcpu_mmio_gva_to_gpa()
7548 u64 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
7556 if (vcpu_match_mmio_gva(vcpu, gva) && (!is_paging(vcpu) || in vcpu_mmio_gva_to_gpa()
7557 !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
7558 vcpu->arch.mmio_access, 0, access))) { in vcpu_mmio_gva_to_gpa()
7559 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
7565 *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
7570 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); in vcpu_mmio_gva_to_gpa()
7573 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
7578 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); in emulator_write_phys()
7581 kvm_page_track_write(vcpu, gpa, val, bytes); in emulator_write_phys()
7586 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
7588 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
7590 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7592 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
7597 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
7599 if (vcpu->mmio_read_completed) { in read_prepare()
7601 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
7602 vcpu->mmio_read_completed = 0; in read_prepare()
7609 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
7612 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); in read_emulate()
7615 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
7618 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
7621 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
7624 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
7627 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
7634 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
7637 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
7639 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
7660 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
7667 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
7679 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); in emulator_read_write_onepage()
7681 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
7686 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
7692 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
7700 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
7701 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
7714 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
7719 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
7722 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
7730 vcpu, ops); in emulator_read_write()
7742 vcpu, ops); in emulator_read_write()
7746 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
7749 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
7751 vcpu->mmio_needed = 1; in emulator_read_write()
7752 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
7754 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
7755 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
7756 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
7757 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
7759 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
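When emulator_read_write() cannot complete an access inside the kernel, it fills vcpu->run->mmio and exits with KVM_EXIT_MMIO, one fragment of at most 8 bytes at a time. A userspace-side sketch of consuming that exit; mmio_read()/mmio_write() are hypothetical device-model callbacks, not KVM API:

#include <linux/kvm.h>
#include <stdint.h>

void mmio_read(uint64_t gpa, uint8_t *data, uint32_t len);		/* hypothetical */
void mmio_write(uint64_t gpa, const uint8_t *data, uint32_t len);	/* hypothetical */

static void handle_kvm_exit_mmio(struct kvm_run *run)
{
	if (run->mmio.is_write)
		mmio_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else
		mmio_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	/* For a read, the filled-in run->mmio.data is picked up on the next
	 * KVM_RUN and consumed via complete_emulated_mmio(). */
}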
7792 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
7802 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
7820 hva = kvm_vcpu_gfn_to_hva(vcpu, gpa_to_gfn(gpa)); in emulator_cmpxchg_emulated()
7853 kvm_vcpu_mark_page_dirty(vcpu, gpa_to_gfn(gpa)); in emulator_cmpxchg_emulated()
7858 kvm_page_track_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
7868 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
7875 WARN_ON_ONCE(vcpu->arch.pio.count); in emulator_pio_in_out()
7878 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, port, size, data); in emulator_pio_in_out()
7880 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, port, size, data); in emulator_pio_in_out()
7900 vcpu->arch.pio.port = port; in emulator_pio_in_out()
7901 vcpu->arch.pio.in = in; in emulator_pio_in_out()
7902 vcpu->arch.pio.count = count; in emulator_pio_in_out()
7903 vcpu->arch.pio.size = size; in emulator_pio_in_out()
7906 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_out()
7908 memcpy(vcpu->arch.pio_data, data, size * count); in emulator_pio_in_out()
7910 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
7911 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
7912 vcpu->run->io.size = size; in emulator_pio_in_out()
7913 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
7914 vcpu->run->io.count = count; in emulator_pio_in_out()
7915 vcpu->run->io.port = port; in emulator_pio_in_out()
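emulator_pio_in_out() stages port I/O the same way: the data sits inside the mmap'ed kvm_run page at io.data_offset and the exit reason is KVM_EXIT_IO. A userspace sketch of walking the staged transfer; handle_port_read()/handle_port_write() are hypothetical VMM callbacks:

#include <linux/kvm.h>
#include <stdint.h>

void handle_port_read(uint16_t port, uint8_t size, void *data);		/* hypothetical */
void handle_port_write(uint16_t port, uint8_t size, const void *data);	/* hypothetical */

static void handle_kvm_exit_io(struct kvm_run *run)
{
	uint8_t *data = (uint8_t *)run + run->io.data_offset;
	uint32_t i;

	for (i = 0; i < run->io.count; i++, data += run->io.size) {
		if (run->io.direction == KVM_EXIT_IO_IN)
			handle_port_read(run->io.port, run->io.size, data);
		else
			handle_port_write(run->io.port, run->io.size, data);
	}
}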
7919 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, in emulator_pio_in() argument
7922 int r = emulator_pio_in_out(vcpu, size, port, val, count, true); in emulator_pio_in()
7929 static void complete_emulator_pio_in(struct kvm_vcpu *vcpu, void *val) in complete_emulator_pio_in() argument
7931 int size = vcpu->arch.pio.size; in complete_emulator_pio_in()
7932 unsigned int count = vcpu->arch.pio.count; in complete_emulator_pio_in()
7933 memcpy(val, vcpu->arch.pio_data, size * count); in complete_emulator_pio_in()
7934 trace_kvm_pio(KVM_PIO_IN, vcpu->arch.pio.port, size, count, vcpu->arch.pio_data); in complete_emulator_pio_in()
7935 vcpu->arch.pio.count = 0; in complete_emulator_pio_in()
7942 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_in_emulated() local
7943 if (vcpu->arch.pio.count) { in emulator_pio_in_emulated()
7947 * can modify ECX before rerunning the vCPU. Ignore any such in emulator_pio_in_emulated()
7951 complete_emulator_pio_in(vcpu, val); in emulator_pio_in_emulated()
7955 return emulator_pio_in(vcpu, size, port, val, count); in emulator_pio_in_emulated()
7958 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_out() argument
7963 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); in emulator_pio_out()
7973 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
7975 return static_call(kvm_x86_get_segment_base)(vcpu, seg); in get_segment_base()
7983 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
7985 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
7991 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
7992 on_each_cpu_mask(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
7995 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
8001 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
8003 kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
8004 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
8035 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
8040 value = kvm_read_cr0(vcpu); in emulator_get_cr()
8043 value = vcpu->arch.cr2; in emulator_get_cr()
8046 value = kvm_read_cr3(vcpu); in emulator_get_cr()
8049 value = kvm_read_cr4(vcpu); in emulator_get_cr()
8052 value = kvm_get_cr8(vcpu); in emulator_get_cr()
8064 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
8069 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
8072 vcpu->arch.cr2 = val; in emulator_set_cr()
8075 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
8078 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
8081 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
8162 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
8184 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
8191 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_msr_with_filter() local
8194 r = kvm_get_msr_with_filter(vcpu, msr_index, pdata); in emulator_get_msr_with_filter()
8199 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0, in emulator_get_msr_with_filter()
8214 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_msr_with_filter() local
8217 r = kvm_set_msr_with_filter(vcpu, msr_index, data); in emulator_set_msr_with_filter()
8222 if (kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data, in emulator_set_msr_with_filter()
8386 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
8388 u32 int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); in toggle_interruptibility()
8399 static_call(kvm_x86_set_interrupt_shadow)(vcpu, mask); in toggle_interruptibility()
8401 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
8405 static void inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
8407 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
8410 kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
8412 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
8415 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
8418 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) in alloc_emulate_ctxt() argument
8424 pr_err("failed to allocate vcpu's emulator\n"); in alloc_emulate_ctxt()
8428 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
8430 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
8435 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
8437 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
8440 static_call(kvm_x86_get_cs_db_l_bits)(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
8443 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
8446 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
8447 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
8449 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
8458 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
8461 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
8463 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
8466 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
8474 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_inject_realmode_interrupt()
8477 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
8478 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
8483 static void prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, in prepare_emulation_failure_exit() argument
8486 struct kvm_run *run = vcpu->run; in prepare_emulation_failure_exit()
8496 static_call(kvm_x86_get_exit_info)(vcpu, (u32 *)&info[0], &info[1], in prepare_emulation_failure_exit()
8534 static void prepare_emulation_ctxt_failure_exit(struct kvm_vcpu *vcpu) in prepare_emulation_ctxt_failure_exit() argument
8536 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in prepare_emulation_ctxt_failure_exit()
8538 prepare_emulation_failure_exit(vcpu, NULL, 0, ctxt->fetch.data, in prepare_emulation_ctxt_failure_exit()
8542 void __kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu, u64 *data, in __kvm_prepare_emulation_failure_exit() argument
8545 prepare_emulation_failure_exit(vcpu, data, ndata, NULL, 0); in __kvm_prepare_emulation_failure_exit()
8549 void kvm_prepare_emulation_failure_exit(struct kvm_vcpu *vcpu) in kvm_prepare_emulation_failure_exit() argument
8551 __kvm_prepare_emulation_failure_exit(vcpu, NULL, 0); in kvm_prepare_emulation_failure_exit()
8555 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) in handle_emulation_failure() argument
8557 struct kvm *kvm = vcpu->kvm; in handle_emulation_failure()
8559 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
8560 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
8563 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in handle_emulation_failure()
8569 prepare_emulation_ctxt_failure_exit(vcpu); in handle_emulation_failure()
8573 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
8575 if (!is_guest_mode(vcpu) && static_call(kvm_x86_get_cpl)(vcpu) == 0) { in handle_emulation_failure()
8576 prepare_emulation_ctxt_failure_exit(vcpu); in handle_emulation_failure()
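prepare_emulation_failure_exit() reports unemulatable instructions to userspace as KVM_EXIT_INTERNAL_ERROR with suberror KVM_INTERNAL_ERROR_EMULATION, stashing exit info in run->internal.data[]. A small diagnostic sketch of reading that payload back; the print format is illustrative:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>

static void dump_emulation_failure(const struct kvm_run *run)
{
	uint32_t i;

	fprintf(stderr, "emulation failure, suberror %u\n",
		run->internal.suberror);
	for (i = 0; i < run->internal.ndata; i++)
		fprintf(stderr, "  data[%u] = 0x%llx\n", i,
			(unsigned long long)run->internal.data[i]);
}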
8583 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in reexecute_instruction() argument
8592 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || in reexecute_instruction()
8596 if (!vcpu->arch.mmu->root_role.direct) { in reexecute_instruction()
8601 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in reexecute_instruction()
8617 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
8629 if (vcpu->arch.mmu->root_role.direct) { in reexecute_instruction()
8632 write_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
8633 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
8634 write_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
8637 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
8647 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
8660 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in retry_instruction() local
8663 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
8664 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
8679 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
8684 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || in retry_instruction()
8694 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
8695 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
8697 if (!vcpu->arch.mmu->root_role.direct) in retry_instruction()
8698 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in retry_instruction()
8700 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
8705 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
8706 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
8723 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) in kvm_vcpu_do_singlestep() argument
8725 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
8727 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
8729 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
8734 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); in kvm_vcpu_do_singlestep()
8738 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) in kvm_skip_emulated_instruction() argument
8740 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); in kvm_skip_emulated_instruction()
8743 r = static_call(kvm_x86_skip_emulated_instruction)(vcpu); in kvm_skip_emulated_instruction()
8747 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); in kvm_skip_emulated_instruction()
8758 r = kvm_vcpu_do_singlestep(vcpu); in kvm_skip_emulated_instruction()
8763 static bool kvm_is_code_breakpoint_inhibited(struct kvm_vcpu *vcpu) in kvm_is_code_breakpoint_inhibited() argument
8767 if (kvm_get_rflags(vcpu) & X86_EFLAGS_RF) in kvm_is_code_breakpoint_inhibited()
8775 shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu); in kvm_is_code_breakpoint_inhibited()
8777 guest_cpuid_is_intel(vcpu); in kvm_is_code_breakpoint_inhibited()
8780 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, in kvm_vcpu_check_code_breakpoint() argument
8803 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
8804 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
8805 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
8806 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_code_breakpoint()
8808 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
8809 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
8821 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
8822 !kvm_is_code_breakpoint_inhibited(vcpu)) { in kvm_vcpu_check_code_breakpoint()
8823 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_code_breakpoint()
8825 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
8826 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
8829 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); in kvm_vcpu_check_code_breakpoint()
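kvm_vcpu_check_code_breakpoint() only reports to userspace when guest debugging has been armed through KVM_SET_GUEST_DEBUG. A minimal sketch of arming one hardware execute breakpoint; the DR7 encoding (G0 enable, len/type 0 for a 1-byte execute breakpoint) follows the architectural layout, but treat the value as illustrative:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int set_hw_exec_breakpoint(int vcpu_fd, unsigned long long addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = addr;	/* DR0 = linear address of the BP */
	dbg.arch.debugreg[7] = 0x2;	/* DR7.G0: globally enable breakpoint 0 */

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}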
8879 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, in x86_decode_emulated_instruction() argument
8882 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
8885 init_emulate_ctxt(vcpu); in x86_decode_emulated_instruction()
8889 trace_kvm_emulate_insn_start(vcpu); in x86_decode_emulated_instruction()
8890 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
8896 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in x86_emulate_instruction() argument
8900 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
8903 if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len))) in x86_emulate_instruction()
8906 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
8909 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
8916 if (kvm_vcpu_check_code_breakpoint(vcpu, emulation_type, &r)) in x86_emulate_instruction()
8919 r = x86_decode_emulated_instruction(vcpu, emulation_type, in x86_emulate_instruction()
8924 kvm_queue_exception(vcpu, UD_VECTOR); in x86_emulate_instruction()
8927 if (reexecute_instruction(vcpu, cr2_or_gpa, in x86_emulate_instruction()
8939 inject_emulated_exception(vcpu); in x86_emulate_instruction()
8942 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
8948 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in x86_emulate_instruction()
8969 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
8971 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
8980 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
8981 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
8991 if (vcpu->arch.mmu->root_role.direct) { in x86_emulate_instruction()
9006 if (reexecute_instruction(vcpu, cr2_or_gpa, emulation_type)) in x86_emulate_instruction()
9009 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
9013 WARN_ON_ONCE(vcpu->mmio_needed && !vcpu->mmio_is_write); in x86_emulate_instruction()
9014 vcpu->mmio_needed = false; in x86_emulate_instruction()
9016 inject_emulated_exception(vcpu); in x86_emulate_instruction()
9017 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
9018 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
9020 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
9023 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
9026 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
9027 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
9029 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
9032 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
9033 } else if (vcpu->arch.complete_userspace_io) { in x86_emulate_instruction()
9043 unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu); in x86_emulate_instruction()
9044 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
9045 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
9054 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS); in x86_emulate_instruction()
9056 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS); in x86_emulate_instruction()
9057 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
9058 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
9059 r = kvm_vcpu_do_singlestep(vcpu); in x86_emulate_instruction()
9060 static_call_cond(kvm_x86_update_emulated_instruction)(vcpu); in x86_emulate_instruction()
9061 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
9071 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
9073 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
9078 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) in kvm_emulate_instruction() argument
9080 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); in kvm_emulate_instruction()
9084 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, in kvm_emulate_instruction_from_buffer() argument
9087 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); in kvm_emulate_instruction_from_buffer()
9091 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) in complete_fast_pio_out_port_0x7e() argument
9093 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
9097 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) in complete_fast_pio_out() argument
9099 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
9101 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
9104 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_out()
9107 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_out() argument
9110 unsigned long val = kvm_rax_read(vcpu); in kvm_fast_pio_out()
9111 int ret = emulator_pio_out(vcpu, size, port, &val, 1); in kvm_fast_pio_out()
9121 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
9122 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
9124 kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio_out()
9126 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
9127 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
9132 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) in complete_fast_pio_in() argument
9137 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
9139 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
9140 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
9145 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
9147 complete_emulator_pio_in(vcpu, &val); in complete_fast_pio_in()
9148 kvm_rax_write(vcpu, val); in complete_fast_pio_in()
9150 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_in()
9153 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_in() argument
9160 val = (size < 4) ? kvm_rax_read(vcpu) : 0; in kvm_fast_pio_in()
9162 ret = emulator_pio_in(vcpu, size, port, &val, 1); in kvm_fast_pio_in()
9164 kvm_rax_write(vcpu, val); in kvm_fast_pio_in()
9168 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
9169 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
9174 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) in kvm_fast_pio() argument
9179 ret = kvm_fast_pio_in(vcpu, size, port); in kvm_fast_pio()
9181 ret = kvm_fast_pio_out(vcpu, size, port); in kvm_fast_pio()
9182 return ret && kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio()
9241 struct kvm_vcpu *vcpu; in __kvmclock_cpufreq_notifier() local
9256 * the TSC for each VCPU. We must flag these local variables in __kvmclock_cpufreq_notifier()
9275 * anytime after the setting of the VCPU's request bit, the in __kvmclock_cpufreq_notifier()
9288 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvmclock_cpufreq_notifier()
9289 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
9291 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in __kvmclock_cpufreq_notifier()
9292 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
9372 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
9377 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
9378 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
9480 * vCPU's FPU state as a fxregs_state struct. in __kvm_x86_vendor_init()
9648 static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason) in __kvm_emulate_halt() argument
9651 * The vCPU has halted, e.g. executed HLT. Update the run state if the in __kvm_emulate_halt()
9653 * state and halt the vCPU. Exit to userspace if the local APIC is in __kvm_emulate_halt()
9657 ++vcpu->stat.halt_exits; in __kvm_emulate_halt()
9658 if (lapic_in_kernel(vcpu)) { in __kvm_emulate_halt()
9659 vcpu->arch.mp_state = state; in __kvm_emulate_halt()
9662 vcpu->run->exit_reason = reason; in __kvm_emulate_halt()
9667 int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_halt_noskip() argument
9669 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT); in kvm_emulate_halt_noskip()
9673 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
9675 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_halt()
9680 return kvm_emulate_halt_noskip(vcpu) && ret; in kvm_emulate_halt()
9684 int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu) in kvm_emulate_ap_reset_hold() argument
9686 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_ap_reset_hold()
9688 return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, in kvm_emulate_ap_reset_hold()
9694 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, in kvm_pv_clock_pairing() argument
9709 if (vcpu->arch.tsc_always_catchup) in kvm_pv_clock_pairing()
9717 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); in kvm_pv_clock_pairing()
9722 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
9731 * kvm_pv_kick_cpu_op: Kick a vcpu.
9733 * @apicid - apicid of vcpu to be kicked.
9757 bool kvm_vcpu_apicv_activated(struct kvm_vcpu *vcpu) in kvm_vcpu_apicv_activated() argument
9759 ulong vm_reasons = READ_ONCE(vcpu->kvm->arch.apicv_inhibit_reasons); in kvm_vcpu_apicv_activated()
9760 ulong vcpu_reasons = static_call(kvm_x86_vcpu_get_apicv_inhibit_reasons)(vcpu); in kvm_vcpu_apicv_activated()
9790 static void kvm_sched_yield(struct kvm_vcpu *vcpu, unsigned long dest_id) in kvm_sched_yield() argument
9795 vcpu->stat.directed_yield_attempted++; in kvm_sched_yield()
9801 map = rcu_dereference(vcpu->kvm->arch.apic_map); in kvm_sched_yield()
9804 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
9812 if (vcpu == target) in kvm_sched_yield()
9818 vcpu->stat.directed_yield_successful++; in kvm_sched_yield()
9824 static int complete_hypercall_exit(struct kvm_vcpu *vcpu) in complete_hypercall_exit() argument
9826 u64 ret = vcpu->run->hypercall.ret; in complete_hypercall_exit()
9828 if (!is_64_bit_hypercall(vcpu)) in complete_hypercall_exit()
9830 kvm_rax_write(vcpu, ret); in complete_hypercall_exit()
9831 ++vcpu->stat.hypercalls; in complete_hypercall_exit()
9832 return kvm_skip_emulated_instruction(vcpu); in complete_hypercall_exit()
9835 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
9840 if (kvm_xen_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
9841 return kvm_xen_hypercall(vcpu); in kvm_emulate_hypercall()
9843 if (kvm_hv_hypercall_enabled(vcpu)) in kvm_emulate_hypercall()
9844 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
9846 nr = kvm_rax_read(vcpu); in kvm_emulate_hypercall()
9847 a0 = kvm_rbx_read(vcpu); in kvm_emulate_hypercall()
9848 a1 = kvm_rcx_read(vcpu); in kvm_emulate_hypercall()
9849 a2 = kvm_rdx_read(vcpu); in kvm_emulate_hypercall()
9850 a3 = kvm_rsi_read(vcpu); in kvm_emulate_hypercall()
9854 op_64_bit = is_64_bit_hypercall(vcpu); in kvm_emulate_hypercall()
9863 if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { in kvm_emulate_hypercall()
9875 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) in kvm_emulate_hypercall()
9878 kvm_pv_kick_cpu_op(vcpu->kvm, a1); in kvm_emulate_hypercall()
9879 kvm_sched_yield(vcpu, a1); in kvm_emulate_hypercall()
9884 ret = kvm_pv_clock_pairing(vcpu, a0, a1); in kvm_emulate_hypercall()
9888 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) in kvm_emulate_hypercall()
9891 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
9894 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) in kvm_emulate_hypercall()
9897 kvm_sched_yield(vcpu, a0); in kvm_emulate_hypercall()
9904 if (!(vcpu->kvm->arch.hypercall_exit_enabled & (1 << KVM_HC_MAP_GPA_RANGE))) in kvm_emulate_hypercall()
9913 vcpu->run->exit_reason = KVM_EXIT_HYPERCALL; in kvm_emulate_hypercall()
9914 vcpu->run->hypercall.nr = KVM_HC_MAP_GPA_RANGE; in kvm_emulate_hypercall()
9915 vcpu->run->hypercall.args[0] = gpa; in kvm_emulate_hypercall()
9916 vcpu->run->hypercall.args[1] = npages; in kvm_emulate_hypercall()
9917 vcpu->run->hypercall.args[2] = attrs; in kvm_emulate_hypercall()
9918 vcpu->run->hypercall.flags = 0; in kvm_emulate_hypercall()
9920 vcpu->run->hypercall.flags |= KVM_EXIT_HYPERCALL_LONG_MODE; in kvm_emulate_hypercall()
9922 WARN_ON_ONCE(vcpu->run->hypercall.flags & KVM_EXIT_HYPERCALL_MBZ); in kvm_emulate_hypercall()
9923 vcpu->arch.complete_userspace_io = complete_hypercall_exit; in kvm_emulate_hypercall()
9933 kvm_rax_write(vcpu, ret); in kvm_emulate_hypercall()
9935 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
9936 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
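The register convention consumed above (nr in RAX, arguments in RBX/RCX/RDX/RSI, result back in RAX) is the documented KVM hypercall ABI. A minimal guest-side sketch, assuming that ABI and a VMCALL-capable (Intel) CPU; kvm_hypercall2() here is an illustrative helper, not a line from x86.c, and AMD guests would use VMMCALL instead (the KVM_X86_QUIRK_FIX_HYPERCALL_INSN handling in emulator_fix_hypercall() below lets KVM patch a mismatched instruction):

/* Guest-side sketch: issue hypercall nr with two arguments. */
static inline long kvm_hypercall2(unsigned long nr,
				  unsigned long a0, unsigned long a1)
{
	long ret;

	asm volatile("vmcall"		/* VMMCALL on AMD */
		     : "=a"(ret)	/* result comes back in RAX */
		     : "a"(nr), "b"(a0), "c"(a1)
		     : "memory");
	return ret;
}

/* e.g. wake a halted vCPU by APIC ID, the KVM_HC_KICK_CPU case above: */
/*	kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);			*/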
9942 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
9944 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
9950 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_FIX_HYPERCALL_INSN)) { in emulator_fix_hypercall()
9957 static_call(kvm_x86_patch_hypercall)(vcpu, instruction); in emulator_fix_hypercall()
9963 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
9965 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
9966 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
9970 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
9972 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
9974 kvm_run->if_flag = static_call(kvm_x86_get_if_flag)(vcpu); in post_kvm_run_save()
9975 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
9976 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
9979 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
9980 kvm_vcpu_ready_for_interrupt_injection(vcpu); in post_kvm_run_save()
9982 if (is_smm(vcpu)) in post_kvm_run_save()
9986 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
9993 if (!lapic_in_kernel(vcpu)) in update_cr8_intercept()
9996 if (vcpu->arch.apic->apicv_active) in update_cr8_intercept()
9999 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
10000 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
10007 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
10009 static_call(kvm_x86_update_cr8_intercept)(vcpu, tpr, max_irr); in update_cr8_intercept()
10013 int kvm_check_nested_events(struct kvm_vcpu *vcpu) in kvm_check_nested_events() argument
10015 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in kvm_check_nested_events()
10016 kvm_x86_ops.nested_ops->triple_fault(vcpu); in kvm_check_nested_events()
10020 return kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_check_nested_events()
10023 static void kvm_inject_exception(struct kvm_vcpu *vcpu) in kvm_inject_exception() argument
10026 * Suppress the error code if the vCPU is in Real Mode, as Real Mode in kvm_inject_exception()
10032 vcpu->arch.exception.has_error_code &= is_protmode(vcpu); in kvm_inject_exception()
10034 trace_kvm_inj_exception(vcpu->arch.exception.vector, in kvm_inject_exception()
10035 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
10036 vcpu->arch.exception.error_code, in kvm_inject_exception()
10037 vcpu->arch.exception.injected); in kvm_inject_exception()
10039 static_call(kvm_x86_inject_exception)(vcpu); in kvm_inject_exception()
10081 static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu, in kvm_check_and_inject_events() argument
10092 if (is_guest_mode(vcpu)) in kvm_check_and_inject_events()
10093 r = kvm_check_nested_events(vcpu); in kvm_check_and_inject_events()
10120 if (vcpu->arch.exception.injected) in kvm_check_and_inject_events()
10121 kvm_inject_exception(vcpu); in kvm_check_and_inject_events()
10122 else if (kvm_is_exception_pending(vcpu)) in kvm_check_and_inject_events()
10124 else if (vcpu->arch.nmi_injected) in kvm_check_and_inject_events()
10125 static_call(kvm_x86_inject_nmi)(vcpu); in kvm_check_and_inject_events()
10126 else if (vcpu->arch.interrupt.injected) in kvm_check_and_inject_events()
10127 static_call(kvm_x86_inject_irq)(vcpu, true); in kvm_check_and_inject_events()
10134 WARN_ON_ONCE(vcpu->arch.exception.injected && in kvm_check_and_inject_events()
10135 vcpu->arch.exception.pending); in kvm_check_and_inject_events()
10154 WARN_ON_ONCE(vcpu->arch.exception_vmexit.injected || in kvm_check_and_inject_events()
10155 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10162 can_inject = !kvm_event_needs_reinjection(vcpu); in kvm_check_and_inject_events()
10164 if (vcpu->arch.exception.pending) { in kvm_check_and_inject_events()
10175 if (exception_type(vcpu->arch.exception.vector) == EXCPT_FAULT) in kvm_check_and_inject_events()
10176 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in kvm_check_and_inject_events()
10179 if (vcpu->arch.exception.vector == DB_VECTOR) { in kvm_check_and_inject_events()
10180 kvm_deliver_exception_payload(vcpu, &vcpu->arch.exception); in kvm_check_and_inject_events()
10181 if (vcpu->arch.dr7 & DR7_GD) { in kvm_check_and_inject_events()
10182 vcpu->arch.dr7 &= ~DR7_GD; in kvm_check_and_inject_events()
10183 kvm_update_dr7(vcpu); in kvm_check_and_inject_events()
10187 kvm_inject_exception(vcpu); in kvm_check_and_inject_events()
10189 vcpu->arch.exception.pending = false; in kvm_check_and_inject_events()
10190 vcpu->arch.exception.injected = true; in kvm_check_and_inject_events()
10196 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) in kvm_check_and_inject_events()
10211 if (vcpu->arch.smi_pending) { in kvm_check_and_inject_events()
10212 r = can_inject ? static_call(kvm_x86_smi_allowed)(vcpu, true) : -EBUSY; in kvm_check_and_inject_events()
10216 vcpu->arch.smi_pending = false; in kvm_check_and_inject_events()
10217 ++vcpu->arch.smi_count; in kvm_check_and_inject_events()
10218 enter_smm(vcpu); in kvm_check_and_inject_events()
10221 static_call(kvm_x86_enable_smi_window)(vcpu); in kvm_check_and_inject_events()
10225 if (vcpu->arch.nmi_pending) { in kvm_check_and_inject_events()
10226 r = can_inject ? static_call(kvm_x86_nmi_allowed)(vcpu, true) : -EBUSY; in kvm_check_and_inject_events()
10230 --vcpu->arch.nmi_pending; in kvm_check_and_inject_events()
10231 vcpu->arch.nmi_injected = true; in kvm_check_and_inject_events()
10232 static_call(kvm_x86_inject_nmi)(vcpu); in kvm_check_and_inject_events()
10234 WARN_ON(static_call(kvm_x86_nmi_allowed)(vcpu, true) < 0); in kvm_check_and_inject_events()
10236 if (vcpu->arch.nmi_pending) in kvm_check_and_inject_events()
10237 static_call(kvm_x86_enable_nmi_window)(vcpu); in kvm_check_and_inject_events()
10240 if (kvm_cpu_has_injectable_intr(vcpu)) { in kvm_check_and_inject_events()
10241 r = can_inject ? static_call(kvm_x86_interrupt_allowed)(vcpu, true) : -EBUSY; in kvm_check_and_inject_events()
10245 int irq = kvm_cpu_get_interrupt(vcpu); in kvm_check_and_inject_events()
10248 kvm_queue_interrupt(vcpu, irq, false); in kvm_check_and_inject_events()
10249 static_call(kvm_x86_inject_irq)(vcpu, false); in kvm_check_and_inject_events()
10250 WARN_ON(static_call(kvm_x86_interrupt_allowed)(vcpu, true) < 0); in kvm_check_and_inject_events()
10253 if (kvm_cpu_has_injectable_intr(vcpu)) in kvm_check_and_inject_events()
10254 static_call(kvm_x86_enable_irq_window)(vcpu); in kvm_check_and_inject_events()
10257 if (is_guest_mode(vcpu) && in kvm_check_and_inject_events()
10259 kvm_x86_ops.nested_ops->has_events(vcpu, true)) in kvm_check_and_inject_events()
10265 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an in kvm_check_and_inject_events()
10270 * vCPU into an infinite loop. Triple fault can be queued when running in kvm_check_and_inject_events()
10274 WARN_ON_ONCE(vcpu->arch.exception.pending || in kvm_check_and_inject_events()
10275 vcpu->arch.exception_vmexit.pending); in kvm_check_and_inject_events()
10286 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
10292 * incoming NMIs as quickly as bare metal, e.g. if the vCPU is in process_nmi()
10300 if (static_call(kvm_x86_get_nmi_mask)(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
10307 * tracked in vcpu->arch.nmi_pending. in process_nmi()
10309 if (static_call(kvm_x86_is_vnmi_pending)(vcpu)) in process_nmi()
10312 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
10313 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
10315 if (vcpu->arch.nmi_pending && in process_nmi()
10316 (static_call(kvm_x86_set_vnmi_pending)(vcpu))) in process_nmi()
10317 vcpu->arch.nmi_pending--; in process_nmi()
10319 if (vcpu->arch.nmi_pending) in process_nmi()
10320 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
10324 int kvm_get_nr_pending_nmis(struct kvm_vcpu *vcpu) in kvm_get_nr_pending_nmis() argument
10326 return vcpu->arch.nmi_pending + in kvm_get_nr_pending_nmis()
10327 static_call(kvm_x86_is_vnmi_pending)(vcpu); in kvm_get_nr_pending_nmis()
10341 void __kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) in __kvm_vcpu_update_apicv() argument
10343 struct kvm_lapic *apic = vcpu->arch.apic; in __kvm_vcpu_update_apicv()
10346 if (!lapic_in_kernel(vcpu)) in __kvm_vcpu_update_apicv()
10349 down_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10353 activate = kvm_vcpu_apicv_activated(vcpu) && in __kvm_vcpu_update_apicv()
10354 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED); in __kvm_vcpu_update_apicv()
10360 kvm_apic_update_apicv(vcpu); in __kvm_vcpu_update_apicv()
10361 static_call(kvm_x86_refresh_apicv_exec_ctrl)(vcpu); in __kvm_vcpu_update_apicv()
10370 kvm_make_request(KVM_REQ_EVENT, vcpu); in __kvm_vcpu_update_apicv()
10374 up_read(&vcpu->kvm->arch.apicv_update_lock); in __kvm_vcpu_update_apicv()
10378 static void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) in kvm_vcpu_update_apicv() argument
10380 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_update_apicv()
10385 * deleted if any vCPU has xAPIC virtualization and x2APIC enabled, but in kvm_vcpu_update_apicv()
10390 * the vCPU would incorrectly be able to access the vAPIC page via MMIO in kvm_vcpu_update_apicv()
10394 if (apic_x2apic_mode(vcpu->arch.apic) && in kvm_vcpu_update_apicv()
10396 kvm_inhibit_apic_access_page(vcpu); in kvm_vcpu_update_apicv()
10398 __kvm_vcpu_update_apicv(vcpu); in kvm_vcpu_update_apicv()
10454 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
10456 if (!kvm_apic_present(vcpu)) in vcpu_scan_ioapic()
10459 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
10461 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in vcpu_scan_ioapic()
10463 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
10464 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10465 else if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
10466 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
10468 if (is_guest_mode(vcpu)) in vcpu_scan_ioapic()
10469 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
10471 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); in vcpu_scan_ioapic()
10474 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) in vcpu_load_eoi_exitmap() argument
10478 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
10481 if (to_hv_vcpu(vcpu)) { in vcpu_load_eoi_exitmap()
10483 vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
10484 to_hv_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
10485 static_call_cond(kvm_x86_load_eoi_exitmap)(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
10490 vcpu, (u64 *)vcpu->arch.ioapic_handled_vectors); in vcpu_load_eoi_exitmap()
10498 static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
10500 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_reload_apic_access_page()
10503 static_call_cond(kvm_x86_set_apic_access_page_addr)(vcpu); in kvm_vcpu_reload_apic_access_page()
10506 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) in __kvm_request_immediate_exit() argument
10508 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
10518 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
10522 dm_request_for_irq_injection(vcpu) && in vcpu_enter_guest()
10523 kvm_cpu_accept_dm_intr(vcpu); in vcpu_enter_guest()
10528 if (kvm_request_pending(vcpu)) { in vcpu_enter_guest()
10529 if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu)) { in vcpu_enter_guest()
10534 if (kvm_dirty_ring_check_request(vcpu)) { in vcpu_enter_guest()
10539 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { in vcpu_enter_guest()
10540 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
10545 if (kvm_check_request(KVM_REQ_MMU_FREE_OBSOLETE_ROOTS, vcpu)) in vcpu_enter_guest()
10546 kvm_mmu_free_obsolete_roots(vcpu); in vcpu_enter_guest()
10547 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
10548 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
10549 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
10550 kvm_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
10551 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
10552 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
10553 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
10554 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
10558 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
10559 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
10560 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) in vcpu_enter_guest()
10561 kvm_mmu_load_pgd(vcpu); in vcpu_enter_guest()
10568 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in vcpu_enter_guest()
10569 kvm_vcpu_flush_tlb_all(vcpu); in vcpu_enter_guest()
10571 kvm_service_local_tlb_flush_requests(vcpu); in vcpu_enter_guest()
10575 * flushing fails. Note, Hyper-V's flushing is per-vCPU, but in vcpu_enter_guest()
10579 if (kvm_check_request(KVM_REQ_HV_TLB_FLUSH, vcpu) && in vcpu_enter_guest()
10580 kvm_hv_vcpu_flush_tlb(vcpu)) in vcpu_enter_guest()
10581 kvm_vcpu_flush_tlb_guest(vcpu); in vcpu_enter_guest()
10583 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
10584 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
10588 if (kvm_test_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
10589 if (is_guest_mode(vcpu)) in vcpu_enter_guest()
10590 kvm_x86_ops.nested_ops->triple_fault(vcpu); in vcpu_enter_guest()
10592 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
10593 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
10594 vcpu->mmio_needed = 0; in vcpu_enter_guest()
10599 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
10601 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
10605 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
10606 record_steal_time(vcpu); in vcpu_enter_guest()
10608 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in vcpu_enter_guest()
10609 process_smi(vcpu); in vcpu_enter_guest()
10611 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
10612 process_nmi(vcpu); in vcpu_enter_guest()
10613 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
10614 kvm_pmu_handle_event(vcpu); in vcpu_enter_guest()
10615 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
10616 kvm_pmu_deliver_pmi(vcpu); in vcpu_enter_guest()
10617 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { in vcpu_enter_guest()
10618 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
10619 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
10620 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
10621 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
10622 vcpu->run->eoi.vector = in vcpu_enter_guest()
10623 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
10628 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
10629 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
10630 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) in vcpu_enter_guest()
10631 vcpu_load_eoi_exitmap(vcpu); in vcpu_enter_guest()
10632 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
10633 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
10634 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { in vcpu_enter_guest()
10635 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
10636 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
10637 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
10641 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { in vcpu_enter_guest()
10642 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
10643 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
10644 vcpu->run->system_event.ndata = 0; in vcpu_enter_guest()
10648 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { in vcpu_enter_guest()
10649 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in vcpu_enter_guest()
10651 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
10652 vcpu->run->hyperv = hv_vcpu->exit; in vcpu_enter_guest()
10662 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) in vcpu_enter_guest()
10663 kvm_hv_process_stimers(vcpu); in vcpu_enter_guest()
10664 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) in vcpu_enter_guest()
10665 kvm_vcpu_update_apicv(vcpu); in vcpu_enter_guest()
10666 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) in vcpu_enter_guest()
10667 kvm_check_async_pf_completion(vcpu); in vcpu_enter_guest()
10668 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) in vcpu_enter_guest()
10669 static_call(kvm_x86_msr_filter_changed)(vcpu); in vcpu_enter_guest()
10671 if (kvm_check_request(KVM_REQ_UPDATE_CPU_DIRTY_LOGGING, vcpu)) in vcpu_enter_guest()
10672 static_call(kvm_x86_update_cpu_dirty_logging)(vcpu); in vcpu_enter_guest()
10675 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win || in vcpu_enter_guest()
10676 kvm_xen_has_interrupt(vcpu)) { in vcpu_enter_guest()
10677 ++vcpu->stat.req_event; in vcpu_enter_guest()
10678 r = kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
10683 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
10688 r = kvm_check_and_inject_events(vcpu, &req_immediate_exit); in vcpu_enter_guest()
10694 static_call(kvm_x86_enable_irq_window)(vcpu); in vcpu_enter_guest()
10696 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
10697 update_cr8_intercept(vcpu); in vcpu_enter_guest()
10698 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
10702 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
10709 static_call(kvm_x86_prepare_switch_to_guest)(vcpu); in vcpu_enter_guest()
10718 /* Store vcpu->apicv_active before vcpu->mode. */ in vcpu_enter_guest()
10719 smp_store_release(&vcpu->mode, IN_GUEST_MODE); in vcpu_enter_guest()
10721 kvm_vcpu_srcu_read_unlock(vcpu); in vcpu_enter_guest()
10732 * tables done while the VCPU is running. Please see the comment in vcpu_enter_guest()
10740 * target vCPU wasn't running). Do this regardless of the vCPU's APICv in vcpu_enter_guest()
10744 if (kvm_lapic_enabled(vcpu)) in vcpu_enter_guest()
10745 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in vcpu_enter_guest()
10747 if (kvm_vcpu_exit_request(vcpu)) { in vcpu_enter_guest()
10748 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
10752 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_enter_guest()
10758 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
10759 static_call(kvm_x86_request_immediate_exit)(vcpu); in vcpu_enter_guest()
10766 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10767 wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err); in vcpu_enter_guest()
10769 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
10771 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
10772 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
10773 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
10774 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
10783 * Assert that vCPU vs. VM APICv state is consistent. An APICv in vcpu_enter_guest()
10788 WARN_ON_ONCE((kvm_vcpu_apicv_activated(vcpu) != kvm_vcpu_apicv_active(vcpu)) && in vcpu_enter_guest()
10789 (kvm_get_apic_mode(vcpu) != LAPIC_MODE_DISABLED)); in vcpu_enter_guest()
10791 exit_fastpath = static_call(kvm_x86_vcpu_run)(vcpu); in vcpu_enter_guest()
10795 if (kvm_lapic_enabled(vcpu)) in vcpu_enter_guest()
10796 static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu); in vcpu_enter_guest()
10798 if (unlikely(kvm_vcpu_exit_request(vcpu))) { in vcpu_enter_guest()
10804 ++vcpu->stat.exits; in vcpu_enter_guest()
10813 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
10814 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
10815 static_call(kvm_x86_sync_dirty_debug_regs)(vcpu); in vcpu_enter_guest()
10816 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
10817 kvm_update_dr7(vcpu); in vcpu_enter_guest()
10830 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
10831 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
10833 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
10841 if (vcpu->arch.xfd_no_write_intercept) in vcpu_enter_guest()
10844 static_call(kvm_x86_handle_exit_irqoff)(vcpu); in vcpu_enter_guest()
10846 if (vcpu->arch.guest_fpu.xfd_err) in vcpu_enter_guest()
10856 kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ); in vcpu_enter_guest()
10858 ++vcpu->stat.exits; in vcpu_enter_guest()
10860 kvm_after_interrupt(vcpu); in vcpu_enter_guest()
10874 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_enter_guest()
10880 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
10884 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
10885 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
10887 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
10888 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
10890 r = static_call(kvm_x86_handle_exit)(vcpu, exit_fastpath); in vcpu_enter_guest()
10895 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
10896 static_call(kvm_x86_cancel_injection)(vcpu); in vcpu_enter_guest()
10897 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
10898 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
10904 static inline int vcpu_block(struct kvm_vcpu *vcpu) in vcpu_block() argument
10908 if (!kvm_arch_vcpu_runnable(vcpu)) { in vcpu_block()
10911 * the guest's timer may be a break event for the vCPU, and the in vcpu_block()
10916 hv_timer = kvm_lapic_hv_timer_in_use(vcpu); in vcpu_block()
10918 kvm_lapic_switch_to_sw_timer(vcpu); in vcpu_block()
10920 kvm_vcpu_srcu_read_unlock(vcpu); in vcpu_block()
10921 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) in vcpu_block()
10922 kvm_vcpu_halt(vcpu); in vcpu_block()
10924 kvm_vcpu_block(vcpu); in vcpu_block()
10925 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_block()
10928 kvm_lapic_switch_to_hv_timer(vcpu); in vcpu_block()
10931 * If the vCPU is not runnable, a signal or another host event in vcpu_block()
10933 * vCPU's activity state. in vcpu_block()
10935 if (!kvm_arch_vcpu_runnable(vcpu)) in vcpu_block()
10945 if (is_guest_mode(vcpu)) { in vcpu_block()
10946 if (kvm_check_nested_events(vcpu) < 0) in vcpu_block()
10950 if (kvm_apic_accept_events(vcpu) < 0) in vcpu_block()
10952 switch(vcpu->arch.mp_state) { in vcpu_block()
10955 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
10956 vcpu->arch.mp_state = in vcpu_block()
10960 vcpu->arch.apf.halted = false; in vcpu_block()
10971 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) in kvm_vcpu_running() argument
10973 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
10974 !vcpu->arch.apf.halted); in kvm_vcpu_running()
10978 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
10982 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
10986 * If another guest vCPU requests a PV TLB flush in the middle in vcpu_run()
10991 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
10992 if (kvm_vcpu_running(vcpu)) { in vcpu_run()
10993 r = vcpu_enter_guest(vcpu); in vcpu_run()
10995 r = vcpu_block(vcpu); in vcpu_run()
11001 kvm_clear_request(KVM_REQ_UNBLOCK, vcpu); in vcpu_run()
11002 if (kvm_xen_has_pending_events(vcpu)) in vcpu_run()
11003 kvm_xen_inject_pending_events(vcpu); in vcpu_run()
11005 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
11006 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
11008 if (dm_request_for_irq_injection(vcpu) && in vcpu_run()
11009 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { in vcpu_run()
11011 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
11012 ++vcpu->stat.request_irq_exits; in vcpu_run()
11017 kvm_vcpu_srcu_read_unlock(vcpu); in vcpu_run()
11018 r = xfer_to_guest_mode_handle_work(vcpu); in vcpu_run()
11019 kvm_vcpu_srcu_read_lock(vcpu); in vcpu_run()
11028 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
11030 return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
11033 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
11035 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
11037 return complete_emulated_io(vcpu); in complete_emulated_pio()
11058 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
11060 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
11064 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
11067 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
11069 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
11075 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
11083 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
11084 vcpu->mmio_needed = 0; in complete_emulated_mmio()
11087 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11089 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
11090 return complete_emulated_io(vcpu); in complete_emulated_mmio()
11095 if (vcpu->mmio_is_write) in complete_emulated_mmio()
11098 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
11099 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
11104 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
11107 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, true); in kvm_load_guest_fpu()
11112 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
11114 fpu_swap_kvm_fpstate(&vcpu->arch.guest_fpu, false); in kvm_put_guest_fpu()
11115 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
11119 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
11121 struct kvm_queued_exception *ex = &vcpu->arch.exception; in kvm_arch_vcpu_ioctl_run()
11122 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
11125 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
11126 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
11128 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
11130 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
11131 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
11140 * APIC timer to be active is if userspace stuffed vCPU state, in kvm_arch_vcpu_ioctl_run()
11141 * i.e. put the vCPU into a nonsensical state. Only an INIT in kvm_arch_vcpu_ioctl_run()
11142 * will transition the vCPU out of UNINITIALIZED (without more in kvm_arch_vcpu_ioctl_run()
11147 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
11148 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
11149 kvm_vcpu_srcu_read_lock(vcpu); in kvm_arch_vcpu_ioctl_run()
11151 if (kvm_apic_accept_events(vcpu) < 0) { in kvm_arch_vcpu_ioctl_run()
11159 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
11171 r = sync_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
11177 if (!lapic_in_kernel(vcpu)) { in kvm_arch_vcpu_ioctl_run()
11178 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
11188 if (vcpu->arch.exception_from_userspace && is_guest_mode(vcpu) && in kvm_arch_vcpu_ioctl_run()
11189 kvm_x86_ops.nested_ops->is_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11191 kvm_queue_exception_vmexit(vcpu, ex->vector, in kvm_arch_vcpu_ioctl_run()
11197 vcpu->arch.exception_from_userspace = false; in kvm_arch_vcpu_ioctl_run()
11199 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
11200 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
11201 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
11202 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
11206 WARN_ON_ONCE(vcpu->arch.pio.count); in kvm_arch_vcpu_ioctl_run()
11207 WARN_ON_ONCE(vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
11215 r = static_call(kvm_x86_vcpu_pre_run)(vcpu); in kvm_arch_vcpu_ioctl_run()
11219 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
11222 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
11224 store_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
11225 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
11226 kvm_vcpu_srcu_read_unlock(vcpu); in kvm_arch_vcpu_ioctl_run()
11228 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
11229 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
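For context on how the kvm_arch_vcpu_ioctl_run() lines above are reached, a hedged userspace sketch of the KVM_RUN loop; run_vcpu() is an illustrative helper and assumes the /dev/kvm fd, the vCPU fd and guest memory were set up elsewhere:

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int run_vcpu(int kvm_fd, int vcpu_fd)
{
	int run_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (run_size < 0)
		return -1;
	/* the shared kvm_run page that post_kvm_run_save() fills in */
	run = mmap(NULL, run_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		switch (run->exit_reason) {
		case KVM_EXIT_HLT:	/* __kvm_emulate_halt() w/o in-kernel APIC */
			return 0;
		case KVM_EXIT_IO:	/* cf. complete_emulated_pio() above */
		case KVM_EXIT_MMIO:	/* cf. complete_emulated_mmio() above */
			/* service the access, then re-enter KVM_RUN */
			break;
		default:
			fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
			return -1;
		}
	}
}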
11233 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __get_regs() argument
11235 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
11239 * back from emulation context to vcpu. Userspace shouldn't do in __get_regs()
11243 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
11244 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
11246 regs->rax = kvm_rax_read(vcpu); in __get_regs()
11247 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
11248 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
11249 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
11250 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
11251 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
11252 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
11253 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
11255 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
11256 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
11257 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
11258 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
11259 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
11260 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
11261 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
11262 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
11265 regs->rip = kvm_rip_read(vcpu); in __get_regs()
11266 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
11269 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
11271 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
11272 __get_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_get_regs()
11273 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
11277 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __set_regs() argument
11279 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
11280 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
11282 kvm_rax_write(vcpu, regs->rax); in __set_regs()
11283 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
11284 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
11285 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
11286 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
11287 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
11288 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
11289 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
11291 kvm_r8_write(vcpu, regs->r8); in __set_regs()
11292 kvm_r9_write(vcpu, regs->r9); in __set_regs()
11293 kvm_r10_write(vcpu, regs->r10); in __set_regs()
11294 kvm_r11_write(vcpu, regs->r11); in __set_regs()
11295 kvm_r12_write(vcpu, regs->r12); in __set_regs()
11296 kvm_r13_write(vcpu, regs->r13); in __set_regs()
11297 kvm_r14_write(vcpu, regs->r14); in __set_regs()
11298 kvm_r15_write(vcpu, regs->r15); in __set_regs()
11301 kvm_rip_write(vcpu, regs->rip); in __set_regs()
11302 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
11304 vcpu->arch.exception.pending = false; in __set_regs()
11305 vcpu->arch.exception_vmexit.pending = false; in __set_regs()
11307 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_regs()
11310 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
11312 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
11313 __set_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_set_regs()
11314 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
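The struct kvm_regs layout that __get_regs()/__set_regs() above marshal (rax..r15, rip, rflags) is what userspace sees through KVM_GET_REGS/KVM_SET_REGS. A hedged sketch; rewind_rip() is an illustrative helper, e.g. for re-executing an instruction after handling a failed access:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int rewind_rip(int vcpu_fd, unsigned long bytes)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)	/* filled by __get_regs() */
		return -1;
	regs.rip -= bytes;
	return ioctl(vcpu_fd, KVM_SET_REGS, &regs);	/* applied by __set_regs() */
}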
11318 static void __get_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs_common() argument
11322 if (vcpu->arch.guest_state_protected) in __get_sregs_common()
11325 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs_common()
11326 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs_common()
11327 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs_common()
11328 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs_common()
11329 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs_common()
11330 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs_common()
11332 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs_common()
11333 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs_common()
11335 static_call(kvm_x86_get_idt)(vcpu, &dt); in __get_sregs_common()
11338 static_call(kvm_x86_get_gdt)(vcpu, &dt); in __get_sregs_common()
11342 sregs->cr2 = vcpu->arch.cr2; in __get_sregs_common()
11343 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs_common()
11346 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs_common()
11347 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs_common()
11348 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs_common()
11349 sregs->efer = vcpu->arch.efer; in __get_sregs_common()
11350 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs_common()
11353 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs() argument
11355 __get_sregs_common(vcpu, sregs); in __get_sregs()
11357 if (vcpu->arch.guest_state_protected) in __get_sregs()
11360 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
11361 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
11365 static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) in __get_sregs2() argument
11369 __get_sregs_common(vcpu, (struct kvm_sregs *)sregs2); in __get_sregs2()
11371 if (vcpu->arch.guest_state_protected) in __get_sregs2()
11374 if (is_pae_paging(vcpu)) { in __get_sregs2()
11376 sregs2->pdptrs[i] = kvm_pdptr_read(vcpu, i); in __get_sregs2()
11381 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
11384 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
11385 __get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
11386 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
11390 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
11395 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11397 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11399 r = kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11404 if ((vcpu->arch.mp_state == KVM_MP_STATE_HALTED || in kvm_arch_vcpu_ioctl_get_mpstate()
11405 vcpu->arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) && in kvm_arch_vcpu_ioctl_get_mpstate()
11406 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
11409 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
11413 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11414 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
11418 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
11423 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
11431 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl_set_mpstate()
11448 if ((!kvm_apic_init_sipi_allowed(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
11454 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
11455 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
11457 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
11458 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
11462 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
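A hedged userspace sketch of the MP-state ioctls handled above; unhalt_vcpu() is an illustrative helper that forces a vCPU parked in KVM_MP_STATE_HALTED back to RUNNABLE:

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int unhalt_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp = { 0 };

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;
	if (mp.mp_state == KVM_MP_STATE_HALTED) {
		mp.mp_state = KVM_MP_STATE_RUNNABLE;
		return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
	}
	return 0;
}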
11466 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
11469 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
11472 init_emulate_ctxt(vcpu); in kvm_task_switch()
11477 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
11478 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
11479 vcpu->run->internal.ndata = 0; in kvm_task_switch()
11483 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
11484 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
11489 static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_is_valid_sregs() argument
11499 if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) in kvm_is_valid_sregs()
11510 return kvm_is_valid_cr4(vcpu, sregs->cr4) && in kvm_is_valid_sregs()
11511 kvm_is_valid_cr0(vcpu, sregs->cr0); in kvm_is_valid_sregs()
11514 static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, in __set_sregs_common() argument
11521 if (!kvm_is_valid_sregs(vcpu, sregs)) in __set_sregs_common()
11526 if (kvm_set_apic_base(vcpu, &apic_base_msr)) in __set_sregs_common()
11529 if (vcpu->arch.guest_state_protected) in __set_sregs_common()
11534 static_call(kvm_x86_set_idt)(vcpu, &dt); in __set_sregs_common()
11537 static_call(kvm_x86_set_gdt)(vcpu, &dt); in __set_sregs_common()
11539 vcpu->arch.cr2 = sregs->cr2; in __set_sregs_common()
11540 *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs_common()
11541 vcpu->arch.cr3 = sregs->cr3; in __set_sregs_common()
11542 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in __set_sregs_common()
11543 static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); in __set_sregs_common()
11545 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs_common()
11547 *mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs_common()
11548 static_call(kvm_x86_set_efer)(vcpu, sregs->efer); in __set_sregs_common()
11550 *mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs_common()
11551 static_call(kvm_x86_set_cr0)(vcpu, sregs->cr0); in __set_sregs_common()
11552 vcpu->arch.cr0 = sregs->cr0; in __set_sregs_common()
11554 *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs_common()
11555 static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); in __set_sregs_common()
11558 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs_common()
11559 if (is_pae_paging(vcpu)) { in __set_sregs_common()
11560 load_pdptrs(vcpu, kvm_read_cr3(vcpu)); in __set_sregs_common()
11563 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs_common()
11566 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs_common()
11567 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs_common()
11568 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs_common()
11569 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs_common()
11570 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs_common()
11571 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs_common()
11573 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs_common()
11574 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs_common()
11576 update_cr8_intercept(vcpu); in __set_sregs_common()
11578 /* Older userspace won't unhalt the vcpu on reset. */ in __set_sregs_common()
11579 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in __set_sregs_common()
11581 !is_protmode(vcpu)) in __set_sregs_common()
11582 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs_common()
11587 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __set_sregs() argument
11591 int ret = __set_sregs_common(vcpu, sregs, &mmu_reset_needed, true); in __set_sregs()
11597 kvm_mmu_reset_context(vcpu); in __set_sregs()
11604 kvm_queue_interrupt(vcpu, pending_vec, false); in __set_sregs()
11606 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_sregs()
11611 static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2) in __set_sregs2() argument
11622 if (valid_pdptrs && (!pae || vcpu->arch.guest_state_protected)) in __set_sregs2()
11625 ret = __set_sregs_common(vcpu, (struct kvm_sregs *)sregs2, in __set_sregs2()
11632 kvm_pdptr_write(vcpu, i, sregs2->pdptrs[i]); in __set_sregs2()
11634 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in __set_sregs2()
11636 vcpu->arch.pdptrs_from_userspace = true; in __set_sregs2()
11639 kvm_mmu_reset_context(vcpu); in __set_sregs2()
11643 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
11648 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
11649 ret = __set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
11650 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
11657 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_guestdbg_update_apicv_inhibit() local
11665 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11666 if (vcpu->guest_debug & KVM_GUESTDBG_BLOCKIRQ) { in kvm_arch_vcpu_guestdbg_update_apicv_inhibit()
11675 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
11681 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_ioctl_set_guest_debug()
11684 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11688 if (kvm_is_exception_pending(vcpu)) in kvm_arch_vcpu_ioctl_set_guest_debug()
11691 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
11693 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
11700 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11702 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
11703 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
11704 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
11706 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
11708 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11709 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11712 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
11714 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11716 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
11717 vcpu->arch.singlestep_rip = kvm_get_linear_rip(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11723 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
11725 static_call(kvm_x86_update_exception_bitmap)(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
11727 kvm_arch_vcpu_guestdbg_update_apicv_inhibit(vcpu->kvm); in kvm_arch_vcpu_ioctl_set_guest_debug()
11732 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
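A hedged sketch of the userspace side of kvm_arch_vcpu_ioctl_set_guest_debug() above; set_hw_breakpoint() is an illustrative helper that arms one hardware breakpoint, so that dbg->arch.debugreg[] lands in vcpu->arch.eff_db[] and guest_debug_dr7 as the lines above show (the DR7 value assumes a 1-byte execute breakpoint in DR0):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_hw_breakpoint(int vcpu_fd, unsigned long addr)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = addr;
	dbg.arch.debugreg[7] = 0x1;	/* DR7.L0: enable DR0, len/rw = execute */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}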
11739 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
11746 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_translate()
11748 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
11749 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
11750 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
11756 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_translate()
11760 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
11764 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_get_fpu()
11767 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
11769 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
11779 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
11783 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
11787 if (fpstate_is_confidential(&vcpu->arch.guest_fpu)) in kvm_arch_vcpu_ioctl_set_fpu()
11790 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
11792 fxsave = &vcpu->arch.guest_fpu.fpstate->regs.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
11803 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
11807 static void store_regs(struct kvm_vcpu *vcpu) in store_regs() argument
11811 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
11812 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
11814 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
11815 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
11817 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
11819 vcpu, &vcpu->run->s.regs.events); in store_regs()
11822 static int sync_regs(struct kvm_vcpu *vcpu) in sync_regs() argument
11824 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
11825 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
11826 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
11829 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
11830 struct kvm_sregs sregs = vcpu->run->s.regs.sregs; in sync_regs()
11832 if (__set_sregs(vcpu, &sregs)) in sync_regs()
11835 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
11838 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
11839 struct kvm_vcpu_events events = vcpu->run->s.regs.events; in sync_regs()
11841 if (kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events)) in sync_regs()
11844 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
11865 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
11870 vcpu->arch.last_vmentry_cpu = -1; in kvm_arch_vcpu_create()
11871 vcpu->arch.regs_avail = ~0; in kvm_arch_vcpu_create()
11872 vcpu->arch.regs_dirty = ~0; in kvm_arch_vcpu_create()
11874 kvm_gpc_init(&vcpu->arch.pv_time, vcpu->kvm, vcpu, KVM_HOST_USES_PFN); in kvm_arch_vcpu_create()
11876 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
11877 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
11879 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
11881 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_create()
11885 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
11886 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); in kvm_arch_vcpu_create()
11891 * Defer evaluating inhibits until the vCPU is first run, as in kvm_arch_vcpu_create()
11892 * this vCPU will not get notified of any changes until this in kvm_arch_vcpu_create()
11893 * vCPU is visible to other vCPUs (marked online and added to in kvm_arch_vcpu_create()
11896 * Ignore the current per-VM APICv state so that vCPU creation in kvm_arch_vcpu_create()
11898 * will ensure the vCPU gets the correct state before VM-Entry. in kvm_arch_vcpu_create()
11901 vcpu->arch.apic->apicv_active = true; in kvm_arch_vcpu_create()
11902 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu); in kvm_arch_vcpu_create()
11912 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
11914 vcpu->arch.mce_banks = kcalloc(KVM_MAX_MCE_BANKS * 4, sizeof(u64), in kvm_arch_vcpu_create()
11916 vcpu->arch.mci_ctl2_banks = kcalloc(KVM_MAX_MCE_BANKS, sizeof(u64), in kvm_arch_vcpu_create()
11918 if (!vcpu->arch.mce_banks || !vcpu->arch.mci_ctl2_banks) in kvm_arch_vcpu_create()
11920 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
11922 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
11926 if (!alloc_emulate_ctxt(vcpu)) in kvm_arch_vcpu_create()
11929 if (!fpu_alloc_guest_fpstate(&vcpu->arch.guest_fpu)) { in kvm_arch_vcpu_create()
11930 pr_err("failed to allocate vcpu's fpu\n"); in kvm_arch_vcpu_create()
11934 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
11935 vcpu->arch.reserved_gpa_bits = kvm_vcpu_reserved_gpa_bits_raw(vcpu); in kvm_arch_vcpu_create()
11937 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
11939 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_create()
11941 vcpu->arch.perf_capabilities = kvm_caps.supported_perf_cap; in kvm_arch_vcpu_create()
11942 kvm_pmu_init(vcpu); in kvm_arch_vcpu_create()
11944 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
11945 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
11948 vcpu->arch.hv_root_tdp = INVALID_PAGE; in kvm_arch_vcpu_create()
11951 r = static_call(kvm_x86_vcpu_create)(vcpu); in kvm_arch_vcpu_create()
11955 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
11956 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
11957 kvm_xen_init_vcpu(vcpu); in kvm_arch_vcpu_create()
11958 kvm_vcpu_mtrr_init(vcpu); in kvm_arch_vcpu_create()
11959 vcpu_load(vcpu); in kvm_arch_vcpu_create()
11960 kvm_set_tsc_khz(vcpu, vcpu->kvm->arch.default_tsc_khz); in kvm_arch_vcpu_create()
11961 kvm_vcpu_reset(vcpu, false); in kvm_arch_vcpu_create()
11962 kvm_init_mmu(vcpu); in kvm_arch_vcpu_create()
11963 vcpu_put(vcpu); in kvm_arch_vcpu_create()
11967 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
11969 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
11971 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
11973 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
11974 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_create()
11975 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
11977 kvm_free_lapic(vcpu); in kvm_arch_vcpu_create()
11979 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_create()
11983 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
11985 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
11987 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
11989 vcpu_load(vcpu); in kvm_arch_vcpu_postcreate()
11990 kvm_synchronize_tsc(vcpu, 0); in kvm_arch_vcpu_postcreate()
11991 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
11994 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
11996 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
11998 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
12003 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
12007 kvmclock_reset(vcpu); in kvm_arch_vcpu_destroy()
12009 static_call(kvm_x86_vcpu_free)(vcpu); in kvm_arch_vcpu_destroy()
12011 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
12012 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
12013 fpu_free_guest_fpstate(&vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
12015 kvm_xen_destroy_vcpu(vcpu); in kvm_arch_vcpu_destroy()
12016 kvm_hv_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
12017 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
12018 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
12019 kfree(vcpu->arch.mci_ctl2_banks); in kvm_arch_vcpu_destroy()
12020 kvm_free_lapic(vcpu); in kvm_arch_vcpu_destroy()
12021 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
12022 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
12023 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
12024 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
12025 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
12026 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_destroy()
12030 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_vcpu_reset() argument
12033 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_vcpu_reset()
12040 * into hardware, to be zeroed at vCPU creation. Use CRs as a sentinel in kvm_vcpu_reset()
12044 (old_cr0 || kvm_read_cr3(vcpu) || kvm_read_cr4(vcpu))); in kvm_vcpu_reset()
12048 * possible to INIT the vCPU while L2 is active. Force the vCPU back in kvm_vcpu_reset()
12052 if (is_guest_mode(vcpu)) in kvm_vcpu_reset()
12053 kvm_leave_nested(vcpu); in kvm_vcpu_reset()
12055 kvm_lapic_reset(vcpu, init_event); in kvm_vcpu_reset()
12057 WARN_ON_ONCE(is_guest_mode(vcpu) || is_smm(vcpu)); in kvm_vcpu_reset()
12058 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
12060 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
12061 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
12062 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
12063 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
12064 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
12065 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
12066 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
12068 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
12069 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
12070 vcpu->arch.dr6 = DR6_ACTIVE_LOW; in kvm_vcpu_reset()
12071 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
12072 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
12074 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
12076 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
12077 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
12078 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
12079 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
12081 kvmclock_reset(vcpu); in kvm_vcpu_reset()
12083 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
12084 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
12085 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
12087 if (vcpu->arch.guest_fpu.fpstate && kvm_mpx_supported()) { in kvm_vcpu_reset()
12088 struct fpstate *fpstate = vcpu->arch.guest_fpu.fpstate; in kvm_vcpu_reset()
12095 kvm_put_guest_fpu(vcpu); in kvm_vcpu_reset()
12101 kvm_load_guest_fpu(vcpu); in kvm_vcpu_reset()
12105 kvm_pmu_reset(vcpu); in kvm_vcpu_reset()
12106 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
12108 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
12109 vcpu->arch.ia32_misc_enable_msr = MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | in kvm_vcpu_reset()
12112 __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP); in kvm_vcpu_reset()
12113 __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true); in kvm_vcpu_reset()
12117 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
12118 kvm_register_mark_dirty(vcpu, VCPU_REGS_RSP); in kvm_vcpu_reset()
12123 * RESET since KVM emulates RESET before exposing the vCPU to userspace, in kvm_vcpu_reset()
12127 cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1); in kvm_vcpu_reset()
12128 kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600); in kvm_vcpu_reset()
12130 static_call(kvm_x86_vcpu_reset)(vcpu, init_event); in kvm_vcpu_reset()
12132 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in kvm_vcpu_reset()
12133 kvm_rip_write(vcpu, 0xfff0); in kvm_vcpu_reset()
12135 vcpu->arch.cr3 = 0; in kvm_vcpu_reset()
12136 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); in kvm_vcpu_reset()
12149 static_call(kvm_x86_set_cr0)(vcpu, new_cr0); in kvm_vcpu_reset()
12150 static_call(kvm_x86_set_cr4)(vcpu, 0); in kvm_vcpu_reset()
12151 static_call(kvm_x86_set_efer)(vcpu, 0); in kvm_vcpu_reset()
12152 static_call(kvm_x86_update_exception_bitmap)(vcpu); in kvm_vcpu_reset()
12163 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_vcpu_reset()
12164 kvm_mmu_reset_context(vcpu); in kvm_vcpu_reset()
12177 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_vcpu_reset()
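The INIT/RESET fragments above end with the architectural power-on values: DR0-DR3 and CR2 cleared, DR6/DR7 set to their fixed reset values, RFLAGS reduced to its always-set bit, RIP at the real-mode reset offset, and RDX holding the CPUID.01H signature (or 0x600 if no such CPUID entry exists). As an annotation only, here is a small user-space sketch that gathers those constants into one place; the struct and helper are invented for illustration and are not KVM code.

/* Illustrative only: architectural reset values visible in the listing above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DR6_ACTIVE_LOW   0xFFFF0FF0ULL	/* DR6 reset value: B0-B3/BD/BS/BT clear */
#define DR7_FIXED_1      0x00000400ULL	/* DR7 bit 10 is reserved-to-1 */
#define X86_EFLAGS_FIXED 0x00000002ULL	/* RFLAGS bit 1 is always set */

struct reset_regs {
	uint64_t db[4], dr6, dr7, cr2, rflags, rip, rdx;
};

static void arch_reset(struct reset_regs *r, uint32_t cpuid_1_eax)
{
	memset(r, 0, sizeof(*r));	/* DR0-DR3, CR2 and the GPRs go to zero */
	r->dr6    = DR6_ACTIVE_LOW;
	r->dr7    = DR7_FIXED_1;
	r->rflags = X86_EFLAGS_FIXED;
	r->rip    = 0xfff0;		/* real-mode reset vector offset */
	/* RDX reports the CPU signature, 0x600 when no CPUID.01H entry exists. */
	r->rdx    = cpuid_1_eax ? cpuid_1_eax : 0x600;
}

int main(void)
{
	struct reset_regs r;

	arch_reset(&r, 0);
	printf("DR6=%#llx DR7=%#llx RFLAGS=%#llx RIP=%#llx RDX=%#llx\n",
	       (unsigned long long)r.dr6, (unsigned long long)r.dr7,
	       (unsigned long long)r.rflags, (unsigned long long)r.rip,
	       (unsigned long long)r.rdx);
	return 0;
}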
12181 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
12185 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
12188 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
12189 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
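kvm_vcpu_deliver_sipi_vector() points CS at the page selected by the SIPI vector and zeroes RIP. The arithmetic behind it is SDM-defined rather than anything extra in the listing: vector V gives real-mode CS:IP = (V << 8):0 with CS.base = V << 12, so the AP starts executing at physical address V * 4096. A minimal stand-alone model:

/* Illustrative model of the SIPI start-address computation. */
#include <stdint.h>
#include <stdio.h>

/* An AP started with SIPI vector V begins at CS:IP = (V << 8):0,
 * i.e. the 4 KiB-aligned physical address V << 12 (below 1 MiB). */
static uint32_t sipi_start_addr(uint8_t vector)
{
	uint16_t cs_selector = (uint16_t)vector << 8;
	uint32_t cs_base     = (uint32_t)vector << 12;
	uint16_t ip          = 0;

	(void)cs_selector;		/* selector and base stay consistent in real mode */
	return cs_base + ip;
}

int main(void)
{
	printf("SIPI vector 0x9a -> start %#x\n", sipi_start_addr(0x9a)); /* 0x9a000 */
	return 0;
}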
12196 struct kvm_vcpu *vcpu; in kvm_arch_hardware_enable() local
12216 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
12217 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
12218 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
12219 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
12221 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
12222 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
12239 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, in kvm_arch_hardware_enable()
12241 * adjustments, in case multiple suspend cycles happen before some VCPU in kvm_arch_hardware_enable()
12269 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
12270 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
12271 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
12272 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
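The kvm_arch_hardware_enable() fragments above detect a TSC that appears to have gone backwards (a vCPU's last_host_tsc exceeds the freshly read local TSC, typically across suspend) and later add the worst-case delta to every vCPU's tsc_offset_adjustment. The sketch below models only that max/delta arithmetic with plain arrays; it omits the stable-clock gate and the per-VM bookkeeping that the real code performs.

/* User-space model of the backwards-TSC compensation shown above. */
#include <stdint.h>
#include <stdio.h>

/* Returns the adjustment (delta_cyc) to add to every vCPU's TSC offset,
 * or 0 if no saved last_host_tsc exceeds the current local TSC. */
static uint64_t tsc_backwards_adjust(uint64_t local_tsc,
				     const uint64_t *last_host_tsc, int nr_vcpus)
{
	uint64_t max_tsc = 0;
	int i;

	for (i = 0; i < nr_vcpus; i++)
		if (last_host_tsc[i] > local_tsc && last_host_tsc[i] > max_tsc)
			max_tsc = last_host_tsc[i];

	return max_tsc ? max_tsc - local_tsc : 0;
}

int main(void)
{
	uint64_t last[3] = { 1000, 5000, 3000 };
	uint64_t delta = tsc_backwards_adjust(2000, last, 3);

	/* Each vCPU would accumulate 'delta' so guest time never moves backwards. */
	printf("delta_cyc = %llu\n", (unsigned long long)delta);
	return 0;
}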
12295 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_reset_bsp() argument
12297 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
12300 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_bsp() argument
12302 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
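kvm_vcpu_is_reset_bsp() compares against the vcpu_id userspace nominated as the boot CPU, while kvm_vcpu_is_bsp() trusts the BSP flag in the vCPU's IA32_APIC_BASE value, which is bit 8 of that MSR. A trivial model of the bit test, with the MSR value passed in directly:

/* Illustrative: the BSP flag is bit 8 of the IA32_APIC_BASE MSR value. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_APICBASE_BSP	(1ULL << 8)

static bool apic_base_is_bsp(uint64_t apic_base)
{
	return (apic_base & MSR_IA32_APICBASE_BSP) != 0;
}

int main(void)
{
	/* Typical BSP value: base 0xfee00000 with xAPIC enable (bit 11) and BSP (bit 8). */
	printf("%d\n", apic_base_is_bsp(0xfee00000ULL | (1ULL << 11) | (1ULL << 8)));
	return 0;
}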
12308 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_sched_in() argument
12310 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_arch_sched_in()
12312 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
12315 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_arch_sched_in()
12317 static_call(kvm_x86_sched_in)(vcpu, cpu); in kvm_arch_sched_in()
12394 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) in kvm_unload_vcpu_mmu() argument
12396 vcpu_load(vcpu); in kvm_unload_vcpu_mmu()
12397 kvm_mmu_unload(vcpu); in kvm_unload_vcpu_mmu()
12398 vcpu_put(vcpu); in kvm_unload_vcpu_mmu()
12404 struct kvm_vcpu *vcpu; in kvm_unload_vcpu_mmus() local
12406 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_unload_vcpu_mmus()
12407 kvm_clear_async_pf_completion_queue(vcpu); in kvm_unload_vcpu_mmus()
12408 kvm_unload_vcpu_mmu(vcpu); in kvm_unload_vcpu_mmus()
12642 struct kvm_vcpu *vcpu; in kvm_arch_memslots_updated() local
12652 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_memslots_updated()
12653 kvm_vcpu_kick(vcpu); in kvm_arch_memslots_updated()
12841 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) in kvm_guest_apic_has_interrupt() argument
12843 return (is_guest_mode(vcpu) && in kvm_guest_apic_has_interrupt()
12844 static_call(kvm_x86_guest_apic_has_interrupt)(vcpu)); in kvm_guest_apic_has_interrupt()
12847 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) in kvm_vcpu_has_events() argument
12849 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
12852 if (kvm_apic_has_pending_init_or_sipi(vcpu) && in kvm_vcpu_has_events()
12853 kvm_apic_init_sipi_allowed(vcpu)) in kvm_vcpu_has_events()
12856 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
12859 if (kvm_is_exception_pending(vcpu)) in kvm_vcpu_has_events()
12862 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_vcpu_has_events()
12863 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
12864 static_call(kvm_x86_nmi_allowed)(vcpu, false))) in kvm_vcpu_has_events()
12868 if (kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_vcpu_has_events()
12869 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
12870 static_call(kvm_x86_smi_allowed)(vcpu, false))) in kvm_vcpu_has_events()
12874 if (kvm_test_request(KVM_REQ_PMI, vcpu)) in kvm_vcpu_has_events()
12877 if (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_has_events()
12878 (kvm_cpu_has_interrupt(vcpu) || in kvm_vcpu_has_events()
12879 kvm_guest_apic_has_interrupt(vcpu))) in kvm_vcpu_has_events()
12882 if (kvm_hv_has_stimer_pending(vcpu)) in kvm_vcpu_has_events()
12885 if (is_guest_mode(vcpu) && in kvm_vcpu_has_events()
12887 kvm_x86_ops.nested_ops->has_events(vcpu, false)) in kvm_vcpu_has_events()
12890 if (kvm_xen_has_pending_events(vcpu)) in kvm_vcpu_has_events()
12896 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
12898 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); in kvm_arch_vcpu_runnable()
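kvm_vcpu_has_events() is a flat disjunction over every wakeup source KVM tracks, and kvm_arch_vcpu_runnable() then ORs it with the running check. The sketch below restates that shape over a plain struct of booleans so the structure is visible without the per-source helpers; the field names are mine, not KVM's, and the "and allowed" conditions are collapsed into single flags.

/* Simplified model of the "does this vCPU have a reason to wake up" check. */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_event_state {		/* field names are illustrative */
	bool async_pf_done;
	bool init_or_sipi_pending;	/* pending and allowed in the current mode */
	bool pv_unhalted;
	bool exception_pending;
	bool nmi_pending_and_allowed;
	bool smi_pending_and_allowed;
	bool pmi_pending;
	bool interrupt_pending_and_allowed;
	bool hv_stimer_pending;
	bool nested_event_pending;
	bool xen_event_pending;
};

static bool vcpu_has_events(const struct vcpu_event_state *s)
{
	return s->async_pf_done || s->init_or_sipi_pending || s->pv_unhalted ||
	       s->exception_pending || s->nmi_pending_and_allowed ||
	       s->smi_pending_and_allowed || s->pmi_pending ||
	       s->interrupt_pending_and_allowed || s->hv_stimer_pending ||
	       s->nested_event_pending || s->xen_event_pending;
}

static bool vcpu_runnable(bool running, const struct vcpu_event_state *s)
{
	return running || vcpu_has_events(s);
}

int main(void)
{
	struct vcpu_event_state s = { .pmi_pending = true };

	printf("runnable = %d\n", vcpu_runnable(false, &s));
	return 0;
}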
12901 bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu) in kvm_arch_dy_has_pending_interrupt() argument
12903 if (kvm_vcpu_apicv_active(vcpu) && in kvm_arch_dy_has_pending_interrupt()
12904 static_call(kvm_x86_dy_apicv_has_pending_interrupt)(vcpu)) in kvm_arch_dy_has_pending_interrupt()
12910 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
12912 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
12915 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_arch_dy_runnable()
12917 kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_arch_dy_runnable()
12919 kvm_test_request(KVM_REQ_EVENT, vcpu)) in kvm_arch_dy_runnable()
12922 return kvm_arch_dy_has_pending_interrupt(vcpu); in kvm_arch_dy_runnable()
12925 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
12927 if (vcpu->arch.guest_state_protected) in kvm_arch_vcpu_in_kernel()
12930 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
12933 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_get_ip() argument
12935 return kvm_rip_read(vcpu); in kvm_arch_vcpu_get_ip()
12938 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
12940 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
12943 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
12945 return static_call(kvm_x86_interrupt_allowed)(vcpu, false); in kvm_arch_interrupt_allowed()
12948 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
12951 if (vcpu->arch.guest_state_protected) in kvm_get_linear_rip()
12954 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
12955 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
12956 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
12957 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
12961 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
12963 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
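kvm_get_linear_rip() returns the raw RIP in 64-bit mode (where CS.base is ignored), otherwise the 32-bit truncation of CS.base + RIP, and 0 for protected guests whose state is not visible; kvm_is_linear_rip() just compares against a cached value. The helper below models the computation, assuming the caller already knows the mode and CS base:

/* Illustrative model of the linear-RIP value used for single-step tracking. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t linear_rip(bool long_mode, uint64_t cs_base, uint64_t rip)
{
	if (long_mode)
		return rip;			/* CS.base is ignored in 64-bit mode */
	return (uint32_t)(cs_base + rip);	/* truncated to 32 bits, as in the listing */
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)linear_rip(false, 0xf0000, 0xfff0));
	return 0;
}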
12967 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
12971 rflags = static_call(kvm_x86_get_rflags)(vcpu); in kvm_get_rflags()
12972 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
12978 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
12980 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
12981 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
12983 static_call(kvm_x86_set_rflags)(vcpu, rflags); in __kvm_set_rflags()
12986 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
12988 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
12989 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
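The RFLAGS accessors above hide the trap flag from userspace while KVM_GUESTDBG_SINGLESTEP is active, and re-assert it on writes as long as RIP is still at the recorded singlestep_rip. A compact model of that filtering follows; X86_EFLAGS_TF is bit 8, and the "hardware" value is just a local field standing in for the VMCS/VMCB copy.

/* Illustrative model of TF hiding/re-injection during userspace single-step. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_EFLAGS_TF	(1ULL << 8)

struct ss_state {
	bool     singlestep_active;	/* KVM_GUESTDBG_SINGLESTEP set */
	uint64_t singlestep_rip;	/* linear RIP where the step was armed */
	uint64_t hw_rflags;		/* stands in for the value loaded into hardware */
};

static uint64_t get_rflags(const struct ss_state *s)
{
	uint64_t rflags = s->hw_rflags;

	if (s->singlestep_active)
		rflags &= ~X86_EFLAGS_TF;	/* don't leak the injected TF */
	return rflags;
}

static void set_rflags(struct ss_state *s, uint64_t rflags, uint64_t cur_linear_rip)
{
	if (s->singlestep_active && cur_linear_rip == s->singlestep_rip)
		rflags |= X86_EFLAGS_TF;	/* keep stepping from the armed RIP */
	s->hw_rflags = rflags;
}

int main(void)
{
	struct ss_state s = {
		.singlestep_active = true,
		.singlestep_rip    = 0x1000,
		.hw_rflags         = X86_EFLAGS_TF | 0x2,
	};

	printf("guest-visible rflags = %#llx\n", (unsigned long long)get_rflags(&s));
	set_rflags(&s, 0x2, 0x1000);
	printf("hardware rflags      = %#llx\n", (unsigned long long)s.hw_rflags);
	return 0;
}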
13005 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
13009 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
13012 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
13015 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
13021 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
13022 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
13028 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
13030 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
13033 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
13037 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
13039 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
13043 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
13046 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
13048 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
13055 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
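The four helpers above implement a small open-addressed hash table of outstanding async-page-fault gfns: ~0 marks an empty slot, lookups probe linearly, and deletion re-packs the probe chain instead of leaving tombstones. The self-contained model below reproduces that behaviour with a toy multiplicative hash over a 64-slot table; the kernel uses hash_32() and ASYNC_PF_PER_VCPU, both simplified here.

/* Stand-alone model of the async-PF gfn hash: open addressing, linear probing,
 * ~0 as the empty marker, and chain-repacking deletion (no tombstones). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64U			/* ASYNC_PF_PER_VCPU in the kernel */
#define EMPTY  (~0ULL)

static uint64_t table[NSLOTS];

static uint32_t hash_fn(uint64_t gfn)
{
	return (uint32_t)(gfn * 2654435761ULL) & (NSLOTS - 1);	/* toy hash */
}

static uint32_t next_probe(uint32_t key)
{
	return (key + 1) & (NSLOTS - 1);
}

static uint32_t slot_of(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);
	uint32_t i;

	for (i = 0; i < NSLOTS && table[key] != gfn && table[key] != EMPTY; i++)
		key = next_probe(key);
	return key;
}

static void add_gfn(uint64_t gfn)
{
	uint32_t key = hash_fn(gfn);

	while (table[key] != EMPTY)
		key = next_probe(key);
	table[key] = gfn;
}

static bool find_gfn(uint64_t gfn)
{
	return table[slot_of(gfn)] == gfn;
}

static void del_gfn(uint64_t gfn)
{
	uint32_t i, j, k;

	i = j = slot_of(gfn);
	if (table[i] != gfn)
		return;

	/* Move later members of the probe chain back so lookups that would
	 * have walked across the freed slot still terminate correctly. */
	while (true) {
		table[i] = EMPTY;
		do {
			j = next_probe(j);
			if (table[j] == EMPTY)
				return;
			k = hash_fn(table[j]);
			/* keep walking while k lies cyclically in (i, j] */
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		table[i] = table[j];
		i = j;
	}
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < NSLOTS; i++)
		table[i] = EMPTY;

	add_gfn(0x100);
	add_gfn(0x100 + NSLOTS);	/* collides with 0x100 under the toy hash */
	del_gfn(0x100);
	printf("0x%x present: %d\n", 0x100 + NSLOTS, find_gfn(0x100 + NSLOTS));
	return 0;
}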
13060 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) in apf_put_user_notpresent() argument
13064 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
13068 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) in apf_put_user_ready() argument
13072 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
13076 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) in apf_pageready_slot_free() argument
13081 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
13088 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) in kvm_can_deliver_async_pf() argument
13091 if (!kvm_pv_async_pf_enabled(vcpu)) in kvm_can_deliver_async_pf()
13094 if (vcpu->arch.apf.send_user_only && in kvm_can_deliver_async_pf()
13095 static_call(kvm_x86_get_cpl)(vcpu) == 0) in kvm_can_deliver_async_pf()
13098 if (is_guest_mode(vcpu)) { in kvm_can_deliver_async_pf()
13103 return vcpu->arch.apf.delivery_as_pf_vmexit; in kvm_can_deliver_async_pf()
13110 return is_paging(vcpu); in kvm_can_deliver_async_pf()
13114 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) in kvm_can_do_async_pf() argument
13116 if (unlikely(!lapic_in_kernel(vcpu) || in kvm_can_do_async_pf()
13117 kvm_event_needs_reinjection(vcpu) || in kvm_can_do_async_pf()
13118 kvm_is_exception_pending(vcpu))) in kvm_can_do_async_pf()
13121 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
13128 return kvm_arch_interrupt_allowed(vcpu); in kvm_can_do_async_pf()
13131 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
13137 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
13139 if (kvm_can_deliver_async_pf(vcpu) && in kvm_arch_async_page_not_present()
13140 !apf_put_user_notpresent(vcpu)) { in kvm_arch_async_page_not_present()
13147 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
13158 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
13163 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
13168 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
13174 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
13178 kvm_pv_async_pf_enabled(vcpu) && in kvm_arch_async_page_present()
13179 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
13180 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
13181 kvm_apic_set_irq(vcpu, &irq, NULL); in kvm_arch_async_page_present()
13184 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
13185 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
13188 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) in kvm_arch_async_page_present_queued() argument
13190 kvm_make_request(KVM_REQ_APF_READY, vcpu); in kvm_arch_async_page_present_queued()
13191 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
13192 kvm_vcpu_kick(vcpu); in kvm_arch_async_page_present_queued()
13195 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_dequeue_async_page_present() argument
13197 if (!kvm_pv_async_pf_enabled(vcpu)) in kvm_arch_can_dequeue_async_page_present()
13200 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); in kvm_arch_can_dequeue_async_page_present()
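Taken together, the handlers above are the two halves of the paravirtual async-PF flow visible in this listing: on "page not present" KVM records the gfn, writes a token into the guest-registered area and injects a #PF-style notification, or parks the vCPU (KVM_REQ_APF_HALT) when delivery is not possible; on "page ready" it drops the gfn, writes the token into the shared slot if that slot is free and raises the interrupt vector the guest configured. The sketch below is only a caricature of that flow; the shared-area layout and every name in it are placeholders, not the real guest ABI.

/* Caricature of the async-PF notify/complete flow; not the real guest ABI. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct apf_shared {		/* placeholder for the guest-registered area */
	uint32_t token;		/* 0 means "page-ready slot free" */
};

struct vcpu_model {
	struct apf_shared shared;
	bool halted;		/* vCPU parked until the page is ready */
	int  pending_vector;	/* interrupt to deliver, 0 = none */
	int  notify_vector;	/* vector the guest configured for "page ready" */
};

/* Host side: the page is not resident; tell the guest or halt it. */
static void page_not_present(struct vcpu_model *v, uint32_t token, bool can_notify)
{
	if (can_notify)
		printf("inject 'not present' notification, token=%u\n", token);
	else
		v->halted = true;	/* KVM_REQ_APF_HALT in the listing */
}

/* Host side: the page arrived; hand the token back and wake the vCPU. */
static void page_ready(struct vcpu_model *v, uint32_t token)
{
	if (v->shared.token == 0) {		/* apf_pageready_slot_free() */
		v->shared.token = token;
		v->pending_vector = v->notify_vector;
	}
	v->halted = false;			/* back to KVM_MP_STATE_RUNNABLE */
}

int main(void)
{
	struct vcpu_model v = { .notify_vector = 0xec };

	page_not_present(&v, 42, true);
	page_ready(&v, 42);
	printf("vector=%#x token=%u halted=%d\n",
	       (unsigned)v.pending_vector, v.shared.token, v.halted);
	return 0;
}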
13307 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) in kvm_arch_no_poll() argument
13309 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
13340 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) in kvm_fixup_and_inject_pf_error() argument
13342 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in kvm_fixup_and_inject_pf_error()
13348 mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != INVALID_GPA) { in kvm_fixup_and_inject_pf_error()
13350 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
13361 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
13370 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, in kvm_handle_memory_failure() argument
13374 if (KVM_BUG_ON(!e, vcpu->kvm)) in kvm_handle_memory_failure()
13377 kvm_inject_emulated_page_fault(vcpu, e); in kvm_handle_memory_failure()
13388 kvm_prepare_emulation_failure_exit(vcpu); in kvm_handle_memory_failure()
13394 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) in kvm_handle_invpcid() argument
13404 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); in kvm_handle_invpcid()
13406 return kvm_handle_memory_failure(vcpu, r, &e); in kvm_handle_invpcid()
13409 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13413 pcid_enabled = kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE); in kvm_handle_invpcid()
13418 is_noncanonical_address(operand.gla, vcpu)) { in kvm_handle_invpcid()
13419 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13422 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); in kvm_handle_invpcid()
13423 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
13427 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
13431 kvm_invalidate_pcid(vcpu, operand.pcid); in kvm_handle_invpcid()
13432 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
13444 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu); in kvm_handle_invpcid()
13445 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
13448 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
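kvm_handle_invpcid() reads a 16-byte descriptor from guest memory and validates it before the per-type invalidation: the first quadword is the PCID (bits 63:12 reserved), the second the linear address, which must be canonical for the individual-address type, and a non-zero PCID is only legal with CR4.PCIDE set. Below is a stand-alone validity check for that descriptor, modelling only the individual-address type and assuming 48-bit canonical addresses (LA57 would widen the check).

/* Illustrative validation of an INVPCID descriptor (individual-address type). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct invpcid_desc {
	uint64_t pcid;	/* only bits 11:0 may be non-zero */
	uint64_t gla;	/* linear address, used by the individual-address type */
};

static bool is_canonical_48(uint64_t addr)
{
	/* bits 63:47 must all equal bit 47 */
	return ((int64_t)(addr << 16) >> 16) == (int64_t)addr;
}

static bool invpcid_addr_desc_valid(const struct invpcid_desc *d, bool cr4_pcide)
{
	if (d->pcid >> 12)			/* reserved PCID bits set -> #GP */
		return false;
	if (d->pcid && !cr4_pcide)		/* non-zero PCID needs CR4.PCIDE */
		return false;
	return is_canonical_48(d->gla);		/* non-canonical address -> #GP */
}

int main(void)
{
	struct invpcid_desc d = { .pcid = 1, .gla = 0x00007fffdeadb000ULL };

	printf("valid = %d\n", invpcid_addr_desc_valid(&d, true));
	return 0;
}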
13454 static int complete_sev_es_emulated_mmio(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_mmio() argument
13456 struct kvm_run *run = vcpu->run; in complete_sev_es_emulated_mmio()
13460 BUG_ON(!vcpu->mmio_needed); in complete_sev_es_emulated_mmio()
13463 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_sev_es_emulated_mmio()
13465 if (!vcpu->mmio_is_write) in complete_sev_es_emulated_mmio()
13471 vcpu->mmio_cur_fragment++; in complete_sev_es_emulated_mmio()
13479 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_sev_es_emulated_mmio()
13480 vcpu->mmio_needed = 0; in complete_sev_es_emulated_mmio()
13490 run->mmio.is_write = vcpu->mmio_is_write; in complete_sev_es_emulated_mmio()
13495 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in complete_sev_es_emulated_mmio()
13500 int kvm_sev_es_mmio_write(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, in kvm_sev_es_mmio_write() argument
13509 handled = write_emultor.read_write_mmio(vcpu, gpa, bytes, data); in kvm_sev_es_mmio_write()
13518 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_write()
13519 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_write()
13524 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_write()
13525 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_write()
13527 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_write()
13528 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_write()
13529 vcpu->run->mmio.is_write = 1; in kvm_sev_es_mmio_write()
13530 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in kvm_sev_es_mmio_write()
13531 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_write()
13533 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_write()
13539 int kvm_sev_es_mmio_read(struct kvm_vcpu *vcpu, gpa_t gpa, unsigned int bytes, in kvm_sev_es_mmio_read() argument
13548 handled = read_emultor.read_write_mmio(vcpu, gpa, bytes, data); in kvm_sev_es_mmio_read()
13557 frag = vcpu->mmio_fragments; in kvm_sev_es_mmio_read()
13558 vcpu->mmio_nr_fragments = 1; in kvm_sev_es_mmio_read()
13563 vcpu->mmio_needed = 1; in kvm_sev_es_mmio_read()
13564 vcpu->mmio_cur_fragment = 0; in kvm_sev_es_mmio_read()
13566 vcpu->run->mmio.phys_addr = gpa; in kvm_sev_es_mmio_read()
13567 vcpu->run->mmio.len = min(8u, frag->len); in kvm_sev_es_mmio_read()
13568 vcpu->run->mmio.is_write = 0; in kvm_sev_es_mmio_read()
13569 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvm_sev_es_mmio_read()
13571 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_mmio; in kvm_sev_es_mmio_read()
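For SEV-ES guests KVM cannot decode instructions of the encrypted guest, so the MMIO data arrives via the GHCB protocol and kvm_sev_es_mmio_read()/write() forward whatever the in-kernel buses cannot handle to userspace in at most 8-byte KVM_EXIT_MMIO chunks, with complete_sev_es_emulated_mmio() advancing through the fragment on each re-entry. The loop below models just that chunking arithmetic; the callback structure is flattened into a loop and there is no real kvm_run here.

/* Model of the 8-byte-per-exit MMIO chunking used for SEV-ES userspace exits. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct mmio_exit {			/* stands in for kvm_run->mmio */
	uint64_t phys_addr;
	uint8_t  data[8];
	uint32_t len;
	int      is_write;
};

static void emulate_mmio_write(uint64_t gpa, const uint8_t *buf, unsigned int bytes)
{
	unsigned int done = 0;

	while (done < bytes) {
		struct mmio_exit exit;
		unsigned int chunk = bytes - done;

		if (chunk > 8)
			chunk = 8;	/* min(8u, frag->len) in the listing */

		exit.phys_addr = gpa + done;
		exit.len = chunk;
		exit.is_write = 1;
		memcpy(exit.data, buf + done, chunk);

		/* In KVM this is one KVM_EXIT_MMIO round trip to userspace,
		 * resumed via complete_sev_es_emulated_mmio(). */
		printf("MMIO write gpa=%#llx len=%u\n",
		       (unsigned long long)exit.phys_addr, exit.len);

		done += chunk;
	}
}

int main(void)
{
	uint8_t buf[20] = { 0 };

	emulate_mmio_write(0xfed00000ULL, buf, sizeof(buf));	/* 8 + 8 + 4 bytes */
	return 0;
}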
13577 static void advance_sev_es_emulated_pio(struct kvm_vcpu *vcpu, unsigned count, int size) in advance_sev_es_emulated_pio() argument
13579 vcpu->arch.sev_pio_count -= count; in advance_sev_es_emulated_pio()
13580 vcpu->arch.sev_pio_data += count * size; in advance_sev_es_emulated_pio()
13583 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size,
13586 static int complete_sev_es_emulated_outs(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_outs() argument
13588 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_outs()
13589 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_outs()
13591 vcpu->arch.pio.count = 0; in complete_sev_es_emulated_outs()
13592 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_outs()
13593 return kvm_sev_es_outs(vcpu, size, port); in complete_sev_es_emulated_outs()
13597 static int kvm_sev_es_outs(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_outs() argument
13602 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_outs()
13603 int ret = emulator_pio_out(vcpu, size, port, vcpu->arch.sev_pio_data, count); in kvm_sev_es_outs()
13606 advance_sev_es_emulated_pio(vcpu, count, size); in kvm_sev_es_outs()
13611 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_outs()
13615 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_outs; in kvm_sev_es_outs()
13619 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size,
13622 static int complete_sev_es_emulated_ins(struct kvm_vcpu *vcpu) in complete_sev_es_emulated_ins() argument
13624 unsigned count = vcpu->arch.pio.count; in complete_sev_es_emulated_ins()
13625 int size = vcpu->arch.pio.size; in complete_sev_es_emulated_ins()
13626 int port = vcpu->arch.pio.port; in complete_sev_es_emulated_ins()
13628 complete_emulator_pio_in(vcpu, vcpu->arch.sev_pio_data); in complete_sev_es_emulated_ins()
13629 advance_sev_es_emulated_pio(vcpu, count, size); in complete_sev_es_emulated_ins()
13630 if (vcpu->arch.sev_pio_count) in complete_sev_es_emulated_ins()
13631 return kvm_sev_es_ins(vcpu, size, port); in complete_sev_es_emulated_ins()
13635 static int kvm_sev_es_ins(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_ins() argument
13640 min_t(unsigned int, PAGE_SIZE / size, vcpu->arch.sev_pio_count); in kvm_sev_es_ins()
13641 if (!emulator_pio_in(vcpu, size, port, vcpu->arch.sev_pio_data, count)) in kvm_sev_es_ins()
13645 advance_sev_es_emulated_pio(vcpu, count, size); in kvm_sev_es_ins()
13646 if (!vcpu->arch.sev_pio_count) in kvm_sev_es_ins()
13650 vcpu->arch.complete_userspace_io = complete_sev_es_emulated_ins; in kvm_sev_es_ins()
13654 int kvm_sev_es_string_io(struct kvm_vcpu *vcpu, unsigned int size, in kvm_sev_es_string_io() argument
13658 vcpu->arch.sev_pio_data = data; in kvm_sev_es_string_io()
13659 vcpu->arch.sev_pio_count = count; in kvm_sev_es_string_io()
13660 return in ? kvm_sev_es_ins(vcpu, size, port) in kvm_sev_es_string_io()
13661 : kvm_sev_es_outs(vcpu, size, port); in kvm_sev_es_string_io()
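String PIO for SEV-ES guests follows the same pattern: kvm_sev_es_string_io() stashes the data pointer and element count, kvm_sev_es_outs()/ins() emit at most PAGE_SIZE/size elements per round, advance_sev_es_emulated_pio() moves the cursor, and the complete_* callbacks re-enter until sev_pio_count reaches zero. The sketch below collapses that callback chain into a loop to show the chunking; PAGE_SIZE and the printf standing in for the actual port I/O are simplifications.

/* Model of the chunked SEV-ES string PIO loop (callbacks flattened into a loop). */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096U

static void sev_es_string_out(uint16_t port, const uint8_t *data,
			      unsigned int size, unsigned int count)
{
	while (count) {
		/* At most one page worth of elements per emulated round, mirroring
		 * min_t(unsigned int, PAGE_SIZE / size, sev_pio_count). */
		unsigned int chunk = PAGE_SIZE / size;

		if (chunk > count)
			chunk = count;

		/* Stands in for emulator_pio_out(); a round that cannot complete
		 * in the kernel becomes one userspace exit before resuming here. */
		printf("OUT port=%#x size=%u elems=%u first=%u\n",
		       (unsigned)port, size, chunk, (unsigned)data[0]);

		data  += (size_t)chunk * size;	/* advance_sev_es_emulated_pio() */
		count -= chunk;
	}
}

int main(void)
{
	uint8_t buf[3 * PAGE_SIZE] = { 0 };

	sev_es_string_out(0x3f8, buf, 2, 3000);	/* emitted as 2048 + 952 elements */
	return 0;
}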