Lines Matching full:vcpu
88 ((struct kvm_vcpu *)(ctxt)->vcpu)
106 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
107 static void process_nmi(struct kvm_vcpu *vcpu);
108 static void process_smi(struct kvm_vcpu *vcpu);
109 static void enter_smm(struct kvm_vcpu *vcpu);
110 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
111 static void store_regs(struct kvm_vcpu *vcpu);
112 static int sync_regs(struct kvm_vcpu *vcpu);
262 static bool kvm_msr_ignored_check(struct kvm_vcpu *vcpu, u32 msr, in kvm_msr_ignored_check() argument
293 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
297 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
398 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
400 return vcpu->arch.apic_base; in kvm_get_apic_base()
404 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) in kvm_get_apic_mode() argument
406 return kvm_apic_mode(kvm_get_apic_base(vcpu)); in kvm_get_apic_mode()
410 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
412 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); in kvm_set_apic_base()
414 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | in kvm_set_apic_base()
415 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); in kvm_set_apic_base()
426 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
427 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
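Editor's note: the reserved-bit construction matched in kvm_set_apic_base() above can be summarized with a minimal standalone sketch; the helper name and the x2APIC-enable constant below are illustrative stand-ins, not kernel definitions.

#include <stdbool.h>
#include <stdint.h>

#define SK_X2APIC_ENABLE (1ULL << 10)   /* architectural APIC_BASE bit 10 */

static bool sk_apic_base_valid(uint64_t value, int maxphyaddr, bool has_x2apic)
{
    uint64_t reserved = (~0ULL << maxphyaddr) | 0x2ffULL |
                        (has_x2apic ? 0 : SK_X2APIC_ENABLE);

    /* Any bit above MAXPHYADDR, the low reserved bits, or x2APIC enable
     * without x2APIC in guest CPUID makes the new base invalid. */
    return (value & reserved) == 0;
}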
492 void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu) in kvm_deliver_exception_payload() argument
494 unsigned nr = vcpu->arch.exception.nr; in kvm_deliver_exception_payload()
495 bool has_payload = vcpu->arch.exception.has_payload; in kvm_deliver_exception_payload()
496 unsigned long payload = vcpu->arch.exception.payload; in kvm_deliver_exception_payload()
508 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
512 vcpu->arch.dr6 |= DR6_RTM; in kvm_deliver_exception_payload()
513 vcpu->arch.dr6 |= payload; in kvm_deliver_exception_payload()
522 vcpu->arch.dr6 ^= payload & DR6_RTM; in kvm_deliver_exception_payload()
530 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
533 vcpu->arch.cr2 = payload; in kvm_deliver_exception_payload()
537 vcpu->arch.exception.has_payload = false; in kvm_deliver_exception_payload()
538 vcpu->arch.exception.payload = 0; in kvm_deliver_exception_payload()
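Editor's note: the #DB and #PF cases matched in kvm_deliver_exception_payload() fold the pending payload into DR6 or CR2. A simplified standalone model of those two cases (constants are local stand-ins):

#include <stdint.h>

#define SK_DB_VECTOR    1
#define SK_PF_VECTOR    14
#define SK_DR_TRAP_BITS 0xfULL          /* DR6.B0-B3 */
#define SK_DR6_RTM      (1ULL << 16)    /* reads as 1 outside an RTM region */

struct sk_vcpu { uint64_t dr6, cr2; };

static void sk_deliver_payload(struct sk_vcpu *v, int nr, uint64_t payload)
{
    switch (nr) {
    case SK_DB_VECTOR:
        v->dr6 &= ~SK_DR_TRAP_BITS;      /* drop stale breakpoint bits */
        v->dr6 |= SK_DR6_RTM;            /* start from the "not in RTM" value */
        v->dr6 |= payload;
        v->dr6 ^= payload & SK_DR6_RTM;  /* payload bit 16 has inverted polarity */
        v->dr6 &= ~(1ULL << 12);         /* bit 12 is reserved, keep it clear */
        break;
    case SK_PF_VECTOR:
        v->cr2 = payload;                /* faulting linear address */
        break;
    }
}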
542 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, in kvm_multiple_exception() argument
549 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
551 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
555 * On vmentry, vcpu->arch.exception.pending is only in kvm_multiple_exception()
562 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
563 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
573 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
574 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
576 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
577 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
578 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
579 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
580 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
581 if (!is_guest_mode(vcpu)) in kvm_multiple_exception()
582 kvm_deliver_exception_payload(vcpu); in kvm_multiple_exception()
587 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
590 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
602 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
603 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
604 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
605 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
606 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
607 vcpu->arch.exception.has_payload = false; in kvm_multiple_exception()
608 vcpu->arch.exception.payload = 0; in kvm_multiple_exception()
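Editor's note: kvm_multiple_exception() above either records the new exception, promotes the pair to a double fault, or requests a triple fault. A rough standalone model of that escalation policy, based on the architectural rules rather than the kernel helpers elided from this listing (benign exceptions serialize; two contributory exceptions, or a page fault followed by a non-benign one, become #DF; a further fault while delivering #DF shuts the VM down):

#include <stdbool.h>

#define SK_DF_VECTOR 8
#define SK_PF_VECTOR 14

enum sk_action { SK_QUEUE_NEW, SK_QUEUE_DF, SK_TRIPLE_FAULT };

/* Contributory exceptions: #DE, #TS, #NP, #SS, #GP. */
static bool sk_contributory(int nr)
{
    return nr == 0 || (nr >= 10 && nr <= 13);
}

static enum sk_action sk_on_second_exception(int prev_nr, int new_nr)
{
    if (prev_nr == SK_DF_VECTOR)
        return SK_TRIPLE_FAULT;
    if ((sk_contributory(prev_nr) && sk_contributory(new_nr)) ||
        (prev_nr == SK_PF_VECTOR &&
         (sk_contributory(new_nr) || new_nr == SK_PF_VECTOR)))
        return SK_QUEUE_DF;
    return SK_QUEUE_NEW;
}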
616 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
618 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, false); in kvm_queue_exception()
622 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_requeue_exception() argument
624 kvm_multiple_exception(vcpu, nr, false, 0, false, 0, true); in kvm_requeue_exception()
628 void kvm_queue_exception_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_p() argument
631 kvm_multiple_exception(vcpu, nr, false, 0, true, payload, false); in kvm_queue_exception_p()
635 static void kvm_queue_exception_e_p(struct kvm_vcpu *vcpu, unsigned nr, in kvm_queue_exception_e_p() argument
638 kvm_multiple_exception(vcpu, nr, true, error_code, in kvm_queue_exception_e_p()
642 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
645 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
647 return kvm_skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
653 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
655 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
656 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
657 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
658 if (vcpu->arch.exception.nested_apf) { in kvm_inject_page_fault()
659 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
660 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
662 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
668 bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu, in kvm_inject_emulated_page_fault() argument
674 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
675 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
683 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
686 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
691 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
693 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
694 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
698 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
700 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, false); in kvm_queue_exception_e()
704 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_requeue_exception_e() argument
706 kvm_multiple_exception(vcpu, nr, true, error_code, false, 0, true); in kvm_requeue_exception_e()
714 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
716 if (kvm_x86_ops.get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
718 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
723 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
725 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) in kvm_require_dr()
728 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
738 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_read_guest_page_mmu() argument
747 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
753 return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len); in kvm_read_guest_page_mmu()
757 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page() argument
760 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
764 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) in pdptr_rsvd_bits() argument
766 return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | in pdptr_rsvd_bits()
773 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) in load_pdptrs() argument
781 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, in load_pdptrs()
790 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { in load_pdptrs()
798 kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR); in load_pdptrs()
806 bool pdptrs_changed(struct kvm_vcpu *vcpu) in pdptrs_changed() argument
808 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
813 if (!is_pae_paging(vcpu)) in pdptrs_changed()
816 if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR)) in pdptrs_changed()
819 gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT; in pdptrs_changed()
820 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); in pdptrs_changed()
821 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), in pdptrs_changed()
826 return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
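Editor's note: load_pdptrs() and pdptr_rsvd_bits() above implement the PAE PDPTE validity rule. The listing truncates the reserved-bit expression; architecturally, bits 1-2, 5-8 and everything at or above MAXPHYADDR are reserved in a PDPTE. A self-contained sketch of the check:

#include <stdbool.h>
#include <stdint.h>

/* Mask covering bits lo..hi inclusive. */
static uint64_t sk_rsvd_bits(int lo, int hi)
{
    return ((~0ULL) << lo) & (~0ULL >> (63 - hi));
}

static bool sk_pdpte_ok(uint64_t pdpte, int maxphyaddr)
{
    uint64_t rsvd = sk_rsvd_bits(maxphyaddr, 63) |
                    sk_rsvd_bits(5, 8) | sk_rsvd_bits(1, 2);

    /* Not-present entries are ignored; present ones must be clean. */
    return !(pdpte & 1) || !(pdpte & rsvd);
}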
830 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
832 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
852 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
856 if (!is_pae(vcpu)) in kvm_set_cr0()
858 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
863 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
864 is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) && in kvm_set_cr0()
865 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
868 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) in kvm_set_cr0()
871 kvm_x86_ops.set_cr0(vcpu, cr0); in kvm_set_cr0()
874 kvm_clear_async_pf_completion_queue(vcpu); in kvm_set_cr0()
875 kvm_async_pf_hash_reset(vcpu); in kvm_set_cr0()
879 kvm_mmu_reset_context(vcpu); in kvm_set_cr0()
882 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
883 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
884 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
890 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
892 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
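Editor's note: the single expression matched in kvm_lmsw() encodes the LMSW semantics: only the low four CR0 bits are affected, and because the existing PE bit is kept and then OR-ed, LMSW can set PE but never clear it. As a standalone helper:

#include <stdint.h>

static uint64_t sk_lmsw(uint64_t old_cr0, uint16_t msw)
{
    /* Keep everything except CR0[3:1], then merge the new low nibble. */
    return (old_cr0 & ~0x0eULL) | (msw & 0x0fULL);
}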
896 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_guest_xsave_state() argument
898 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_guest_xsave_state()
900 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
901 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
903 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
904 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
905 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
909 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || in kvm_load_guest_xsave_state()
910 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && in kvm_load_guest_xsave_state()
911 vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_guest_xsave_state()
912 __write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
916 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu) in kvm_load_host_xsave_state() argument
919 (kvm_read_cr4_bits(vcpu, X86_CR4_PKE) || in kvm_load_host_xsave_state()
920 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { in kvm_load_host_xsave_state()
921 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
922 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
923 __write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
926 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE)) { in kvm_load_host_xsave_state()
928 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
931 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
932 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
939 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
942 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
958 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
972 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
975 kvm_update_cpuid_runtime(vcpu); in __kvm_set_xcr()
979 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in kvm_set_xcr() argument
981 if (kvm_x86_ops.get_cpl(vcpu) != 0 || in kvm_set_xcr()
982 __kvm_set_xcr(vcpu, index, xcr)) { in kvm_set_xcr()
983 kvm_inject_gp(vcpu, 0); in kvm_set_xcr()
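Editor's note: kvm_set_xcr()/__kvm_set_xcr() above gate XSETBV on CPL 0 and on the XCR0 bits the guest is allowed to use, with the x87 bit always required (an architectural rule; the remaining feature-ordering constraints are elided from the listing). A minimal sketch:

#include <stdbool.h>
#include <stdint.h>

#define SK_XFEATURE_FP (1ULL << 0)

static bool sk_xsetbv_allowed(int cpl, uint32_t xcr_index, uint64_t new_xcr0,
                              uint64_t guest_supported_xcr0)
{
    if (cpl != 0 || xcr_index != 0)         /* only XCR0, only ring 0 */
        return false;
    if (!(new_xcr0 & SK_XFEATURE_FP))       /* x87 state is mandatory */
        return false;
    return !(new_xcr0 & ~(guest_supported_xcr0 | SK_XFEATURE_FP));
}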
990 int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_valid_cr4() argument
995 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in kvm_valid_cr4()
998 if (!kvm_x86_ops.is_valid_cr4(vcpu, cr4)) in kvm_valid_cr4()
1005 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
1007 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
1012 if (kvm_valid_cr4(vcpu, cr4)) in kvm_set_cr4()
1015 if (is_long_mode(vcpu)) { in kvm_set_cr4()
1020 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
1022 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
1023 kvm_read_cr3(vcpu))) in kvm_set_cr4()
1027 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) in kvm_set_cr4()
1031 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
1035 kvm_x86_ops.set_cr4(vcpu, cr4); in kvm_set_cr4()
1039 kvm_mmu_reset_context(vcpu); in kvm_set_cr4()
1042 kvm_update_cpuid_runtime(vcpu); in kvm_set_cr4()
1048 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
1052 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); in kvm_set_cr3()
1060 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { in kvm_set_cr3()
1062 kvm_mmu_sync_roots(vcpu); in kvm_set_cr3()
1063 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_set_cr3()
1068 if (is_long_mode(vcpu) && in kvm_set_cr3()
1069 (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) in kvm_set_cr3()
1071 else if (is_pae_paging(vcpu) && in kvm_set_cr3()
1072 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
1075 kvm_mmu_new_pgd(vcpu, cr3, skip_tlb_flush, skip_tlb_flush); in kvm_set_cr3()
1076 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1077 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); in kvm_set_cr3()
1083 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
1087 if (lapic_in_kernel(vcpu)) in kvm_set_cr8()
1088 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
1090 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1095 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
1097 if (lapic_in_kernel(vcpu)) in kvm_get_cr8()
1098 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
1100 return vcpu->arch.cr8; in kvm_get_cr8()
1104 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
1108 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1110 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1111 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
1115 void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
1119 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1120 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1122 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1123 kvm_x86_ops.set_dr7(vcpu, dr7); in kvm_update_dr7()
1124 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1126 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1130 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
1134 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) in kvm_dr6_fixed()
1139 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in __kvm_set_dr() argument
1141 size_t size = ARRAY_SIZE(vcpu->arch.db); in __kvm_set_dr()
1145 vcpu->arch.db[array_index_nospec(dr, size)] = val; in __kvm_set_dr()
1146 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
1147 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
1153 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
1159 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
1160 kvm_update_dr7(vcpu); in __kvm_set_dr()
1167 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
1169 if (__kvm_set_dr(vcpu, dr, val)) { in kvm_set_dr()
1170 kvm_inject_gp(vcpu, 0); in kvm_set_dr()
1177 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) in kvm_get_dr() argument
1179 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1183 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1187 *val = vcpu->arch.dr6; in kvm_get_dr()
1191 *val = vcpu->arch.dr7; in kvm_get_dr()
1198 bool kvm_rdpmc(struct kvm_vcpu *vcpu) in kvm_rdpmc() argument
1200 u32 ecx = kvm_rcx_read(vcpu); in kvm_rdpmc()
1204 err = kvm_pmu_rdpmc(vcpu, ecx, &data); in kvm_rdpmc()
1207 kvm_rax_write(vcpu, (u32)data); in kvm_rdpmc()
1208 kvm_rdx_write(vcpu, data >> 32); in kvm_rdpmc()
1473 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr_feature() argument
1484 if (kvm_msr_ignored_check(vcpu, index, 0, false)) in do_get_msr_feature()
1496 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in __kvm_valid_efer() argument
1498 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) in __kvm_valid_efer()
1501 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) in __kvm_valid_efer()
1505 !guest_cpuid_has(vcpu, X86_FEATURE_LM)) in __kvm_valid_efer()
1508 if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX)) in __kvm_valid_efer()
1514 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
1519 return __kvm_valid_efer(vcpu, efer); in kvm_valid_efer()
1523 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_efer() argument
1525 u64 old_efer = vcpu->arch.efer; in set_efer()
1533 if (!__kvm_valid_efer(vcpu, efer)) in set_efer()
1536 if (is_paging(vcpu) && in set_efer()
1537 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1542 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1544 r = kvm_x86_ops.set_efer(vcpu, efer); in set_efer()
1552 kvm_mmu_reset_context(vcpu); in set_efer()
1563 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type) in kvm_msr_allowed() argument
1567 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1599 ++vcpu->stat.exits; in kvm_msr_allowed()
1615 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data, in __kvm_set_msr() argument
1620 if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE)) in __kvm_set_msr()
1629 if (is_noncanonical_address(data, vcpu)) in __kvm_set_msr()
1646 data = __canonical_address(data, vcpu_virt_addr_bits(vcpu)); in __kvm_set_msr()
1653 return kvm_x86_ops.set_msr(vcpu, &msr); in __kvm_set_msr()
1656 static int kvm_set_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_set_msr_ignored_check() argument
1659 int ret = __kvm_set_msr(vcpu, index, data, host_initiated); in kvm_set_msr_ignored_check()
1662 if (kvm_msr_ignored_check(vcpu, index, data, true)) in kvm_set_msr_ignored_check()
1674 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data, in __kvm_get_msr() argument
1680 if (!host_initiated && !kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ)) in __kvm_get_msr()
1686 ret = kvm_x86_ops.get_msr(vcpu, &msr); in __kvm_get_msr()
1692 static int kvm_get_msr_ignored_check(struct kvm_vcpu *vcpu, in kvm_get_msr_ignored_check() argument
1695 int ret = __kvm_get_msr(vcpu, index, data, host_initiated); in kvm_get_msr_ignored_check()
1700 if (kvm_msr_ignored_check(vcpu, index, 0, false)) in kvm_get_msr_ignored_check()
1707 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data) in kvm_get_msr() argument
1709 return kvm_get_msr_ignored_check(vcpu, index, data, false); in kvm_get_msr()
1713 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data) in kvm_set_msr() argument
1715 return kvm_set_msr_ignored_check(vcpu, index, data, false); in kvm_set_msr()
1719 static int complete_emulated_msr(struct kvm_vcpu *vcpu, bool is_read) in complete_emulated_msr() argument
1721 if (vcpu->run->msr.error) { in complete_emulated_msr()
1722 kvm_inject_gp(vcpu, 0); in complete_emulated_msr()
1725 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_emulated_msr()
1726 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_emulated_msr()
1729 return kvm_skip_emulated_instruction(vcpu); in complete_emulated_msr()
1732 static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu) in complete_emulated_rdmsr() argument
1734 return complete_emulated_msr(vcpu, true); in complete_emulated_rdmsr()
1737 static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu) in complete_emulated_wrmsr() argument
1739 return complete_emulated_msr(vcpu, false); in complete_emulated_wrmsr()
1754 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index, in kvm_msr_user_space() argument
1756 int (*completion)(struct kvm_vcpu *vcpu), in kvm_msr_user_space() argument
1762 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1765 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1766 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1767 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
1768 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
1769 vcpu->run->msr.index = index; in kvm_msr_user_space()
1770 vcpu->run->msr.data = data; in kvm_msr_user_space()
1771 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
1776 static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r) in kvm_get_msr_user_space() argument
1778 return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0, in kvm_get_msr_user_space()
1782 static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r) in kvm_set_msr_user_space() argument
1784 return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data, in kvm_set_msr_user_space()
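Editor's note: kvm_msr_user_space() above fills vcpu->run->msr and defers completion, which surfaces in userspace as KVM_EXIT_X86_RDMSR/KVM_EXIT_X86_WRMSR exits. A hedged userspace sketch of handling them, assuming a kernel/UAPI new enough to define these exit reasons, a VMM that enabled KVM_CAP_X86_USER_SPACE_MSR, and an mmap'ed struct kvm_run for the vCPU; the MSR index below is only an example:

#include <linux/kvm.h>
#include <stdint.h>

static void handle_msr_exit(struct kvm_run *run)
{
    switch (run->exit_reason) {
    case KVM_EXIT_X86_RDMSR:
        if (run->msr.index == 0x4b564d00) {   /* example index only */
            run->msr.data = 0;                /* value returned in EDX:EAX */
            run->msr.error = 0;
        } else {
            run->msr.error = 1;               /* ask KVM to inject #GP */
        }
        break;
    case KVM_EXIT_X86_WRMSR:
        /* run->msr.index / run->msr.data describe the attempted write. */
        run->msr.error = 0;                   /* accept it (1 would #GP) */
        break;
    }
}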
1788 int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu) in kvm_emulate_rdmsr() argument
1790 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_rdmsr()
1794 r = kvm_get_msr(vcpu, ecx, &data); in kvm_emulate_rdmsr()
1797 if (r && kvm_get_msr_user_space(vcpu, ecx, r)) { in kvm_emulate_rdmsr()
1805 kvm_inject_gp(vcpu, 0); in kvm_emulate_rdmsr()
1811 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
1812 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
1813 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_rdmsr()
1817 int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu) in kvm_emulate_wrmsr() argument
1819 u32 ecx = kvm_rcx_read(vcpu); in kvm_emulate_wrmsr()
1820 u64 data = kvm_read_edx_eax(vcpu); in kvm_emulate_wrmsr()
1823 r = kvm_set_msr(vcpu, ecx, data); in kvm_emulate_wrmsr()
1826 if (r && kvm_set_msr_user_space(vcpu, ecx, data, r)) in kvm_emulate_wrmsr()
1837 kvm_inject_gp(vcpu, 0); in kvm_emulate_wrmsr()
1842 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_wrmsr()
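Editor's note: kvm_emulate_rdmsr()/kvm_emulate_wrmsr() above follow the usual register convention, with ECX selecting the MSR and the 64-bit value split across EDX:EAX. As plain helpers:

#include <stdint.h>

static uint64_t sk_edx_eax_to_u64(uint32_t edx, uint32_t eax)
{
    return ((uint64_t)edx << 32) | eax;
}

static void sk_u64_to_edx_eax(uint64_t data, uint32_t *edx, uint32_t *eax)
{
    *eax = (uint32_t)data;
    *edx = (uint32_t)(data >> 32);
}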
1846 bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu) in kvm_vcpu_exit_request() argument
1848 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
1860 static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_x2apic_icr_irqoff() argument
1862 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
1871 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1872 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1873 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); in handle_fastpath_set_x2apic_icr_irqoff()
1881 static int handle_fastpath_set_tscdeadline(struct kvm_vcpu *vcpu, u64 data) in handle_fastpath_set_tscdeadline() argument
1883 if (!kvm_can_use_hv_timer(vcpu)) in handle_fastpath_set_tscdeadline()
1886 kvm_set_lapic_tscdeadline_msr(vcpu, data); in handle_fastpath_set_tscdeadline()
1890 fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu) in handle_fastpath_set_msr_irqoff() argument
1892 u32 msr = kvm_rcx_read(vcpu); in handle_fastpath_set_msr_irqoff()
1898 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
1899 if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) { in handle_fastpath_set_msr_irqoff()
1900 kvm_skip_emulated_instruction(vcpu); in handle_fastpath_set_msr_irqoff()
1905 data = kvm_read_edx_eax(vcpu); in handle_fastpath_set_msr_irqoff()
1906 if (!handle_fastpath_set_tscdeadline(vcpu, data)) { in handle_fastpath_set_msr_irqoff()
1907 kvm_skip_emulated_instruction(vcpu); in handle_fastpath_set_msr_irqoff()
1925 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr() argument
1927 return kvm_get_msr_ignored_check(vcpu, index, data, true); in do_get_msr()
1930 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
1932 return kvm_set_msr_ignored_check(vcpu, index, *data, true); in do_set_msr()
2042 static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time, in kvm_write_system_time() argument
2045 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2047 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2049 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_write_system_time()
2054 vcpu->arch.time = system_time; in kvm_write_system_time()
2055 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_write_system_time()
2058 vcpu->arch.pv_time_enabled = false; in kvm_write_system_time()
2062 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_write_system_time()
2063 &vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2065 vcpu->arch.pv_time_enabled = true; in kvm_write_system_time()
2118 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in set_tsc_khz() argument
2124 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
2131 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2132 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2150 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
2154 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) in kvm_set_tsc_khz() argument
2162 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
2168 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2169 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2170 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2184 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); in kvm_set_tsc_khz()
2187 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
2189 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2190 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2191 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2192 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
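Editor's note: compute_guest_tsc() above converts elapsed host nanoseconds into guest TSC ticks at virtual_tsc_khz and adds them to the last written TSC value. The kernel uses a precomputed mult/shift pair (pvclock_scale_delta); the sketch below does the equivalent arithmetic directly:

#include <stdint.h>

static uint64_t sk_compute_guest_tsc(uint64_t kernel_ns, uint64_t this_tsc_nsec,
                                     uint64_t this_tsc_write,
                                     uint32_t virtual_tsc_khz)
{
    uint64_t elapsed_ns = kernel_ns - this_tsc_nsec;

    /* ticks = ns * kHz / 1e6, split to avoid 64-bit overflow */
    return this_tsc_write +
           (elapsed_ns / 1000000) * virtual_tsc_khz +
           ((elapsed_ns % 1000000) * virtual_tsc_khz) / 1000000;
}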
2201 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) in kvm_track_tsc_matching() argument
2205 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2209 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2221 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
2223 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2224 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2244 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) in kvm_scale_tsc() argument
2247 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
2256 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in kvm_compute_tsc_offset() argument
2260 tsc = kvm_scale_tsc(vcpu, rdtsc()); in kvm_compute_tsc_offset()
2265 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in kvm_read_l1_tsc() argument
2267 return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc); in kvm_read_l1_tsc()
2271 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) in kvm_vcpu_write_tsc_offset() argument
2273 vcpu->arch.l1_tsc_offset = offset; in kvm_vcpu_write_tsc_offset()
2274 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
2290 static void kvm_synchronize_tsc(struct kvm_vcpu *vcpu, u64 data) in kvm_synchronize_tsc() argument
2292 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2300 offset = kvm_compute_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2304 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2307 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2314 nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2315 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2333 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2337 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_synchronize_tsc()
2339 offset = kvm_compute_tsc_offset(vcpu, data); in kvm_synchronize_tsc()
2342 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_synchronize_tsc()
2366 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_synchronize_tsc()
2368 vcpu->arch.last_guest_tsc = data; in kvm_synchronize_tsc()
2370 /* Keep track of which generation this VCPU has synchronized to */ in kvm_synchronize_tsc()
2371 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_synchronize_tsc()
2372 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_synchronize_tsc()
2373 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_synchronize_tsc()
2375 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_synchronize_tsc()
2385 kvm_track_tsc_matching(vcpu); in kvm_synchronize_tsc()
2389 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, in adjust_tsc_offset_guest() argument
2392 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2393 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); in adjust_tsc_offset_guest()
2396 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) in adjust_tsc_offset_host() argument
2398 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2400 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); in adjust_tsc_offset_host()
2401 adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
2606 struct kvm_vcpu *vcpu; in kvm_gen_update_masterclock() local
2614 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
2615 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_gen_update_masterclock()
2618 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
2619 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_gen_update_masterclock()
2659 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page() local
2662 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2666 /* This VCPU is paused, but it's legal for a guest to read another in kvm_setup_pvclock_page()
2667 * VCPU's kvmclock, so we really have to follow the specification where in kvm_setup_pvclock_page()
2677 * and third write. The vcpu->pv_time cache is still valid, because the in kvm_setup_pvclock_page()
2685 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2686 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2687 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2688 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2693 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2695 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2696 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2697 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2700 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2702 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2703 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2704 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2708 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2709 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2710 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2711 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
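Editor's note: the sequence matched in kvm_setup_pvclock_page() (version + 1, write version, write payload, version++, write version again) is the pvclock update protocol: the guest-visible version is odd while the structure is being rewritten and even once it is consistent, so a guest retries a read whose version is odd or changes underneath it. A standalone sketch of the writer side, payload fields elided:

#include <stdint.h>

struct sk_pvclock {
    uint32_t version;
    /* tsc_timestamp, system_time, tsc_to_system_mul, ... elided */
};

static void sk_pvclock_publish(volatile struct sk_pvclock *shared,
                               uint32_t cur_version)
{
    shared->version = cur_version | 1;        /* odd: update in progress */
    __sync_synchronize();                     /* order version vs. payload */

    /* ... rewrite the payload fields here ... */

    __sync_synchronize();
    shared->version = (cur_version | 1) + 1;  /* even: update complete */
}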
2717 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
2758 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
2764 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
2779 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
2781 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
2782 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
2783 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
2786 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
2787 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
2788 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
2795 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
2797 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
2800 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
2805 * kvmclock updates which are isolated to a given vcpu, such as
2806 * vcpu->cpu migration, should not allow system_timestamp from
2808 * correction applies to one vcpu's system_timestamp but not
2814 * The time for a remote vcpu to update its kvmclock is bound
2827 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
2829 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
2830 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
2831 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
2864 static bool can_set_mci_status(struct kvm_vcpu *vcpu) in can_set_mci_status() argument
2867 if (guest_cpuid_is_amd_or_hygon(vcpu)) in can_set_mci_status()
2868 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
2873 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_msr_mce() argument
2875 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
2882 vcpu->arch.mcg_status = data; in set_msr_mce()
2890 vcpu->arch.mcg_ctl = data; in set_msr_mce()
2914 if (!can_set_mci_status(vcpu)) in set_msr_mce()
2918 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
2926 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) in xen_hvm_config() argument
2928 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
2929 int lm = is_long_mode(vcpu); in xen_hvm_config()
2945 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) { in xen_hvm_config()
2952 static inline bool kvm_pv_async_pf_enabled(struct kvm_vcpu *vcpu) in kvm_pv_async_pf_enabled() argument
2956 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
2959 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
2967 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_VMEXIT) && in kvm_pv_enable_async_pf()
2971 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT) && in kvm_pv_enable_async_pf()
2975 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf()
2978 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
2980 if (!kvm_pv_async_pf_enabled(vcpu)) { in kvm_pv_enable_async_pf()
2981 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
2982 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
2986 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2990 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2991 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
2993 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
2998 static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf_int() argument
3004 if (!lapic_in_kernel(vcpu)) in kvm_pv_enable_async_pf_int()
3007 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3009 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3014 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
3016 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
3017 vcpu->arch.time = 0; in kvmclock_reset()
3020 static void kvm_vcpu_flush_tlb_all(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_all() argument
3022 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3023 kvm_x86_ops.tlb_flush_all(vcpu); in kvm_vcpu_flush_tlb_all()
3026 static void kvm_vcpu_flush_tlb_guest(struct kvm_vcpu *vcpu) in kvm_vcpu_flush_tlb_guest() argument
3028 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3029 kvm_x86_ops.tlb_flush_guest(vcpu); in kvm_vcpu_flush_tlb_guest()
3032 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
3034 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in record_steal_time()
3037 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in record_steal_time()
3041 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3044 if (WARN_ON_ONCE(current->mm != vcpu->kvm->mm)) in record_steal_time()
3047 slots = kvm_memslots(vcpu->kvm); in record_steal_time()
3055 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) || in record_steal_time()
3065 if (guest_pv_has(vcpu, KVM_FEATURE_PV_TLB_FLUSH)) { in record_steal_time()
3084 vcpu->arch.st.preempted = 0; in record_steal_time()
3086 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3089 kvm_vcpu_flush_tlb_guest(vcpu); in record_steal_time()
3098 vcpu->arch.st.preempted = 0; in record_steal_time()
3112 vcpu->arch.st.last_steal; in record_steal_time()
3113 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
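Editor's note: the tail of record_steal_time() above accumulates steal time as the delta of the task's run_delay since the previous update. A trivial standalone model:

#include <stdint.h>

struct sk_steal {
    uint64_t steal;        /* guest-visible steal time, nanoseconds */
    uint64_t last_steal;   /* run_delay sampled at the previous update */
};

static void sk_update_steal(struct sk_steal *st, uint64_t run_delay)
{
    st->steal += run_delay - st->last_steal;
    st->last_steal = run_delay;
}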
3125 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
3144 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3149 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3161 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3166 return set_efer(vcpu, msr_info); in kvm_set_msr_common()
3174 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3176 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", in kvm_set_msr_common()
3183 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " in kvm_set_msr_common()
3197 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", in kvm_set_msr_common()
3201 return kvm_mtrr_set_msr(vcpu, msr, data); in kvm_set_msr_common()
3203 return kvm_set_apic_base(vcpu, msr_info); in kvm_set_msr_common()
3205 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
3207 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
3210 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { in kvm_set_msr_common()
3212 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3213 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3217 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
3219 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3223 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3224 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { in kvm_set_msr_common()
3225 if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3)) in kvm_set_msr_common()
3227 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3228 kvm_update_cpuid_runtime(vcpu); in kvm_set_msr_common()
3230 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3236 vcpu->arch.smbase = data; in kvm_set_msr_common()
3239 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3243 kvm_synchronize_tsc(vcpu, data); in kvm_set_msr_common()
3245 u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3246 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
3247 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3252 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_set_msr_common()
3261 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3262 kvm_update_cpuid_runtime(vcpu); in kvm_set_msr_common()
3267 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3270 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3273 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3276 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3279 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3282 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_set_msr_common()
3285 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3288 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_set_msr_common()
3291 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3294 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_set_msr_common()
3297 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
3301 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3304 if (kvm_pv_enable_async_pf_int(vcpu, data)) in kvm_set_msr_common()
3308 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_set_msr_common()
3311 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3312 kvm_check_async_pf_completion(vcpu); in kvm_set_msr_common()
3316 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_set_msr_common()
3325 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3330 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
3334 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_set_msr_common()
3337 if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8))) in kvm_set_msr_common()
3342 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_set_msr_common()
3349 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3355 return set_msr_mce(vcpu, msr_info); in kvm_set_msr_common()
3363 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
3364 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
3367 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " in kvm_set_msr_common()
3389 return kvm_hv_set_msr_common(vcpu, msr, data, in kvm_set_msr_common()
3396 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", in kvm_set_msr_common()
3400 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
3402 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3405 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
3407 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3412 cpuid_fault_enabled(vcpu))) in kvm_set_msr_common()
3414 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3419 !supports_cpuid_fault(vcpu))) in kvm_set_msr_common()
3421 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3424 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
3425 return xen_hvm_config(vcpu, data); in kvm_set_msr_common()
3426 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
3427 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
3434 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) in get_msr_mce() argument
3437 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3446 data = vcpu->arch.mcg_cap; in get_msr_mce()
3451 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3454 data = vcpu->arch.mcg_status; in get_msr_mce()
3463 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3472 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_get_msr_common() argument
3512 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3513 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
3517 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3521 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) in kvm_get_msr_common()
3523 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
3527 !guest_cpuid_has(vcpu, X86_FEATURE_PDCM)) in kvm_get_msr_common()
3529 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
3532 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
3544 u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : in kvm_get_msr_common()
3545 vcpu->arch.tsc_offset; in kvm_get_msr_common()
3547 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; in kvm_get_msr_common()
3552 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3571 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
3574 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3576 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
3579 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
3582 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
3587 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
3590 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
3599 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
3602 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
3605 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3608 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
3611 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3614 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE)) in kvm_get_msr_common()
3617 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3620 if (!guest_pv_has(vcpu, KVM_FEATURE_CLOCKSOURCE2)) in kvm_get_msr_common()
3623 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3626 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF)) in kvm_get_msr_common()
3629 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
3632 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
3635 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
3638 if (!guest_pv_has(vcpu, KVM_FEATURE_ASYNC_PF_INT)) in kvm_get_msr_common()
3644 if (!guest_pv_has(vcpu, KVM_FEATURE_STEAL_TIME)) in kvm_get_msr_common()
3647 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
3650 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_EOI)) in kvm_get_msr_common()
3653 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
3656 if (!guest_pv_has(vcpu, KVM_FEATURE_POLL_CONTROL)) in kvm_get_msr_common()
3659 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
3667 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
3671 !guest_cpuid_has(vcpu, X86_FEATURE_XSAVES)) in kvm_get_msr_common()
3673 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
3696 return kvm_hv_get_msr_common(vcpu, in kvm_get_msr_common()
3713 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
3715 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
3718 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
3720 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
3724 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
3726 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
3729 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
3732 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
3735 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3736 return kvm_pmu_get_msr(vcpu, msr_info); in kvm_get_msr_common()
3748 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
3750 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
3756 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
3767 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
3768 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
3792 r = n = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
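Editor's note: msr_io()/__msr_io() above back the KVM_GET_MSRS/KVM_SET_MSRS vCPU ioctls. A hedged userspace example of reading one MSR through that path; error handling is minimal and vcpu_fd is assumed to be an already-created KVM vCPU file descriptor:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int read_one_msr(int vcpu_fd, uint32_t index, uint64_t *value)
{
    size_t sz = sizeof(struct kvm_msrs) + sizeof(struct kvm_msr_entry);
    struct kvm_msrs *msrs = calloc(1, sz);
    int ret = -1;

    if (!msrs)
        return -1;

    msrs->nmsrs = 1;
    msrs->entries[0].index = index;

    /* KVM_GET_MSRS returns the number of MSRs successfully read. */
    if (ioctl(vcpu_fd, KVM_GET_MSRS, msrs) == 1) {
        *value = msrs->entries[0].data;
        ret = 0;
    }

    free(msrs);
    return ret;
}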
4049 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
4051 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4054 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
4057 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
4059 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4060 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4061 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4065 kvm_x86_ops.vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
4068 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4071 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4072 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4073 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4074 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
4077 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
4078 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4079 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4084 u64 offset = kvm_compute_tsc_offset(vcpu, in kvm_arch_vcpu_load()
4085 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4086 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
4087 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4090 if (kvm_lapic_hv_timer_in_use(vcpu)) in kvm_arch_vcpu_load()
4091 kvm_lapic_restart_hv_timer(vcpu); in kvm_arch_vcpu_load()
4095 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
4097 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4098 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
4099 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4100 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); in kvm_arch_vcpu_load()
4101 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4104 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
4107 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) in kvm_steal_time_set_preempted() argument
4109 struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache; in kvm_steal_time_set_preempted()
4113 gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS; in kvm_steal_time_set_preempted()
4116 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
4119 * when this is true, for example allowing the vCPU to be marked in kvm_steal_time_set_preempted()
4122 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
4123 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
4127 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
4128 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4131 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4135 if (unlikely(current->mm != vcpu->kvm->mm)) in kvm_steal_time_set_preempted()
4138 slots = kvm_memslots(vcpu->kvm); in kvm_steal_time_set_preempted()
4149 vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4154 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
4158 if (vcpu->preempted) { in kvm_arch_vcpu_put()
4159 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4165 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4166 kvm_steal_time_set_preempted(vcpu); in kvm_arch_vcpu_put()
4167 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4170 kvm_x86_ops.vcpu_put(vcpu); in kvm_arch_vcpu_put()
4171 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4180 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
4183 if (vcpu->arch.apicv_active) in kvm_vcpu_ioctl_get_lapic()
4184 kvm_x86_ops.sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
4186 return kvm_apic_get_state(vcpu, s); in kvm_vcpu_ioctl_get_lapic()
4189 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
4194 r = kvm_apic_set_state(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
4197 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
4202 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) in kvm_cpu_accept_dm_intr() argument
4210 if (kvm_cpu_has_extint(vcpu)) in kvm_cpu_accept_dm_intr()
4214 return (!lapic_in_kernel(vcpu) || in kvm_cpu_accept_dm_intr()
4215 kvm_apic_accept_pic_intr(vcpu)); in kvm_cpu_accept_dm_intr()
4218 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) in kvm_vcpu_ready_for_interrupt_injection() argument
4227 return (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4228 kvm_cpu_accept_dm_intr(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4229 !kvm_event_needs_reinjection(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
4230 !vcpu->arch.exception.pending); in kvm_vcpu_ready_for_interrupt_injection()
4233 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
4239 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4240 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4241 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
4249 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4252 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4255 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
4256 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
4260 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
4262 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
4267 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_smi() argument
4269 kvm_make_request(KVM_REQ_SMI, vcpu); in kvm_vcpu_ioctl_smi()
4274 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
4279 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4283 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
4295 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4298 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4301 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4303 kvm_x86_ops.setup_mce(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
4308 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
4311 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4313 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4322 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4332 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
4333 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
4334 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
4341 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
4343 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
4356 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
4359 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4361 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4362 process_smi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4375 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
4376 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4377 kvm_deliver_exception_payload(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4385 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
4389 events->exception.injected = vcpu->arch.exception.injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4390 events->exception.pending = vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4396 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4398 vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4400 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4401 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4402 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4403 events->exception_has_payload = vcpu->arch.exception.has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4404 events->exception_payload = vcpu->arch.exception.payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4407 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4408 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4410 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4412 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4413 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4414 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4419 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4420 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4422 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4423 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4428 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4434 static void kvm_smm_changed(struct kvm_vcpu *vcpu);
4436 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
4447 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4465 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4468 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4469 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4470 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4471 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4472 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4473 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4474 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4475 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4477 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4478 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4479 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4481 kvm_x86_ops.set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
4484 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4486 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4487 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4490 lapic_in_kernel(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4491 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4494 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4496 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4498 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4500 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4501 kvm_smm_changed(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4504 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4508 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4510 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4513 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4515 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4517 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4521 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
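
kvm_vcpu_ioctl_x86_get_vcpu_events() and ..._set_vcpu_events() serve the KVM_GET_VCPU_EVENTS / KVM_SET_VCPU_EVENTS ioctls that VMMs use to save and restore pending exception, interrupt, NMI and SMM state (e.g. across live migration). A minimal sketch, assuming src_fd and dst_fd are vcpu file descriptors:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int copy_vcpu_events(int src_fd, int dst_fd)
{
        struct kvm_vcpu_events events;

        if (ioctl(src_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
                return -1;
        /* events.flags (KVM_VCPUEVENT_VALID_*) selects which optional
         * fields the SET side will honor. */
        return ioctl(dst_fd, KVM_SET_VCPU_EVENTS, &events);
}
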
4526 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
4532 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
4533 kvm_get_dr(vcpu, 6, &val); in kvm_vcpu_ioctl_x86_get_debugregs()
4535 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
4538 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
4549 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
4550 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
4551 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
4552 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
4553 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
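
kvm_vcpu_ioctl_x86_get_debugregs()/..._set_debugregs() back KVM_GET_DEBUGREGS and KVM_SET_DEBUGREGS (guarded by KVM_CAP_DEBUGREGS). A hedged sketch that seeds a guest-visible one-byte execute breakpoint in slot 0; the DR7 encoding (L0 in bit 0, R/W0 = LEN0 = 0 for instruction breakpoints, bit 10 fixed to 1) comes from the SDM, not from the listing, and the resulting #DB is delivered to the guest since these are the guest's own debug registers:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int seed_guest_breakpoint(int vcpu_fd, __u64 guest_addr)
{
        struct kvm_debugregs dbg;

        if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &dbg) < 0)
                return -1;
        dbg.db[0] = guest_addr;
        dbg.dr7  |= (1ULL << 0) | (1ULL << 10);  /* L0 enable; bit 10 reads as 1 */
        return ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &dbg);
}
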
4560 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) in fill_xsave() argument
4562 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in fill_xsave()
4573 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; in fill_xsave()
4591 memcpy(dest + offset, &vcpu->arch.pkru, in fill_xsave()
4592 sizeof(vcpu->arch.pkru)); in fill_xsave()
4602 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) in load_xsave() argument
4604 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in load_xsave()
4634 memcpy(&vcpu->arch.pkru, src + offset, in load_xsave()
4635 sizeof(vcpu->arch.pkru)); in load_xsave()
4644 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
4649 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
4652 &vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
4661 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
4676 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
4681 memcpy(&vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
4687 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
4698 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
4701 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
4715 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
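
fill_xsave()/load_xsave() and the four ioctl helpers above implement KVM_GET_XSAVE, KVM_SET_XSAVE, KVM_GET_XCRS and KVM_SET_XCRS. A save/restore sketch; src_fd/dst_fd are assumed vcpu fds, and KVM_CAP_XSAVE / KVM_CAP_XCRS should be checked before relying on these calls:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int copy_fpu_state(int src_fd, int dst_fd)
{
        struct kvm_xsave xsave;
        struct kvm_xcrs xcrs;

        if (ioctl(src_fd, KVM_GET_XSAVE, &xsave) < 0 ||
            ioctl(src_fd, KVM_GET_XCRS, &xcrs) < 0)
                return -1;
        if (ioctl(dst_fd, KVM_SET_XSAVE, &xsave) < 0)
                return -1;
        return ioctl(dst_fd, KVM_SET_XCRS, &xcrs);      /* restores XCR0 */
}
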
4730 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
4732 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
4734 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
4735 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
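
kvm_set_guest_paused() is reached from the KVM_KVMCLOCK_CTRL vcpu ioctl: after stopping a VM (e.g. for a snapshot), the VMM calls it on every vcpu so a kvmclock-aware guest can skip soft-lockup warnings for the paused interval. Sketch:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void notify_guest_paused(int vcpu_fd)
{
        /* Fails (the handler above bails out) if the guest never enabled
         * the paravirtual clock (pv_time_enabled). */
        if (ioctl(vcpu_fd, KVM_KVMCLOCK_CTRL, 0) < 0)
                perror("KVM_KVMCLOCK_CTRL");
}
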
4739 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
4756 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
4758 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
4763 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4775 return kvm_x86_ops.enable_direct_tlbflush(vcpu); in kvm_vcpu_ioctl_enable_cap()
4778 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4779 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
4780 kvm_update_pv_runtime(vcpu); in kvm_vcpu_ioctl_enable_cap()
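
kvm_vcpu_ioctl_enable_cap() is the per-vcpu KVM_ENABLE_CAP handler; the tail of the listing shows it wiring cap->args[0] into pv_cpuid.enforce. A sketch enabling that capability (assumed to be KVM_CAP_ENFORCE_PV_FEATURE_CPUID; probe it with KVM_CHECK_EXTENSION first):

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enforce_pv_cpuid(int vcpu_fd)
{
        struct kvm_enable_cap cap = {
                .cap  = KVM_CAP_ENFORCE_PV_FEATURE_CPUID,
                .args = { 1 },          /* becomes vcpu->arch.pv_cpuid.enforce */
        };

        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}
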
4792 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
4802 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
4808 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
4816 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
4827 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
4835 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
4844 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
4848 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
4852 r = kvm_vcpu_ioctl_smi(vcpu); in kvm_arch_vcpu_ioctl()
4862 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4872 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
4883 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
4894 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4895 r = msr_io(vcpu, argp, do_get_msr, 1); in kvm_arch_vcpu_ioctl()
4896 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4900 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4901 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
4902 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4911 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
4925 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
4930 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4931 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
4932 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4941 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
4950 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
4956 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
4971 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
4977 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
4994 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
5003 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
5018 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
5027 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
5043 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
5059 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) in kvm_arch_vcpu_ioctl()
5065 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
5069 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
5078 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
5094 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5138 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5139 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5140 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5151 r = kvm_vcpu_ioctl_get_hv_cpuid(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
5168 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
5172 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
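
kvm_arch_vcpu_fault() is x86's handler for faults on the vcpu mmap region; the per-vcpu shared pages (struct kvm_run plus the pio_data page used by emulator_pio_in_out() further down) are mapped by userspace up front, roughly as in this sketch, where kvm_fd is the /dev/kvm fd and vcpu_fd the vcpu fd:

#include <linux/kvm.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static struct kvm_run *map_vcpu_run(int kvm_fd, int vcpu_fd)
{
        long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        void *p;

        if (size < 0)
                return NULL;
        p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
        return p == MAP_FAILED ? NULL : p;
}
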
6041 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
6049 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_write()
6050 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
6051 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
6062 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
6069 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_read()
6070 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
6072 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
6084 static void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
6087 kvm_x86_ops.set_segment(vcpu, var, seg); in kvm_set_segment()
6090 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
6093 kvm_x86_ops.get_segment(vcpu, var, seg); in kvm_get_segment()
6096 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, in translate_nested_gpa() argument
6101 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
6105 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
6110 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
6113 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
6114 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
6117 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_fetch() argument
6120 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
6122 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
6125 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
6128 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
6130 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
6134 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
6137 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
6141 struct kvm_vcpu *vcpu, u32 access, in kvm_read_guest_virt_helper() argument
6148 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
6156 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
6176 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
6177 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
6182 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
6190 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
6198 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, in kvm_read_guest_virt() argument
6202 u32 access = (kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
6211 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
6220 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_std() local
6223 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) in emulator_read_std()
6226 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); in emulator_read_std()
6232 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_phys_system() local
6233 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); in kvm_read_guest_phys_system()
6239 struct kvm_vcpu *vcpu, u32 access, in kvm_write_guest_virt_helper() argument
6246 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
6255 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); in kvm_write_guest_virt_helper()
6273 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_write_std() local
6276 if (!system && kvm_x86_ops.get_cpl(vcpu) == 3) in emulator_write_std()
6279 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in emulator_write_std()
6283 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, in kvm_write_guest_virt_system() argument
6287 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
6289 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in kvm_write_guest_virt_system()
6294 int handle_ud(struct kvm_vcpu *vcpu) in handle_ud() argument
6301 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, NULL, 0))) in handle_ud()
6305 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), in handle_ud()
6308 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); in handle_ud()
6312 return kvm_emulate_instruction(vcpu, emul_type); in handle_ud()
6316 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_is_mmio_gpa() argument
6323 if (vcpu_match_mmio_gpa(vcpu, gpa)) { in vcpu_is_mmio_gpa()
6331 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
6335 u32 access = ((kvm_x86_ops.get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
6343 if (vcpu_match_mmio_gva(vcpu, gva) in vcpu_mmio_gva_to_gpa()
6344 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
6345 vcpu->arch.mmio_access, 0, access)) { in vcpu_mmio_gva_to_gpa()
6346 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
6352 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
6357 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); in vcpu_mmio_gva_to_gpa()
6360 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
6365 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); in emulator_write_phys()
6368 kvm_page_track_write(vcpu, gpa, val, bytes); in emulator_write_phys()
6373 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
6375 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
6377 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
6379 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
6384 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
6386 if (vcpu->mmio_read_completed) { in read_prepare()
6388 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
6389 vcpu->mmio_read_completed = 0; in read_prepare()
6396 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
6399 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); in read_emulate()
6402 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
6405 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
6408 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
6411 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
6414 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
6421 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
6424 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
6426 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
6447 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
6454 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
6466 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); in emulator_read_write_onepage()
6468 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
6473 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
6479 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
6487 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
6488 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
6501 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
6506 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
6509 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
6517 vcpu, ops); in emulator_read_write()
6529 vcpu, ops); in emulator_read_write()
6533 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
6536 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
6538 vcpu->mmio_needed = 1; in emulator_read_write()
6539 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
6541 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
6542 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
6543 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
6544 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
6546 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
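
When emulator_read_write() cannot complete an access in the kernel it fills vcpu->run->mmio and exits to userspace with KVM_EXIT_MMIO, as the lines above show. A hedged sketch of the userspace side; handle_mmio_exit() is a hypothetical device-model hook, and a real VMM would dispatch on run->mmio.phys_addr:

#include <linux/kvm.h>
#include <string.h>

static void handle_mmio_exit(struct kvm_run *run)
{
        if (run->mmio.is_write) {
                /* consume run->mmio.data (at most 8 bytes per fragment) */
                return;
        }
        /* reads: fill the buffer before the next KVM_RUN re-enters the guest */
        memset(run->mmio.data, 0xff, run->mmio.len);
}
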
6587 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
6597 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
6615 if (kvm_vcpu_map(vcpu, gpa_to_gfn(gpa), &map)) in emulator_cmpxchg_emulated()
6637 kvm_vcpu_unmap(vcpu, &map, true); in emulator_cmpxchg_emulated()
6642 kvm_page_track_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
6652 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) in kernel_pio() argument
6656 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
6657 if (vcpu->arch.pio.in) in kernel_pio()
6658 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
6659 vcpu->arch.pio.size, pd); in kernel_pio()
6661 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, in kernel_pio()
6662 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
6666 pd += vcpu->arch.pio.size; in kernel_pio()
6671 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
6675 vcpu->arch.pio.port = port; in emulator_pio_in_out()
6676 vcpu->arch.pio.in = in; in emulator_pio_in_out()
6677 vcpu->arch.pio.count = count; in emulator_pio_in_out()
6678 vcpu->arch.pio.size = size; in emulator_pio_in_out()
6680 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
6681 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
6685 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
6686 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
6687 vcpu->run->io.size = size; in emulator_pio_in_out()
6688 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
6689 vcpu->run->io.count = count; in emulator_pio_in_out()
6690 vcpu->run->io.port = port; in emulator_pio_in_out()
6695 static int emulator_pio_in(struct kvm_vcpu *vcpu, int size, in emulator_pio_in() argument
6700 if (vcpu->arch.pio.count) in emulator_pio_in()
6703 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in()
6705 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); in emulator_pio_in()
6708 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in()
6709 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in()
6710 vcpu->arch.pio.count = 0; in emulator_pio_in()
6725 static int emulator_pio_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_out() argument
6729 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out()
6730 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out()
6731 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); in emulator_pio_out()
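
emulator_pio_in_out() describes unhandled port I/O in vcpu->run->io and exits with KVM_EXIT_IO; the bytes live in the mmap'ed region at run->io.data_offset (the pio_data page). A hedged userspace sketch with a toy serial console on port 0x3f8; the port choice is an assumption, not something in the listing:

#include <linux/kvm.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void handle_io_exit(struct kvm_run *run)
{
        uint8_t *data = (uint8_t *)run + run->io.data_offset;
        uint32_t i;

        for (i = 0; i < run->io.count; i++, data += run->io.size) {
                if (run->io.direction == KVM_EXIT_IO_OUT && run->io.port == 0x3f8)
                        fwrite(data, run->io.size, 1, stdout);
                else if (run->io.direction == KVM_EXIT_IO_IN)
                        memset(data, 0, run->io.size);  /* fake "no data" reads */
        }
}
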
6741 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
6743 return kvm_x86_ops.get_segment_base(vcpu, seg); in get_segment_base()
6751 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
6753 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
6759 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6760 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
6763 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6769 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
6771 kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
6772 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
6803 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
6808 value = kvm_read_cr0(vcpu); in emulator_get_cr()
6811 value = vcpu->arch.cr2; in emulator_get_cr()
6814 value = kvm_read_cr3(vcpu); in emulator_get_cr()
6817 value = kvm_read_cr4(vcpu); in emulator_get_cr()
6820 value = kvm_get_cr8(vcpu); in emulator_get_cr()
6832 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
6837 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
6840 vcpu->arch.cr2 = val; in emulator_set_cr()
6843 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
6846 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
6849 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
6930 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
6952 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
6959 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_msr() local
6962 r = kvm_get_msr(vcpu, msr_index, pdata); in emulator_get_msr()
6964 if (r && kvm_get_msr_user_space(vcpu, msr_index, r)) { in emulator_get_msr()
6975 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_msr() local
6978 r = kvm_set_msr(vcpu, msr_index, data); in emulator_set_msr()
6980 if (r && kvm_set_msr_user_space(vcpu, msr_index, data, r)) { in emulator_set_msr()
6990 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_smbase() local
6992 return vcpu->arch.smbase; in emulator_get_smbase()
6997 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_smbase() local
6999 vcpu->arch.smbase = smbase; in emulator_set_smbase()
7076 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_hflags() local
7078 vcpu->arch.hflags = emul_flags; in emulator_set_hflags()
7079 kvm_mmu_reset_context(vcpu); in emulator_set_hflags()
7146 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
7148 u32 int_shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in toggle_interruptibility()
7159 kvm_x86_ops.set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
7161 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
7165 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
7167 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
7169 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
7172 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
7175 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
7179 static struct x86_emulate_ctxt *alloc_emulate_ctxt(struct kvm_vcpu *vcpu) in alloc_emulate_ctxt() argument
7185 pr_err("kvm: failed to allocate vcpu's emulator\n"); in alloc_emulate_ctxt()
7189 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
7191 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
7196 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
7198 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
7201 kvm_x86_ops.get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
7204 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
7207 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
7208 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
7210 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
7223 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
7226 void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
7228 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
7231 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
7239 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_inject_realmode_interrupt()
7242 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
7243 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
7248 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) in handle_emulation_failure() argument
7250 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
7251 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
7254 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in handle_emulation_failure()
7259 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7260 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7261 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7265 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
7267 if (!is_guest_mode(vcpu) && kvm_x86_ops.get_cpl(vcpu) == 0) { in handle_emulation_failure()
7268 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7269 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7270 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7277 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in reexecute_instruction() argument
7287 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || in reexecute_instruction()
7291 if (!vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7296 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in reexecute_instruction()
7312 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7324 if (vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7327 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7328 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
7329 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7332 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7342 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7355 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in retry_instruction() local
7358 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
7359 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
7374 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
7379 if (WARN_ON_ONCE(is_guest_mode(vcpu)) || in retry_instruction()
7389 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
7390 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
7392 if (!vcpu->arch.mmu->direct_map) in retry_instruction()
7393 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in retry_instruction()
7395 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
7400 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
7401 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
7403 static void kvm_smm_changed(struct kvm_vcpu *vcpu) in kvm_smm_changed() argument
7405 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
7407 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
7410 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_smm_changed()
7413 kvm_mmu_reset_context(vcpu); in kvm_smm_changed()
7431 static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu) in kvm_vcpu_do_singlestep() argument
7433 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
7435 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
7437 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
7442 kvm_queue_exception_p(vcpu, DB_VECTOR, DR6_BS); in kvm_vcpu_do_singlestep()
7446 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) in kvm_skip_emulated_instruction() argument
7448 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); in kvm_skip_emulated_instruction()
7451 r = kvm_x86_ops.skip_emulated_instruction(vcpu); in kvm_skip_emulated_instruction()
7464 r = kvm_vcpu_do_singlestep(vcpu); in kvm_skip_emulated_instruction()
7469 static bool kvm_vcpu_check_code_breakpoint(struct kvm_vcpu *vcpu, int *r) in kvm_vcpu_check_code_breakpoint() argument
7471 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
7472 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
7473 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
7474 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_code_breakpoint()
7476 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
7477 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
7489 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
7490 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { in kvm_vcpu_check_code_breakpoint()
7491 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_code_breakpoint()
7493 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
7494 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
7497 kvm_queue_exception_p(vcpu, DB_VECTOR, dr6); in kvm_vcpu_check_code_breakpoint()
7547 int x86_decode_emulated_instruction(struct kvm_vcpu *vcpu, int emulation_type, in x86_decode_emulated_instruction() argument
7550 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
7553 init_emulate_ctxt(vcpu); in x86_decode_emulated_instruction()
7559 trace_kvm_emulate_insn_start(vcpu); in x86_decode_emulated_instruction()
7560 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
7566 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in x86_emulate_instruction() argument
7570 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
7574 if (unlikely(!kvm_x86_ops.can_emulate_instruction(vcpu, insn, insn_len))) in x86_emulate_instruction()
7577 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
7583 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
7584 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
7587 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
7595 kvm_vcpu_check_code_breakpoint(vcpu, &r)) in x86_emulate_instruction()
7598 r = x86_decode_emulated_instruction(vcpu, emulation_type, in x86_emulate_instruction()
7603 kvm_queue_exception(vcpu, UD_VECTOR); in x86_emulate_instruction()
7606 if (reexecute_instruction(vcpu, cr2_or_gpa, in x86_emulate_instruction()
7619 inject_emulated_exception(vcpu); in x86_emulate_instruction()
7622 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
7628 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in x86_emulate_instruction()
7638 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
7640 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
7649 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
7650 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
7660 if (vcpu->arch.mmu->direct_map) { in x86_emulate_instruction()
7675 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, in x86_emulate_instruction()
7679 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
7684 if (inject_emulated_exception(vcpu)) in x86_emulate_instruction()
7686 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
7687 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
7689 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
7692 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
7695 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
7696 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
7698 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
7701 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
7708 unsigned long rflags = kvm_x86_ops.get_rflags(vcpu); in x86_emulate_instruction()
7709 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
7710 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
7719 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
7720 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
7721 r = kvm_vcpu_do_singlestep(vcpu); in x86_emulate_instruction()
7723 kvm_x86_ops.update_emulated_instruction(vcpu); in x86_emulate_instruction()
7724 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
7734 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
7736 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
7741 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) in kvm_emulate_instruction() argument
7743 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); in kvm_emulate_instruction()
7747 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, in kvm_emulate_instruction_from_buffer() argument
7750 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); in kvm_emulate_instruction_from_buffer()
7754 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) in complete_fast_pio_out_port_0x7e() argument
7756 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
7760 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) in complete_fast_pio_out() argument
7762 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
7764 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
7767 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_out()
7770 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_out() argument
7773 unsigned long val = kvm_rax_read(vcpu); in kvm_fast_pio_out()
7774 int ret = emulator_pio_out(vcpu, size, port, &val, 1); in kvm_fast_pio_out()
7784 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
7785 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
7787 kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio_out()
7789 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
7790 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
7795 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) in complete_fast_pio_in() argument
7800 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
7802 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
7803 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
7808 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
7811 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform in complete_fast_pio_in()
7814 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
7815 kvm_rax_write(vcpu, val); in complete_fast_pio_in()
7817 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_in()
7820 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_in() argument
7827 val = (size < 4) ? kvm_rax_read(vcpu) : 0; in kvm_fast_pio_in()
7829 ret = emulator_pio_in(vcpu, size, port, &val, 1); in kvm_fast_pio_in()
7831 kvm_rax_write(vcpu, val); in kvm_fast_pio_in()
7835 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
7836 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
7841 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) in kvm_fast_pio() argument
7846 ret = kvm_fast_pio_in(vcpu, size, port); in kvm_fast_pio()
7848 ret = kvm_fast_pio_out(vcpu, size, port); in kvm_fast_pio()
7849 return ret && kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio()
7877 struct kvm_vcpu *vcpu; in kvm_hyperv_tsc_notifier() local
7898 kvm_for_each_vcpu(cpu, vcpu, kvm) in kvm_hyperv_tsc_notifier()
7899 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_hyperv_tsc_notifier()
7901 kvm_for_each_vcpu(cpu, vcpu, kvm) in kvm_hyperv_tsc_notifier()
7902 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_hyperv_tsc_notifier()
7913 struct kvm_vcpu *vcpu; in __kvmclock_cpufreq_notifier() local
7927 * the TSC for each VCPU. We must flag these local variables in __kvmclock_cpufreq_notifier()
7946 * anytime after the setting of the VCPU's request bit, the in __kvmclock_cpufreq_notifier()
7959 kvm_for_each_vcpu(i, vcpu, kvm) { in __kvmclock_cpufreq_notifier()
7960 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
7962 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in __kvmclock_cpufreq_notifier()
7963 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
8069 struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu); in kvm_handle_intel_pt_intr() local
8071 kvm_make_request(KVM_REQ_PMI, vcpu); in kvm_handle_intel_pt_intr()
8073 (unsigned long *)&vcpu->arch.pmu.global_status); in kvm_handle_intel_pt_intr()
8088 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
8093 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
8094 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
8165 * vCPU's FPU state as a fxregs_state struct. in kvm_arch_init()
8260 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) in kvm_vcpu_halt() argument
8262 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
8263 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_halt()
8264 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
8267 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
8273 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
8275 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_halt()
8280 return kvm_vcpu_halt(vcpu) && ret; in kvm_emulate_halt()
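
kvm_vcpu_halt() either parks the vcpu in KVM_MP_STATE_HALTED (in-kernel LAPIC) or returns to userspace with KVM_EXIT_HLT. A minimal run-loop sketch for the userspace-LAPIC case; run is assumed to come from mmap of the vcpu fd as sketched earlier:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void run_until_hlt(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                        return;
                if (run->exit_reason == KVM_EXIT_HLT)
                        return;         /* guest executed HLT */
                /* other exits (KVM_EXIT_IO, KVM_EXIT_MMIO, ...) handled here */
        }
}
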
8285 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, in kvm_pv_clock_pairing() argument
8301 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); in kvm_pv_clock_pairing()
8306 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
8315 * kvm_pv_kick_cpu_op: Kick a vcpu.
8317 * @apicid - apicid of vcpu to be kicked.
8361 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
8369 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
8374 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
8375 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
8377 nr = kvm_rax_read(vcpu); in kvm_emulate_hypercall()
8378 a0 = kvm_rbx_read(vcpu); in kvm_emulate_hypercall()
8379 a1 = kvm_rcx_read(vcpu); in kvm_emulate_hypercall()
8380 a2 = kvm_rdx_read(vcpu); in kvm_emulate_hypercall()
8381 a3 = kvm_rsi_read(vcpu); in kvm_emulate_hypercall()
8385 op_64_bit = is_64_bit_mode(vcpu); in kvm_emulate_hypercall()
8394 if (kvm_x86_ops.get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
8406 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_UNHALT)) in kvm_emulate_hypercall()
8409 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
8410 kvm_sched_yield(vcpu->kvm, a1); in kvm_emulate_hypercall()
8415 ret = kvm_pv_clock_pairing(vcpu, a0, a1); in kvm_emulate_hypercall()
8419 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SEND_IPI)) in kvm_emulate_hypercall()
8422 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
8425 if (!guest_pv_has(vcpu, KVM_FEATURE_PV_SCHED_YIELD)) in kvm_emulate_hypercall()
8428 kvm_sched_yield(vcpu->kvm, a0); in kvm_emulate_hypercall()
8438 kvm_rax_write(vcpu, ret); in kvm_emulate_hypercall()
8440 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
8441 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
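
kvm_emulate_hypercall() takes the hypercall number from RAX and up to four arguments from RBX/RCX/RDX/RSI, then writes the result back to RAX before skipping the instruction, all visible in the lines above. A guest-side sketch of that calling convention; VMCALL is shown here, AMD guests use VMMCALL, and emulator_fix_hypercall() below can patch the guest if it used the wrong one.

/* Guest code, not part of x86.c: issue a two-argument KVM hypercall. */
static inline long kvm_hypercall2_sketch(unsigned long nr, unsigned long a0,
                                         unsigned long a1)
{
        long ret;

        asm volatile("vmcall"
                     : "=a"(ret)
                     : "a"(nr), "b"(a0), "c"(a1)
                     : "memory");
        return ret;
}
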
8447 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
8449 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
8451 kvm_x86_ops.patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
8457 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
8459 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
8460 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
8463 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
8465 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
8467 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
8468 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
8469 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
8470 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
8472 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
8473 kvm_vcpu_ready_for_interrupt_injection(vcpu); in post_kvm_run_save()
8476 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
8483 if (!lapic_in_kernel(vcpu)) in update_cr8_intercept()
8486 if (vcpu->arch.apicv_active) in update_cr8_intercept()
8489 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
8490 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
8497 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
8499 kvm_x86_ops.update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
8502 static void kvm_inject_exception(struct kvm_vcpu *vcpu) in kvm_inject_exception() argument
8504 trace_kvm_inj_exception(vcpu->arch.exception.nr, in kvm_inject_exception()
8505 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
8506 vcpu->arch.exception.error_code, in kvm_inject_exception()
8507 vcpu->arch.exception.injected); in kvm_inject_exception()
8509 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) in kvm_inject_exception()
8510 vcpu->arch.exception.error_code = false; in kvm_inject_exception()
8511 kvm_x86_ops.queue_exception(vcpu); in kvm_inject_exception()
8514 static void inject_pending_event(struct kvm_vcpu *vcpu, bool *req_immediate_exit) in inject_pending_event() argument
8521 if (vcpu->arch.exception.injected) { in inject_pending_event()
8522 kvm_inject_exception(vcpu); in inject_pending_event()
8539 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
8540 if (vcpu->arch.nmi_injected) { in inject_pending_event()
8541 kvm_x86_ops.set_nmi(vcpu); in inject_pending_event()
8543 } else if (vcpu->arch.interrupt.injected) { in inject_pending_event()
8544 kvm_x86_ops.set_irq(vcpu); in inject_pending_event()
8549 WARN_ON_ONCE(vcpu->arch.exception.injected && in inject_pending_event()
8550 vcpu->arch.exception.pending); in inject_pending_event()
8558 if (is_guest_mode(vcpu)) { in inject_pending_event()
8559 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8565 if (vcpu->arch.exception.pending) { in inject_pending_event()
8576 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
8577 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in inject_pending_event()
8580 if (vcpu->arch.exception.nr == DB_VECTOR) { in inject_pending_event()
8581 kvm_deliver_exception_payload(vcpu); in inject_pending_event()
8582 if (vcpu->arch.dr7 & DR7_GD) { in inject_pending_event()
8583 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
8584 kvm_update_dr7(vcpu); in inject_pending_event()
8588 kvm_inject_exception(vcpu); in inject_pending_event()
8590 vcpu->arch.exception.pending = false; in inject_pending_event()
8591 vcpu->arch.exception.injected = true; in inject_pending_event()
8607 if (vcpu->arch.smi_pending) { in inject_pending_event()
8608 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8612 vcpu->arch.smi_pending = false; in inject_pending_event()
8613 ++vcpu->arch.smi_count; in inject_pending_event()
8614 enter_smm(vcpu); in inject_pending_event()
8617 kvm_x86_ops.enable_smi_window(vcpu); in inject_pending_event()
8620 if (vcpu->arch.nmi_pending) { in inject_pending_event()
8621 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8625 --vcpu->arch.nmi_pending; in inject_pending_event()
8626 vcpu->arch.nmi_injected = true; in inject_pending_event()
8627 kvm_x86_ops.set_nmi(vcpu); in inject_pending_event()
8629 WARN_ON(kvm_x86_ops.nmi_allowed(vcpu, true) < 0); in inject_pending_event()
8631 if (vcpu->arch.nmi_pending) in inject_pending_event()
8632 kvm_x86_ops.enable_nmi_window(vcpu); in inject_pending_event()
8635 if (kvm_cpu_has_injectable_intr(vcpu)) { in inject_pending_event()
8636 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8640 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), false); in inject_pending_event()
8641 kvm_x86_ops.set_irq(vcpu); in inject_pending_event()
8642 WARN_ON(kvm_x86_ops.interrupt_allowed(vcpu, true) < 0); in inject_pending_event()
8644 if (kvm_cpu_has_injectable_intr(vcpu)) in inject_pending_event()
8645 kvm_x86_ops.enable_irq_window(vcpu); in inject_pending_event()
8648 if (is_guest_mode(vcpu) && in inject_pending_event()
8650 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
8653 WARN_ON(vcpu->arch.exception.pending); in inject_pending_event()
8661 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
8670 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8673 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
8674 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
8675 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
8692 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) in enter_smm_save_seg_32() argument
8697 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_32()
8711 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) in enter_smm_save_seg_64() argument
8717 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_64()
8728 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) in enter_smm_save_state_32() argument
8735 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); in enter_smm_save_state_32()
8736 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); in enter_smm_save_state_32()
8737 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); in enter_smm_save_state_32()
8738 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); in enter_smm_save_state_32()
8741 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i)); in enter_smm_save_state_32()
8743 kvm_get_dr(vcpu, 6, &val); in enter_smm_save_state_32()
8745 kvm_get_dr(vcpu, 7, &val); in enter_smm_save_state_32()
8748 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in enter_smm_save_state_32()
8754 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in enter_smm_save_state_32()
8760 kvm_x86_ops.get_gdt(vcpu, &dt); in enter_smm_save_state_32()
8764 kvm_x86_ops.get_idt(vcpu, &dt); in enter_smm_save_state_32()
8769 enter_smm_save_seg_32(vcpu, buf, i); in enter_smm_save_state_32()
8771 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); in enter_smm_save_state_32()
8775 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
8779 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) in enter_smm_save_state_64() argument
8787 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in enter_smm_save_state_64()
8789 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); in enter_smm_save_state_64()
8790 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); in enter_smm_save_state_64()
8792 kvm_get_dr(vcpu, 6, &val); in enter_smm_save_state_64()
8794 kvm_get_dr(vcpu, 7, &val); in enter_smm_save_state_64()
8797 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); in enter_smm_save_state_64()
8798 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); in enter_smm_save_state_64()
8799 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); in enter_smm_save_state_64()
8801 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
8806 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
8808 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in enter_smm_save_state_64()
8814 kvm_x86_ops.get_idt(vcpu, &dt); in enter_smm_save_state_64()
8818 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in enter_smm_save_state_64()
8824 kvm_x86_ops.get_gdt(vcpu, &dt); in enter_smm_save_state_64()
8829 enter_smm_save_seg_64(vcpu, buf, i); in enter_smm_save_state_64()
8833 static void enter_smm(struct kvm_vcpu *vcpu) in enter_smm() argument
8840 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in enter_smm()
8843 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
8844 enter_smm_save_state_64(vcpu, buf); in enter_smm()
8847 enter_smm_save_state_32(vcpu, buf); in enter_smm()
8851 * vCPU state (e.g. leave guest mode) after we've saved the state into in enter_smm()
8854 kvm_x86_ops.pre_enter_smm(vcpu, buf); in enter_smm()
8856 vcpu->arch.hflags |= HF_SMM_MASK; in enter_smm()
8857 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
8859 if (kvm_x86_ops.get_nmi_mask(vcpu)) in enter_smm()
8860 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
8862 kvm_x86_ops.set_nmi_mask(vcpu, true); in enter_smm()
8864 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in enter_smm()
8865 kvm_rip_write(vcpu, 0x8000); in enter_smm()
8867 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
8868 kvm_x86_ops.set_cr0(vcpu, cr0); in enter_smm()
8869 vcpu->arch.cr0 = cr0; in enter_smm()
8871 kvm_x86_ops.set_cr4(vcpu, 0); in enter_smm()
8875 kvm_x86_ops.set_idt(vcpu, &dt); in enter_smm()
8877 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); in enter_smm()
8879 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
8880 cs.base = vcpu->arch.smbase; in enter_smm()
8897 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in enter_smm()
8898 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); in enter_smm()
8899 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); in enter_smm()
8900 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); in enter_smm()
8901 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); in enter_smm()
8902 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); in enter_smm()
8905 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
8906 kvm_x86_ops.set_efer(vcpu, 0); in enter_smm()
8909 kvm_update_cpuid_runtime(vcpu); in enter_smm()
8910 kvm_mmu_reset_context(vcpu); in enter_smm()
8913 static void process_smi(struct kvm_vcpu *vcpu) in process_smi() argument
8915 vcpu->arch.smi_pending = true; in process_smi()
8916 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_smi()
8937 void kvm_vcpu_update_apicv(struct kvm_vcpu *vcpu) in kvm_vcpu_update_apicv() argument
8939 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_update_apicv()
8942 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); in kvm_vcpu_update_apicv()
8943 kvm_apic_update_apicv(vcpu); in kvm_vcpu_update_apicv()
8944 kvm_x86_ops.refresh_apicv_exec_ctrl(vcpu); in kvm_vcpu_update_apicv()
8985 * while updating the calling vcpu immediately instead of in kvm_request_apicv_update()
8996 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
8998 if (!kvm_apic_present(vcpu)) in vcpu_scan_ioapic()
9001 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
9003 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
9004 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
9006 if (vcpu->arch.apicv_active) in vcpu_scan_ioapic()
9007 kvm_x86_ops.sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
9008 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
9009 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
9012 if (is_guest_mode(vcpu)) in vcpu_scan_ioapic()
9013 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
9015 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); in vcpu_scan_ioapic()
9018 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) in vcpu_load_eoi_exitmap() argument
9022 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
9025 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
9026 vcpu_to_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
9027 kvm_x86_ops.load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
9050 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
9052 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_reload_apic_access_page()
9058 kvm_x86_ops.set_apic_access_page_addr(vcpu); in kvm_vcpu_reload_apic_access_page()
9061 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) in __kvm_request_immediate_exit() argument
9063 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
9072 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
9076 dm_request_for_irq_injection(vcpu) && in vcpu_enter_guest()
9077 kvm_cpu_accept_dm_intr(vcpu); in vcpu_enter_guest()
9082 if (kvm_request_pending(vcpu)) { in vcpu_enter_guest()
9083 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) { in vcpu_enter_guest()
9084 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
9089 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) in vcpu_enter_guest()
9090 kvm_mmu_unload(vcpu); in vcpu_enter_guest()
9091 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
9092 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
9093 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
9094 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
9095 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
9096 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
9097 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
9098 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
9102 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
9103 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
9104 if (kvm_check_request(KVM_REQ_LOAD_MMU_PGD, vcpu)) in vcpu_enter_guest()
9105 kvm_mmu_load_pgd(vcpu); in vcpu_enter_guest()
9106 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { in vcpu_enter_guest()
9107 kvm_vcpu_flush_tlb_all(vcpu); in vcpu_enter_guest()
9110 kvm_clear_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in vcpu_enter_guest()
9112 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu)) in vcpu_enter_guest()
9113 kvm_vcpu_flush_tlb_current(vcpu); in vcpu_enter_guest()
9114 if (kvm_check_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu)) in vcpu_enter_guest()
9115 kvm_vcpu_flush_tlb_guest(vcpu); in vcpu_enter_guest()
9117 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
9118 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
9122 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
9123 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
9124 vcpu->mmio_needed = 0; in vcpu_enter_guest()
9128 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
9130 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
9134 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
9135 record_steal_time(vcpu); in vcpu_enter_guest()
9136 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in vcpu_enter_guest()
9137 process_smi(vcpu); in vcpu_enter_guest()
9138 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
9139 process_nmi(vcpu); in vcpu_enter_guest()
9140 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
9141 kvm_pmu_handle_event(vcpu); in vcpu_enter_guest()
9142 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
9143 kvm_pmu_deliver_pmi(vcpu); in vcpu_enter_guest()
9144 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { in vcpu_enter_guest()
9145 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
9146 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
9147 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
9148 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
9149 vcpu->run->eoi.vector = in vcpu_enter_guest()
9150 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
9155 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
9156 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
9157 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) in vcpu_enter_guest()
9158 vcpu_load_eoi_exitmap(vcpu); in vcpu_enter_guest()
9159 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
9160 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
9161 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { in vcpu_enter_guest()
9162 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9163 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
9167 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { in vcpu_enter_guest()
9168 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9169 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
9173 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { in vcpu_enter_guest()
9174 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
9175 vcpu->run->hyperv = vcpu->arch.hyperv.exit; in vcpu_enter_guest()
9185 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) in vcpu_enter_guest()
9186 kvm_hv_process_stimers(vcpu); in vcpu_enter_guest()
9187 if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu)) in vcpu_enter_guest()
9188 kvm_vcpu_update_apicv(vcpu); in vcpu_enter_guest()
9189 if (kvm_check_request(KVM_REQ_APF_READY, vcpu)) in vcpu_enter_guest()
9190 kvm_check_async_pf_completion(vcpu); in vcpu_enter_guest()
9191 if (kvm_check_request(KVM_REQ_MSR_FILTER_CHANGED, vcpu)) in vcpu_enter_guest()
9192 kvm_x86_ops.msr_filter_changed(vcpu); in vcpu_enter_guest()
9195 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { in vcpu_enter_guest()
9196 ++vcpu->stat.req_event; in vcpu_enter_guest()
9197 kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
9198 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
9203 inject_pending_event(vcpu, &req_immediate_exit); in vcpu_enter_guest()
9205 kvm_x86_ops.enable_irq_window(vcpu); in vcpu_enter_guest()
9207 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
9208 update_cr8_intercept(vcpu); in vcpu_enter_guest()
9209 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
9213 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
9220 kvm_x86_ops.prepare_guest_switch(vcpu); in vcpu_enter_guest()
9228 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
9230 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
9241 * tables done while the VCPU is running. Please see the comment in vcpu_enter_guest()
9250 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) in vcpu_enter_guest()
9251 kvm_x86_ops.sync_pir_to_irr(vcpu); in vcpu_enter_guest()
9253 if (kvm_vcpu_exit_request(vcpu)) { in vcpu_enter_guest()
9254 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9258 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9264 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
9265 kvm_x86_ops.request_immediate_exit(vcpu); in vcpu_enter_guest()
9268 trace_kvm_entry(vcpu); in vcpu_enter_guest()
9274 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
9276 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
9277 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
9278 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
9279 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
9280 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
9281 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9286 exit_fastpath = kvm_x86_ops.run(vcpu); in vcpu_enter_guest()
9294 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
9295 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
9296 kvm_x86_ops.sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
9297 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
9298 kvm_update_dr7(vcpu); in vcpu_enter_guest()
9299 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9312 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
9313 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
9315 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9318 kvm_x86_ops.handle_exit_irqoff(vcpu); in vcpu_enter_guest()
9327 kvm_before_interrupt(vcpu); in vcpu_enter_guest()
9329 ++vcpu->stat.exits; in vcpu_enter_guest()
9331 kvm_after_interrupt(vcpu); in vcpu_enter_guest()
9342 if (lapic_in_kernel(vcpu)) { in vcpu_enter_guest()
9343 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; in vcpu_enter_guest()
9345 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); in vcpu_enter_guest()
9346 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; in vcpu_enter_guest()
9353 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9359 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
9363 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
9364 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
9366 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
9367 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
9369 r = kvm_x86_ops.handle_exit(vcpu, exit_fastpath); in vcpu_enter_guest()
9374 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
9375 kvm_x86_ops.cancel_injection(vcpu); in vcpu_enter_guest()
9376 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
9377 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
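
Most of vcpu_enter_guest() above is a chain of kvm_check_request() calls. Conceptually each one behaves like a test-and-clear of a bit in the vCPU's request word, so a request posted with kvm_make_request() is serviced exactly once before the next guest entry. A freestanding model of that pattern, assuming only C11 atomics (the real helpers add memory-ordering and wakeup details not shown here):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_TLB_FLUSH    0
#define REQ_CLOCK_UPDATE 1

static atomic_ulong requests;           /* one bit per pending request */

static void make_request(int req)       /* models kvm_make_request() */
{
        atomic_fetch_or(&requests, 1UL << req);
}

static bool check_request(int req)      /* models kvm_check_request(): test-and-clear */
{
        unsigned long bit = 1UL << req;

        return atomic_fetch_and(&requests, ~bit) & bit;
}

int main(void)
{
        make_request(REQ_TLB_FLUSH);
        if (check_request(REQ_TLB_FLUSH))
                printf("flush TLB once\n");
        if (check_request(REQ_TLB_FLUSH))
                printf("never reached: the bit was already cleared\n");
        return 0;
}
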
9382 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
9384 if (!kvm_arch_vcpu_runnable(vcpu) && in vcpu_block()
9385 (!kvm_x86_ops.pre_block || kvm_x86_ops.pre_block(vcpu) == 0)) { in vcpu_block()
9386 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
9387 kvm_vcpu_block(vcpu); in vcpu_block()
9388 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
9391 kvm_x86_ops.post_block(vcpu); in vcpu_block()
9393 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) in vcpu_block()
9397 kvm_apic_accept_events(vcpu); in vcpu_block()
9398 switch(vcpu->arch.mp_state) { in vcpu_block()
9400 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
9401 vcpu->arch.mp_state = in vcpu_block()
9405 vcpu->arch.apf.halted = false; in vcpu_block()
9415 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) in kvm_vcpu_running() argument
9417 if (is_guest_mode(vcpu)) in kvm_vcpu_running()
9418 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9420 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
9421 !vcpu->arch.apf.halted); in kvm_vcpu_running()
9424 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
9427 struct kvm *kvm = vcpu->kvm; in vcpu_run()
9429 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9430 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
9434 * If another guest vCPU requests a PV TLB flush in the middle in vcpu_run()
9439 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
9440 if (kvm_vcpu_running(vcpu)) { in vcpu_run()
9441 r = vcpu_enter_guest(vcpu); in vcpu_run()
9443 r = vcpu_block(kvm, vcpu); in vcpu_run()
9449 kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu); in vcpu_run()
9450 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
9451 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
9453 if (dm_request_for_irq_injection(vcpu) && in vcpu_run()
9454 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { in vcpu_run()
9456 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
9457 ++vcpu->stat.request_irq_exits; in vcpu_run()
9462 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9463 r = xfer_to_guest_mode_handle_work(vcpu); in vcpu_run()
9466 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9470 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
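
vcpu_run() above alternates vcpu_enter_guest() while the vCPU is runnable with vcpu_block() otherwise, and keeps looping as long as the handler returns a positive value. A toy illustration of that return-value convention (the helper below is a stub, not KVM code):

#include <stdio.h>

/* Stand-alone model of the "r > 0 means stay in the in-kernel loop" rule. */
static int fake_enter_guest(int iteration)
{
        return iteration < 3 ? 1 : 0;   /* pretend the 4th exit needs userspace */
}

int main(void)
{
        int r, i = 0;

        do {
                r = fake_enter_guest(i++);
        } while (r > 0);                /* r == 0: exit to userspace, r < 0: error */

        printf("left the inner run loop after %d entries, r=%d\n", i, r);
        return 0;
}
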
9475 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
9479 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
9480 r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
9481 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
9485 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
9487 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
9489 return complete_emulated_io(vcpu); in complete_emulated_pio()
9510 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
9512 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
9516 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
9519 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
9521 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
9527 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
9535 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
9536 vcpu->mmio_needed = 0; in complete_emulated_mmio()
9539 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9541 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
9542 return complete_emulated_io(vcpu); in complete_emulated_mmio()
9547 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9550 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
9551 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
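
complete_emulated_mmio() above advances a cursor over vcpu->mmio_fragments: each fragment that userspace must handle produces one KVM_EXIT_MMIO, and the completion callback re-arms itself until mmio_cur_fragment reaches mmio_nr_fragments. A condensed userspace model of that cursor-over-fragments idea (types and values are illustrative):

#include <stdio.h>

struct frag { unsigned long gpa; unsigned len; };

int main(void)
{
        struct frag frags[] = { { 0xfee00000, 4 }, { 0xfee00004, 4 } };
        unsigned nr = 2, cur = 0;

        while (cur < nr) {
                /* Each step here would be one KVM_EXIT_MMIO round trip. */
                printf("exit to userspace for gpa=%#lx len=%u\n",
                       frags[cur].gpa, frags[cur].len);
                cur++;                  /* cf. vcpu->mmio_cur_fragment++ above */
        }
        printf("all fragments done, resume emulation\n");
        return 0;
}
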
9569 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
9573 kvm_save_current_fpu(vcpu->arch.user_fpu); in kvm_load_guest_fpu()
9576 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, in kvm_load_guest_fpu()
9586 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
9590 kvm_save_current_fpu(vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
9592 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); in kvm_put_guest_fpu()
9597 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
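
kvm_load_guest_fpu() and kvm_put_guest_fpu() above are mirror images: save the state currently in the registers into one buffer, then install the other buffer. A toy two-buffer model of that swap (plain C, nothing kernel-specific):

#include <stdio.h>
#include <string.h>

/* "live" stands in for the hardware FPU registers. */
static char live[16], user_state[16], guest_state[16];

static void load_guest_fpu(void)
{
        memcpy(user_state, live, sizeof(live));   /* save what userspace had */
        memcpy(live, guest_state, sizeof(live));  /* install the guest's state */
}

static void put_guest_fpu(void)
{
        memcpy(guest_state, live, sizeof(live));  /* save what the guest changed */
        memcpy(live, user_state, sizeof(live));   /* give userspace its state back */
}

int main(void)
{
        strcpy(guest_state, "guest");
        strcpy(live, "user");
        load_guest_fpu();
        printf("in guest: %s\n", live);
        put_guest_fpu();
        printf("back in userspace: %s\n", live);
        return 0;
}
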
9601 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_ioctl_run() argument
9603 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
9606 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
9607 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
9608 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
9610 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
9615 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
9616 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_run()
9617 kvm_clear_request(KVM_REQ_UNHALT, vcpu); in kvm_arch_vcpu_ioctl_run()
9622 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
9633 r = sync_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
9639 if (!lapic_in_kernel(vcpu)) { in kvm_arch_vcpu_ioctl_run()
9640 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
9646 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
9647 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
9648 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
9649 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
9653 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
9658 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
9661 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
9663 store_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
9664 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
9665 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
9667 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
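
The complete_userspace_io block above is a one-shot callback: the pointer is copied, cleared, and only then invoked, so the completion can itself schedule another exit without clobbering state. A small sketch of that consume-then-call idiom (toy types, not the kernel's):

#include <stdio.h>

struct toy_vcpu { int (*complete_userspace_io)(struct toy_vcpu *); };

static int finish_pio(struct toy_vcpu *v)
{
        printf("finishing the interrupted PIO emulation\n");
        return 1;
}

int main(void)
{
        struct toy_vcpu v = { .complete_userspace_io = finish_pio };

        if (v.complete_userspace_io) {
                int (*cui)(struct toy_vcpu *) = v.complete_userspace_io;

                v.complete_userspace_io = NULL;  /* clear before calling, as above */
                cui(&v);
        }
        return 0;
}
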
9671 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __get_regs() argument
9673 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
9677 * back from emulation context to vcpu. Userspace shouldn't do in __get_regs()
9681 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
9682 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
9684 regs->rax = kvm_rax_read(vcpu); in __get_regs()
9685 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
9686 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
9687 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
9688 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
9689 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
9690 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
9691 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
9693 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
9694 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
9695 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
9696 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
9697 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
9698 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
9699 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
9700 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
9703 regs->rip = kvm_rip_read(vcpu); in __get_regs()
9704 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
9707 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
9709 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
9710 __get_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_get_regs()
9711 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
9715 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __set_regs() argument
9717 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
9718 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
9720 kvm_rax_write(vcpu, regs->rax); in __set_regs()
9721 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
9722 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
9723 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
9724 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
9725 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
9726 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
9727 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
9729 kvm_r8_write(vcpu, regs->r8); in __set_regs()
9730 kvm_r9_write(vcpu, regs->r9); in __set_regs()
9731 kvm_r10_write(vcpu, regs->r10); in __set_regs()
9732 kvm_r11_write(vcpu, regs->r11); in __set_regs()
9733 kvm_r12_write(vcpu, regs->r12); in __set_regs()
9734 kvm_r13_write(vcpu, regs->r13); in __set_regs()
9735 kvm_r14_write(vcpu, regs->r14); in __set_regs()
9736 kvm_r15_write(vcpu, regs->r15); in __set_regs()
9739 kvm_rip_write(vcpu, regs->rip); in __set_regs()
9740 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
9742 vcpu->arch.exception.pending = false; in __set_regs()
9744 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_regs()
9747 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
9749 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
9750 __set_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_set_regs()
9751 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
9755 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in kvm_get_cs_db_l_bits() argument
9759 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_get_cs_db_l_bits()
9765 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs() argument
9769 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs()
9770 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs()
9771 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs()
9772 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs()
9773 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs()
9774 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs()
9776 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs()
9777 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs()
9779 kvm_x86_ops.get_idt(vcpu, &dt); in __get_sregs()
9782 kvm_x86_ops.get_gdt(vcpu, &dt); in __get_sregs()
9786 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs()
9787 sregs->cr2 = vcpu->arch.cr2; in __get_sregs()
9788 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs()
9789 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs()
9790 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs()
9791 sregs->efer = vcpu->arch.efer; in __get_sregs()
9792 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs()
9796 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
9797 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
9801 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
9804 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
9805 __get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
9806 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
9810 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
9813 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
9815 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
9817 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
9818 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
9819 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
9822 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
9825 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
9826 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
9830 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
9835 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
9837 if (!lapic_in_kernel(vcpu) && in kvm_arch_vcpu_ioctl_set_mpstate()
9846 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
9852 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
9853 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
9855 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
9856 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
9860 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
9864 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
9867 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
9870 init_emulate_ctxt(vcpu); in kvm_task_switch()
9875 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
9876 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
9877 vcpu->run->internal.ndata = 0; in kvm_task_switch()
9881 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
9882 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
9887 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_valid_sregs() argument
9898 if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits) in kvm_valid_sregs()
9909 return kvm_valid_cr4(vcpu, sregs->cr4); in kvm_valid_sregs()
9912 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __set_sregs() argument
9921 if (kvm_valid_sregs(vcpu, sregs)) in __set_sregs()
9926 if (kvm_set_apic_base(vcpu, &apic_base_msr)) in __set_sregs()
9931 kvm_x86_ops.set_idt(vcpu, &dt); in __set_sregs()
9934 kvm_x86_ops.set_gdt(vcpu, &dt); in __set_sregs()
9936 vcpu->arch.cr2 = sregs->cr2; in __set_sregs()
9937 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs()
9938 vcpu->arch.cr3 = sregs->cr3; in __set_sregs()
9939 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3); in __set_sregs()
9941 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs()
9943 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs()
9944 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9946 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs()
9947 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9948 vcpu->arch.cr0 = sregs->cr0; in __set_sregs()
9950 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs()
9951 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & in __set_sregs()
9953 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9955 kvm_update_cpuid_runtime(vcpu); in __set_sregs()
9957 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs()
9958 if (is_pae_paging(vcpu)) { in __set_sregs()
9959 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs()
9962 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs()
9965 kvm_mmu_reset_context(vcpu); in __set_sregs()
9971 kvm_queue_interrupt(vcpu, pending_vec, false); in __set_sregs()
9975 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs()
9976 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs()
9977 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs()
9978 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs()
9979 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs()
9980 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs()
9982 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs()
9983 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs()
9985 update_cr8_intercept(vcpu); in __set_sregs()
9987 /* Older userspace won't unhalt the vcpu on reset. */ in __set_sregs()
9988 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in __set_sregs()
9990 !is_protmode(vcpu)) in __set_sregs()
9991 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs()
9993 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_sregs()
10000 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
10005 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
10006 ret = __set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
10007 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
10011 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
10017 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10021 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
10024 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
10026 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
10033 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10035 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
10036 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
10037 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
10039 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
10041 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
10042 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
10045 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
10047 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10049 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
10050 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
10051 get_segment_base(vcpu, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_set_guest_debug()
10057 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
10059 kvm_x86_ops.update_exception_bitmap(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10064 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
10071 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
10078 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_translate()
10080 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
10081 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
10082 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
10088 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_translate()
10092 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
10096 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
10098 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
10108 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
10112 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
10116 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
10118 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
10129 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
10133 static void store_regs(struct kvm_vcpu *vcpu) in store_regs() argument
10137 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
10138 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
10140 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
10141 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
10143 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
10145 vcpu, &vcpu->run->s.regs.events); in store_regs()
10148 static int sync_regs(struct kvm_vcpu *vcpu) in sync_regs() argument
10150 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) in sync_regs()
10153 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
10154 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
10155 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
10157 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
10158 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
10160 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
10162 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
10164 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
10166 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
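
store_regs() and sync_regs() above implement the synced-register handshake on the shared kvm_run page: userspace flags what it wrote in kvm_dirty_regs, the kernel consumes and clears those flags before entry, and on exit publishes fresh copies of everything requested in kvm_valid_regs. A compressed model of that flag protocol (the flag values below are stand-ins, not the KVM_SYNC_X86_* ABI constants):

#include <stdio.h>

#define SYNC_REGS  (1u << 0)    /* illustrative stand-ins only */
#define SYNC_SREGS (1u << 1)

struct toy_run { unsigned valid, dirty; };

static void kernel_sync_in(struct toy_run *run)
{
        if (run->dirty & SYNC_REGS) {
                printf("kernel: applying GPRs written by userspace\n");
                run->dirty &= ~SYNC_REGS;       /* consumed, cf. sync_regs() above */
        }
}

static void kernel_store_out(struct toy_run *run)
{
        if (run->valid & SYNC_REGS)
                printf("kernel: publishing GPRs back to the shared run page\n");
}

int main(void)
{
        struct toy_run run = { .valid = SYNC_REGS | SYNC_SREGS, .dirty = SYNC_REGS };

        kernel_sync_in(&run);    /* before entering the guest */
        kernel_store_out(&run);  /* after KVM_RUN returns */
        return 0;
}
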
10172 static void fx_init(struct kvm_vcpu *vcpu) in fx_init() argument
10174 fpstate_init(&vcpu->arch.guest_fpu->state); in fx_init()
10176 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = in fx_init()
10182 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
10184 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
10196 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_create() argument
10201 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
10202 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
10204 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
10206 kvm_set_tsc_khz(vcpu, max_tsc_khz); in kvm_arch_vcpu_create()
10208 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_create()
10212 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
10213 r = kvm_create_lapic(vcpu, lapic_timer_advance_ns); in kvm_arch_vcpu_create()
10216 if (kvm_apicv_activated(vcpu->kvm)) in kvm_arch_vcpu_create()
10217 vcpu->arch.apicv_active = true; in kvm_arch_vcpu_create()
10226 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
10228 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_create()
10230 if (!vcpu->arch.mce_banks) in kvm_arch_vcpu_create()
10232 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
10234 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
10238 if (!alloc_emulate_ctxt(vcpu)) in kvm_arch_vcpu_create()
10241 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10243 if (!vcpu->arch.user_fpu) { in kvm_arch_vcpu_create()
10248 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10250 if (!vcpu->arch.guest_fpu) { in kvm_arch_vcpu_create()
10251 pr_err("kvm: failed to allocate vcpu's fpu\n"); in kvm_arch_vcpu_create()
10254 fx_init(vcpu); in kvm_arch_vcpu_create()
10256 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
10257 vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); in kvm_arch_vcpu_create()
10259 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
10261 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_create()
10262 kvm_pmu_init(vcpu); in kvm_arch_vcpu_create()
10264 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
10265 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
10267 kvm_hv_vcpu_init(vcpu); in kvm_arch_vcpu_create()
10269 r = kvm_x86_ops.vcpu_create(vcpu); in kvm_arch_vcpu_create()
10273 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
10274 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
10275 kvm_vcpu_mtrr_init(vcpu); in kvm_arch_vcpu_create()
10276 vcpu_load(vcpu); in kvm_arch_vcpu_create()
10277 kvm_vcpu_reset(vcpu, false); in kvm_arch_vcpu_create()
10278 kvm_init_mmu(vcpu, false); in kvm_arch_vcpu_create()
10279 vcpu_put(vcpu); in kvm_arch_vcpu_create()
10283 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
10285 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_create()
10287 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
10289 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
10291 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
10293 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
10295 kvm_free_lapic(vcpu); in kvm_arch_vcpu_create()
10297 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_create()
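
kvm_arch_vcpu_create() above is a long allocation chain whose tail labels release resources in reverse order on failure. A generic sketch of that goto-unwind pattern in plain C (standalone model, not the actual function):

#include <stdlib.h>

struct toy_vcpu { void *a, *b, *c; };

static int toy_vcpu_create(struct toy_vcpu *v)
{
        v->a = malloc(32);
        if (!v->a)
                goto fail;
        v->b = malloc(32);
        if (!v->b)
                goto free_a;
        v->c = malloc(32);
        if (!v->c)
                goto free_b;
        return 0;               /* success: the caller now owns a, b and c */

free_b:                         /* unwind strictly in reverse allocation order */
        free(v->b);
free_a:
        free(v->a);
fail:
        return -1;              /* cf. -ENOMEM in the real code */
}

int main(void)
{
        struct toy_vcpu v;

        return toy_vcpu_create(&v) ? 1 : 0;
}
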
10301 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
10303 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
10305 kvm_hv_vcpu_postcreate(vcpu); in kvm_arch_vcpu_postcreate()
10307 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
10309 vcpu_load(vcpu); in kvm_arch_vcpu_postcreate()
10310 kvm_synchronize_tsc(vcpu, 0); in kvm_arch_vcpu_postcreate()
10311 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
10314 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
10316 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
10318 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
10323 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
10327 kvmclock_reset(vcpu); in kvm_arch_vcpu_destroy()
10329 kvm_x86_ops.vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
10331 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
10332 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
10333 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_destroy()
10334 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
10336 kvm_hv_vcpu_uninit(vcpu); in kvm_arch_vcpu_destroy()
10337 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
10338 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
10339 kvm_free_lapic(vcpu); in kvm_arch_vcpu_destroy()
10340 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
10341 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_destroy()
10342 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
10343 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
10344 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
10345 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_destroy()
10349 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_vcpu_reset() argument
10351 kvm_lapic_reset(vcpu, init_event); in kvm_vcpu_reset()
10353 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
10355 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
10356 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
10357 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
10358 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
10359 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
10360 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
10361 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
10363 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
10364 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
10365 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
10366 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
10367 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
10369 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
10371 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
10372 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
10373 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
10374 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
10376 kvmclock_reset(vcpu); in kvm_vcpu_reset()
10378 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
10379 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
10380 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
10390 kvm_put_guest_fpu(vcpu); in kvm_vcpu_reset()
10391 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10395 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10400 kvm_load_guest_fpu(vcpu); in kvm_vcpu_reset()
10404 kvm_pmu_reset(vcpu); in kvm_vcpu_reset()
10405 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
10407 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
10409 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
10412 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
10413 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
10414 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
10416 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
10418 kvm_x86_ops.vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
10421 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
10425 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
10428 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
10429 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
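
kvm_vcpu_deliver_sipi_vector() above applies the architectural SIPI rule (the shift lines themselves do not contain "vcpu", so they are not in this listing): the 8-bit startup vector selects a 4 KiB-aligned real-mode segment, giving CS.base = vector << 12, CS.selector = vector << 8, and RIP 0. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned vector = 0x9f;                      /* example SIPI vector */
        unsigned long cs_base = (unsigned long)vector << 12;
        unsigned cs_sel = vector << 8;

        /* Startup IP is CS.base + 0, i.e. physical address vector * 0x1000. */
        printf("SIPI %#x -> CS sel=%#x base=%#lx, start=%#lx\n",
               vector, cs_sel, cs_base, cs_base);
        return 0;
}
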
10435 struct kvm_vcpu *vcpu; in kvm_arch_hardware_enable() local
10450 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
10451 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
10452 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
10453 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
10455 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
10456 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
10473 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, in kvm_arch_hardware_enable()
10475 * adjustments, in case multiple suspend cycles happen before some VCPU in kvm_arch_hardware_enable()
10503 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
10504 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
10505 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
10506 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
10589 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_reset_bsp() argument
10591 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
10595 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_bsp() argument
10597 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
10603 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_sched_in() argument
10605 struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); in kvm_arch_sched_in()
10607 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
10610 kvm_make_request(KVM_REQ_PMU, vcpu); in kvm_arch_sched_in()
10612 kvm_x86_ops.sched_in(vcpu, cpu); in kvm_arch_sched_in()
10669 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) in kvm_unload_vcpu_mmu() argument
10671 vcpu_load(vcpu); in kvm_unload_vcpu_mmu()
10672 kvm_mmu_unload(vcpu); in kvm_unload_vcpu_mmu()
10673 vcpu_put(vcpu); in kvm_unload_vcpu_mmu()
10679 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
10684 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
10685 kvm_clear_async_pf_completion_queue(vcpu); in kvm_free_vcpus()
10686 kvm_unload_vcpu_mmu(vcpu); in kvm_free_vcpus()
10688 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
10689 kvm_vcpu_destroy(vcpu); in kvm_free_vcpus()
10882 struct kvm_vcpu *vcpu; in kvm_arch_memslots_updated() local
10892 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_memslots_updated()
10893 kvm_vcpu_kick(vcpu); in kvm_arch_memslots_updated()
11023 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) in kvm_guest_apic_has_interrupt() argument
11025 return (is_guest_mode(vcpu) && in kvm_guest_apic_has_interrupt()
11027 kvm_x86_ops.guest_apic_has_interrupt(vcpu)); in kvm_guest_apic_has_interrupt()
11030 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) in kvm_vcpu_has_events() argument
11032 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
11035 if (kvm_apic_has_events(vcpu)) in kvm_vcpu_has_events()
11038 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
11041 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
11044 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_vcpu_has_events()
11045 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
11046 kvm_x86_ops.nmi_allowed(vcpu, false))) in kvm_vcpu_has_events()
11049 if (kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_vcpu_has_events()
11050 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
11051 kvm_x86_ops.smi_allowed(vcpu, false))) in kvm_vcpu_has_events()
11054 if (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_has_events()
11055 (kvm_cpu_has_interrupt(vcpu) || in kvm_vcpu_has_events()
11056 kvm_guest_apic_has_interrupt(vcpu))) in kvm_vcpu_has_events()
11059 if (kvm_hv_has_stimer_pending(vcpu)) in kvm_vcpu_has_events()
11062 if (is_guest_mode(vcpu) && in kvm_vcpu_has_events()
11064 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
11070 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
11072 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); in kvm_arch_vcpu_runnable()
11075 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
11077 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
11080 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_arch_dy_runnable()
11081 kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_arch_dy_runnable()
11082 kvm_test_request(KVM_REQ_EVENT, vcpu)) in kvm_arch_dy_runnable()
11085 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
11091 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
11093 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
11096 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
11098 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
11101 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
11103 return kvm_x86_ops.interrupt_allowed(vcpu, false); in kvm_arch_interrupt_allowed()
11106 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
11108 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
11109 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
11110 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
11111 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
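
kvm_get_linear_rip() above returns RIP unchanged in 64-bit mode and otherwise the 32-bit truncation of CS.base + RIP; that linear value is what singlestep_rip is later compared against in __kvm_set_rflags(). A tiny model of the calculation:

#include <stdint.h>
#include <stdio.h>

static uint64_t linear_rip(int is_64bit, uint64_t cs_base, uint64_t rip)
{
        if (is_64bit)
                return rip;                      /* CS base is ignored in long mode */
        return (uint32_t)(cs_base + rip);        /* truncate, as in the listing */
}

int main(void)
{
        printf("%#llx\n", (unsigned long long)linear_rip(0, 0xf0000, 0xfff0)); /* 0xffff0 */
        printf("%#llx\n", (unsigned long long)linear_rip(1, 0xf0000, 0xfff0)); /* 0xfff0 */
        return 0;
}
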
11115 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
11117 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
11121 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
11125 rflags = kvm_x86_ops.get_rflags(vcpu); in kvm_get_rflags()
11126 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
11132 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
11134 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
11135 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
11137 kvm_x86_ops.set_rflags(vcpu, rflags); in __kvm_set_rflags()
11140 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
11142 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
11143 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
11147 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) in kvm_arch_async_page_ready() argument
11151 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
11155 r = kvm_mmu_reload(vcpu); in kvm_arch_async_page_ready()
11159 if (!vcpu->arch.mmu->direct_map && in kvm_arch_async_page_ready()
11160 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) in kvm_arch_async_page_ready()
11163 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
11178 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
11182 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
11185 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
11188 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
11194 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
11195 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
11201 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
11203 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
11206 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
11210 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
11212 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
11216 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
11219 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
11221 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
11228 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
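
The async page fault helpers above maintain a small open-addressed hash of outstanding GFNs with linear probing; the subtle part is kvm_del_async_pf_gfn(), which cannot simply clear a slot but must re-pack any later entry whose probe chain runs through the hole. A self-contained model of the add/find/delete-with-backshift scheme (table size and hash function are illustrative, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 8u                   /* power of two, like the per-vCPU table */
#define EMPTY  (~0ULL)              /* ~0 marks a free slot, as in the listing */

static uint64_t slots[NSLOTS];

static unsigned hash(uint64_t gfn)  /* illustrative hash, not the kernel's hash_32() */
{
        return (unsigned)(gfn * 0x9E3779B97F4A7C15ULL) & (NSLOTS - 1);
}

static void add_gfn(uint64_t gfn)   /* cf. kvm_add_async_pf_gfn(): linear probing */
{
        unsigned key = hash(gfn);

        while (slots[key] != EMPTY)
                key = (key + 1) & (NSLOTS - 1);
        slots[key] = gfn;
}

static unsigned slot_of(uint64_t gfn)   /* cf. kvm_async_pf_gfn_slot() */
{
        unsigned key = hash(gfn), probes;

        for (probes = 0; probes < NSLOTS; probes++) {
                if (slots[key] == gfn || slots[key] == EMPTY)
                        break;
                key = (key + 1) & (NSLOTS - 1);
        }
        return key;
}

static void del_gfn(uint64_t gfn)   /* cf. kvm_del_async_pf_gfn(): backshift deletion */
{
        unsigned i, j, k;

        i = j = slot_of(gfn);
        if (slots[i] != gfn)
                return;

        for (;;) {
                slots[i] = EMPTY;
                do {                /* find a later entry whose probe chain crosses slot i */
                        j = (j + 1) & (NSLOTS - 1);
                        if (slots[j] == EMPTY)
                                return;         /* chain ends: nothing left to re-pack */
                        k = hash(slots[j]);
                } while (((j - k) & (NSLOTS - 1)) < ((j - i) & (NSLOTS - 1)));
                slots[i] = slots[j];            /* pull it back so lookups still find it */
                i = j;
        }
}

int main(void)
{
        unsigned i;

        for (i = 0; i < NSLOTS; i++)
                slots[i] = EMPTY;

        add_gfn(1); add_gfn(9); add_gfn(17);    /* 1, 9, 17 all collide in this table */
        del_gfn(9);                             /* deleting the middle entry re-packs 17 */
        printf("17 still findable: %s\n", slots[slot_of(17)] == 17 ? "yes" : "no");
        return 0;
}
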
11233 static inline int apf_put_user_notpresent(struct kvm_vcpu *vcpu) in apf_put_user_notpresent() argument
11237 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
11241 static inline int apf_put_user_ready(struct kvm_vcpu *vcpu, u32 token) in apf_put_user_ready() argument
11245 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
11249 static inline bool apf_pageready_slot_free(struct kvm_vcpu *vcpu) in apf_pageready_slot_free() argument
11254 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
11261 static bool kvm_can_deliver_async_pf(struct kvm_vcpu *vcpu) in kvm_can_deliver_async_pf() argument
11263 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) in kvm_can_deliver_async_pf()
11266 if (!kvm_pv_async_pf_enabled(vcpu) || in kvm_can_deliver_async_pf()
11267 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
11273 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu) in kvm_can_do_async_pf() argument
11275 if (unlikely(!lapic_in_kernel(vcpu) || in kvm_can_do_async_pf()
11276 kvm_event_needs_reinjection(vcpu) || in kvm_can_do_async_pf()
11277 vcpu->arch.exception.pending)) in kvm_can_do_async_pf()
11280 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
11287 return kvm_arch_interrupt_allowed(vcpu); in kvm_can_do_async_pf()
11290 bool kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
11296 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
11298 if (kvm_can_deliver_async_pf(vcpu) && in kvm_arch_async_page_not_present()
11299 !apf_put_user_notpresent(vcpu)) { in kvm_arch_async_page_not_present()
11306 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
11317 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
11322 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
11327 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
11333 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
11337 kvm_pv_async_pf_enabled(vcpu) && in kvm_arch_async_page_present()
11338 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
11339 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
11340 kvm_apic_set_irq(vcpu, &irq, NULL); in kvm_arch_async_page_present()
11343 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
11344 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
11347 void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu) in kvm_arch_async_page_present_queued() argument
11349 kvm_make_request(KVM_REQ_APF_READY, vcpu); in kvm_arch_async_page_present_queued()
11350 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
11351 kvm_vcpu_kick(vcpu); in kvm_arch_async_page_present_queued()
11354 bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_dequeue_async_page_present() argument
11356 if (!kvm_pv_async_pf_enabled(vcpu)) in kvm_arch_can_dequeue_async_page_present()
11359 return kvm_lapic_enabled(vcpu) && apf_pageready_slot_free(vcpu); in kvm_arch_can_dequeue_async_page_present()
11456 bool kvm_arch_no_poll(struct kvm_vcpu *vcpu) in kvm_arch_no_poll() argument
11458 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
11489 void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code) in kvm_fixup_and_inject_pf_error() argument
11496 vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { in kvm_fixup_and_inject_pf_error()
11498 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
11508 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
11517 int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r, in kvm_handle_memory_failure() argument
11521 kvm_inject_emulated_page_fault(vcpu, e); in kvm_handle_memory_failure()
11532 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_handle_memory_failure()
11533 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_handle_memory_failure()
11534 vcpu->run->internal.ndata = 0; in kvm_handle_memory_failure()
11540 int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) in kvm_handle_invpcid() argument
11552 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e); in kvm_handle_invpcid()
11554 return kvm_handle_memory_failure(vcpu, r, &e); in kvm_handle_invpcid()
11557 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
11561 pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); in kvm_handle_invpcid()
11566 is_noncanonical_address(operand.gla, vcpu)) { in kvm_handle_invpcid()
11567 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
11570 kvm_mmu_invpcid_gva(vcpu, operand.gla, operand.pcid); in kvm_handle_invpcid()
11571 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
11575 kvm_inject_gp(vcpu, 0); in kvm_handle_invpcid()
11579 if (kvm_get_active_pcid(vcpu) == operand.pcid) { in kvm_handle_invpcid()
11580 kvm_mmu_sync_roots(vcpu); in kvm_handle_invpcid()
11581 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu); in kvm_handle_invpcid()
11585 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd) in kvm_handle_invpcid()
11589 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); in kvm_handle_invpcid()
11596 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
11608 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu); in kvm_handle_invpcid()
11609 return kvm_skip_emulated_instruction(vcpu); in kvm_handle_invpcid()
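
kvm_handle_invpcid() above dispatches on the architectural INVPCID type: type 0 flushes one linear address in one PCID (after canonicality checks), type 1 flushes a single PCID by syncing and freeing matching roots, and types 2 and 3 (all contexts, with or without globals) both end in a full MMU reload. A compact summary of the four types (the enum names are illustrative stand-ins, not kernel constants):

#include <stdio.h>

/* Architectural INVPCID types; names here are made up for illustration. */
enum {
        INVPCID_ADDR          = 0,
        INVPCID_SINGLE        = 1,
        INVPCID_ALL_GLOBAL    = 2,
        INVPCID_ALL_NONGLOBAL = 3,
};

static const char *invpcid_action(unsigned long type)
{
        switch (type) {
        case INVPCID_ADDR:          return "flush one linear address in one PCID";
        case INVPCID_SINGLE:        return "flush one PCID (sync/free matching roots)";
        case INVPCID_ALL_GLOBAL:    /* fall through: both end in a full flush */
        case INVPCID_ALL_NONGLOBAL: return "flush everything (MMU reload)";
        default:                    return "reserved type -> #GP";
        }
}

int main(void)
{
        for (unsigned long t = 0; t < 5; t++)
                printf("type %lu: %s\n", t, invpcid_action(t));
        return 0;
}
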