Lines matching full:vcpu (each hit shows its source line number, the matching line, and the enclosing function; the trailing "argument"/"local" tags note how the identifier is used at that line)
103 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
104 static void process_nmi(struct kvm_vcpu *vcpu);
105 static void enter_smm(struct kvm_vcpu *vcpu);
106 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
107 static void store_regs(struct kvm_vcpu *vcpu);
108 static int sync_regs(struct kvm_vcpu *vcpu);
221 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu) in kvm_async_pf_hash_reset() argument
225 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
321 u64 kvm_get_apic_base(struct kvm_vcpu *vcpu) in kvm_get_apic_base() argument
323 return vcpu->arch.apic_base; in kvm_get_apic_base()
327 enum lapic_mode kvm_get_apic_mode(struct kvm_vcpu *vcpu) in kvm_get_apic_mode() argument
329 return kvm_apic_mode(kvm_get_apic_base(vcpu)); in kvm_get_apic_mode()
333 int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_apic_base() argument
335 enum lapic_mode old_mode = kvm_get_apic_mode(vcpu); in kvm_set_apic_base()
337 u64 reserved_bits = ((~0ULL) << cpuid_maxphyaddr(vcpu)) | 0x2ff | in kvm_set_apic_base()
338 (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) ? 0 : X2APIC_ENABLE); in kvm_set_apic_base()
349 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
407 static void kvm_multiple_exception(struct kvm_vcpu *vcpu, in kvm_multiple_exception() argument
414 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_multiple_exception()
416 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
418 if (has_error && !is_protmode(vcpu)) in kvm_multiple_exception()
422 * On vmentry, vcpu->arch.exception.pending is only in kvm_multiple_exception()
429 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
430 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
432 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
433 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
435 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
436 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
437 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
442 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
445 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_multiple_exception()
457 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
458 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
459 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
460 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
461 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
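The hits above trace kvm_multiple_exception()'s policy for stacking exceptions: a second contributory exception while one is pending merges into #DF per the SDM's double-fault table, and a fault while #DF is pending escalates to a triple fault. Below is a minimal, self-contained model of that policy; the toy_exception struct, toy_queue_exception(), and classify() names are inventions of this sketch, not KVM's types.

#include <stdbool.h>

enum { DE_VECTOR = 0, DF_VECTOR = 8, TS_VECTOR = 10, NP_VECTOR = 11,
       SS_VECTOR = 12, GP_VECTOR = 13, PF_VECTOR = 14 };
enum exc_class { EXCPT_BENIGN, EXCPT_CONTRIBUTORY, EXCPT_PF };

struct toy_exception {              /* stand-in for vcpu->arch.exception   */
    bool pending;
    unsigned nr;
    bool has_error_code;
    unsigned error_code;
    bool triple_fault;              /* stand-in for KVM_REQ_TRIPLE_FAULT   */
};

static enum exc_class classify(unsigned nr)
{
    if (nr == PF_VECTOR)
        return EXCPT_PF;
    if (nr == DE_VECTOR || nr == TS_VECTOR || nr == NP_VECTOR ||
        nr == SS_VECTOR || nr == GP_VECTOR)
        return EXCPT_CONTRIBUTORY;
    return EXCPT_BENIGN;
}

static void toy_queue_exception(struct toy_exception *e, unsigned nr,
                                bool has_error, unsigned error_code)
{
    if (!e->pending) {                      /* nothing queued: just record it */
        e->pending = true;
        e->nr = nr;
        e->has_error_code = has_error;
        e->error_code = error_code;
        return;
    }
    if (e->nr == DF_VECTOR) {               /* fault during #DF delivery      */
        e->triple_fault = true;             /* -> shutdown / triple fault     */
        return;
    }
    if ((classify(e->nr) == EXCPT_CONTRIBUTORY && classify(nr) == EXCPT_CONTRIBUTORY) ||
        (classify(e->nr) == EXCPT_PF && classify(nr) != EXCPT_BENIGN)) {
        e->nr = DF_VECTOR;                  /* merge into a double fault      */
        e->has_error_code = true;
        e->error_code = 0;
        return;
    }
    e->nr = nr;                             /* otherwise replace the old one; */
    e->has_error_code = has_error;          /* re-execution is expected to    */
    e->error_code = error_code;             /* regenerate the lost exception  */
}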
469 void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_queue_exception() argument
471 kvm_multiple_exception(vcpu, nr, false, 0, false); in kvm_queue_exception()
475 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) in kvm_requeue_exception() argument
477 kvm_multiple_exception(vcpu, nr, false, 0, true); in kvm_requeue_exception()
481 int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err) in kvm_complete_insn_gp() argument
484 kvm_inject_gp(vcpu, 0); in kvm_complete_insn_gp()
486 return kvm_skip_emulated_instruction(vcpu); in kvm_complete_insn_gp()
492 void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_inject_page_fault() argument
494 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
495 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
496 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
497 if (vcpu->arch.exception.nested_apf) in kvm_inject_page_fault()
498 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
500 vcpu->arch.cr2 = fault->address; in kvm_inject_page_fault()
501 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
505 static bool kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault) in kvm_propagate_fault() argument
507 if (mmu_is_nested(vcpu) && !fault->nested_page_fault) in kvm_propagate_fault()
508 vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
510 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in kvm_propagate_fault()
515 void kvm_inject_nmi(struct kvm_vcpu *vcpu) in kvm_inject_nmi() argument
517 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
518 kvm_make_request(KVM_REQ_NMI, vcpu); in kvm_inject_nmi()
522 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_queue_exception_e() argument
524 kvm_multiple_exception(vcpu, nr, true, error_code, false); in kvm_queue_exception_e()
528 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code) in kvm_requeue_exception_e() argument
530 kvm_multiple_exception(vcpu, nr, true, error_code, true); in kvm_requeue_exception_e()
538 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) in kvm_require_cpl() argument
540 if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) in kvm_require_cpl()
542 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in kvm_require_cpl()
547 bool kvm_require_dr(struct kvm_vcpu *vcpu, int dr) in kvm_require_dr() argument
549 if ((dr != 4 && dr != 5) || !kvm_read_cr4_bits(vcpu, X86_CR4_DE)) in kvm_require_dr()
552 kvm_queue_exception(vcpu, UD_VECTOR); in kvm_require_dr()
562 int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in kvm_read_guest_page_mmu() argument
571 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
577 return kvm_vcpu_read_guest_page(vcpu, real_gfn, data, offset, len); in kvm_read_guest_page_mmu()
581 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page() argument
584 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
588 static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu) in pdptr_rsvd_bits() argument
590 return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) | in pdptr_rsvd_bits()
597 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) in load_pdptrs() argument
605 ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte, in load_pdptrs()
614 (pdpte[i] & pdptr_rsvd_bits(vcpu))) { in load_pdptrs()
623 (unsigned long *)&vcpu->arch.regs_avail); in load_pdptrs()
625 (unsigned long *)&vcpu->arch.regs_dirty); in load_pdptrs()
632 bool pdptrs_changed(struct kvm_vcpu *vcpu) in pdptrs_changed() argument
634 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
640 if (!is_pae_paging(vcpu)) in pdptrs_changed()
644 (unsigned long *)&vcpu->arch.regs_avail)) in pdptrs_changed()
647 gfn = (kvm_read_cr3(vcpu) & 0xffffffe0ul) >> PAGE_SHIFT; in pdptrs_changed()
648 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); in pdptrs_changed()
649 r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte), in pdptrs_changed()
653 changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
660 int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) in kvm_set_cr0() argument
662 unsigned long old_cr0 = kvm_read_cr0(vcpu); in kvm_set_cr0()
680 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) { in kvm_set_cr0()
682 if ((vcpu->arch.efer & EFER_LME)) { in kvm_set_cr0()
685 if (!is_pae(vcpu)) in kvm_set_cr0()
687 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in kvm_set_cr0()
692 if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr0()
693 kvm_read_cr3(vcpu))) in kvm_set_cr0()
697 if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) in kvm_set_cr0()
700 kvm_x86_ops->set_cr0(vcpu, cr0); in kvm_set_cr0()
703 kvm_clear_async_pf_completion_queue(vcpu); in kvm_set_cr0()
704 kvm_async_pf_hash_reset(vcpu); in kvm_set_cr0()
708 kvm_mmu_reset_context(vcpu); in kvm_set_cr0()
711 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
712 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
713 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
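kvm_set_cr0()'s hits show the legality checks around turning CR0.PG on or off. The stand-alone checker below restates the main rules that function enforces; the toy_cr_state struct and check_enable_paging() are hypothetical names for this sketch, and the sketch skips the PDPTR reload that the real code performs for PAE guests.

#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_PE    (1u << 0)
#define X86_CR0_PG    (1u << 31)
#define X86_CR4_PAE   (1u << 5)
#define X86_CR4_PCIDE (1u << 17)
#define EFER_LME      (1u << 8)

struct toy_cr_state {          /* hypothetical snapshot of the relevant bits */
    uint32_t cr0, cr4;
    uint64_t efer;
    bool cs_l;                 /* CS.L of the current code segment */
};

/* Returns 0 if writing new_cr0 would be accepted, 1 if it must #GP. */
static int check_enable_paging(const struct toy_cr_state *s, uint32_t new_cr0)
{
    bool enabling_pg = !(s->cr0 & X86_CR0_PG) && (new_cr0 & X86_CR0_PG);

    if ((new_cr0 & X86_CR0_PG) && !(new_cr0 & X86_CR0_PE))
        return 1;                       /* PG without PE is illegal          */

    if (enabling_pg && (s->efer & EFER_LME)) {
        /* Entering IA-32e mode: PAE must already be on and the current
         * code segment must not be a 64-bit one. */
        if (!(s->cr4 & X86_CR4_PAE) || s->cs_l)
            return 1;
    }
    /* (For PAE guests the real code also reloads and validates the PDPTRs
     * from CR3 at this point.) */

    if (!(new_cr0 & X86_CR0_PG) && (s->cr4 & X86_CR4_PCIDE))
        return 1;                       /* PG clear while CR4.PCIDE is set   */

    return 0;
}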
719 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) in kvm_lmsw() argument
721 (void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); in kvm_lmsw()
725 void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) in kvm_load_guest_xcr0() argument
727 if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && in kvm_load_guest_xcr0()
728 !vcpu->guest_xcr0_loaded) { in kvm_load_guest_xcr0()
730 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xcr0()
731 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xcr0()
732 vcpu->guest_xcr0_loaded = 1; in kvm_load_guest_xcr0()
737 void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) in kvm_put_guest_xcr0() argument
739 if (vcpu->guest_xcr0_loaded) { in kvm_put_guest_xcr0()
740 if (vcpu->arch.xcr0 != host_xcr0) in kvm_put_guest_xcr0()
742 vcpu->guest_xcr0_loaded = 0; in kvm_put_guest_xcr0()
747 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in __kvm_set_xcr() argument
750 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
766 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
780 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
783 kvm_update_cpuid(vcpu); in __kvm_set_xcr()
787 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) in kvm_set_xcr() argument
789 if (kvm_x86_ops->get_cpl(vcpu) != 0 || in kvm_set_xcr()
790 __kvm_set_xcr(vcpu, index, xcr)) { in kvm_set_xcr()
791 kvm_inject_gp(vcpu, 0); in kvm_set_xcr()
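__kvm_set_xcr()'s hits show where guest XSETBV writes are vetted. The stand-alone checker below restates the consistency rules it enforces; valid_bits would come from guest CPUID in the real code, and the masks are the architectural XCR0 bit positions, hard-coded here for the sketch.

#include <stdint.h>

#define XFEATURE_MASK_FP        (1ull << 0)   /* x87, must always be set     */
#define XFEATURE_MASK_SSE       (1ull << 1)
#define XFEATURE_MASK_YMM       (1ull << 2)   /* AVX                         */
#define XFEATURE_MASK_BNDREGS   (1ull << 3)   /* MPX                         */
#define XFEATURE_MASK_BNDCSR    (1ull << 4)
#define XFEATURE_MASK_OPMASK    (1ull << 5)   /* AVX-512 state, three bits   */
#define XFEATURE_MASK_ZMM_Hi256 (1ull << 6)
#define XFEATURE_MASK_Hi16_ZMM  (1ull << 7)
#define XFEATURE_MASK_AVX512    (XFEATURE_MASK_OPMASK | \
                                 XFEATURE_MASK_ZMM_Hi256 | \
                                 XFEATURE_MASK_Hi16_ZMM)

/* Returns 0 if xcr0 is acceptable, 1 if XSETBV should #GP.
 * valid_bits is whatever the guest's CPUID says it may enable. */
static int check_xcr0(uint64_t xcr0, uint64_t valid_bits)
{
    if (!(xcr0 & XFEATURE_MASK_FP))
        return 1;                    /* x87 state can never be disabled      */
    if ((xcr0 & XFEATURE_MASK_YMM) && !(xcr0 & XFEATURE_MASK_SSE))
        return 1;                    /* AVX requires SSE state               */
    if (xcr0 & ~(valid_bits | XFEATURE_MASK_FP))
        return 1;                    /* no bits beyond what CPUID exposes    */
    if (!(xcr0 & XFEATURE_MASK_BNDREGS) != !(xcr0 & XFEATURE_MASK_BNDCSR))
        return 1;                    /* the two MPX components go together   */
    if (xcr0 & XFEATURE_MASK_AVX512) {
        if (!(xcr0 & XFEATURE_MASK_YMM))
            return 1;                /* AVX-512 requires AVX                 */
        if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
            return 1;                /* all three AVX-512 bits or none       */
    }
    return 0;
}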
827 static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_valid_cr4() argument
832 if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE)) in kvm_valid_cr4()
835 if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP)) in kvm_valid_cr4()
838 if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP)) in kvm_valid_cr4()
841 if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE)) in kvm_valid_cr4()
844 if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE)) in kvm_valid_cr4()
847 if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57)) in kvm_valid_cr4()
850 if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP)) in kvm_valid_cr4()
856 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) in kvm_set_cr4() argument
858 unsigned long old_cr4 = kvm_read_cr4(vcpu); in kvm_set_cr4()
863 if (kvm_valid_cr4(vcpu, cr4)) in kvm_set_cr4()
866 if (is_long_mode(vcpu)) { in kvm_set_cr4()
871 } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE) in kvm_set_cr4()
873 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
874 kvm_read_cr3(vcpu))) in kvm_set_cr4()
878 if (!guest_cpuid_has(vcpu, X86_FEATURE_PCID)) in kvm_set_cr4()
882 if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu)) in kvm_set_cr4()
886 if (kvm_x86_ops->set_cr4(vcpu, cr4)) in kvm_set_cr4()
891 kvm_mmu_reset_context(vcpu); in kvm_set_cr4()
894 kvm_update_cpuid(vcpu); in kvm_set_cr4()
900 int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) in kvm_set_cr3() argument
904 bool pcid_enabled = kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE); in kvm_set_cr3()
912 if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) { in kvm_set_cr3()
914 kvm_mmu_sync_roots(vcpu); in kvm_set_cr3()
915 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); in kvm_set_cr3()
920 if (is_long_mode(vcpu) && in kvm_set_cr3()
921 (cr3 & rsvd_bits(cpuid_maxphyaddr(vcpu), 63))) in kvm_set_cr3()
923 else if (is_pae_paging(vcpu) && in kvm_set_cr3()
924 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
927 kvm_mmu_new_cr3(vcpu, cr3, skip_tlb_flush); in kvm_set_cr3()
928 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
929 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in kvm_set_cr3()
935 int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8) in kvm_set_cr8() argument
939 if (lapic_in_kernel(vcpu)) in kvm_set_cr8()
940 kvm_lapic_set_tpr(vcpu, cr8); in kvm_set_cr8()
942 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
947 unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) in kvm_get_cr8() argument
949 if (lapic_in_kernel(vcpu)) in kvm_get_cr8()
950 return kvm_lapic_get_cr8(vcpu); in kvm_get_cr8()
952 return vcpu->arch.cr8; in kvm_get_cr8()
956 static void kvm_update_dr0123(struct kvm_vcpu *vcpu) in kvm_update_dr0123() argument
960 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
962 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
963 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
967 static void kvm_update_dr6(struct kvm_vcpu *vcpu) in kvm_update_dr6() argument
969 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in kvm_update_dr6()
970 kvm_x86_ops->set_dr6(vcpu, vcpu->arch.dr6); in kvm_update_dr6()
973 static void kvm_update_dr7(struct kvm_vcpu *vcpu) in kvm_update_dr7() argument
977 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
978 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
980 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
981 kvm_x86_ops->set_dr7(vcpu, dr7); in kvm_update_dr7()
982 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
984 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
987 static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) in kvm_dr6_fixed() argument
991 if (!guest_cpuid_has(vcpu, X86_FEATURE_RTM)) in kvm_dr6_fixed()
996 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in __kvm_set_dr() argument
998 size_t size = ARRAY_SIZE(vcpu->arch.db); in __kvm_set_dr()
1002 vcpu->arch.db[array_index_nospec(dr, size)] = val; in __kvm_set_dr()
1003 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
1004 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
1011 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
1012 kvm_update_dr6(vcpu); in __kvm_set_dr()
1019 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
1020 kvm_update_dr7(vcpu); in __kvm_set_dr()
1027 int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) in kvm_set_dr() argument
1029 if (__kvm_set_dr(vcpu, dr, val)) { in kvm_set_dr()
1030 kvm_inject_gp(vcpu, 0); in kvm_set_dr()
1037 int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) in kvm_get_dr() argument
1039 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1043 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1048 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_get_dr()
1049 *val = vcpu->arch.dr6; in kvm_get_dr()
1051 *val = kvm_x86_ops->get_dr6(vcpu); in kvm_get_dr()
1056 *val = vcpu->arch.dr7; in kvm_get_dr()
1063 bool kvm_rdpmc(struct kvm_vcpu *vcpu) in kvm_rdpmc() argument
1065 u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_rdpmc()
1069 err = kvm_pmu_rdpmc(vcpu, ecx, &data); in kvm_rdpmc()
1072 kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data); in kvm_rdpmc()
1073 kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32); in kvm_rdpmc()
1245 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr_feature() argument
1260 static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in __kvm_valid_efer() argument
1262 if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT)) in __kvm_valid_efer()
1265 if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM)) in __kvm_valid_efer()
1271 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) in kvm_valid_efer() argument
1276 return __kvm_valid_efer(vcpu, efer); in kvm_valid_efer()
1280 static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_efer() argument
1282 u64 old_efer = vcpu->arch.efer; in set_efer()
1289 if (!__kvm_valid_efer(vcpu, efer)) in set_efer()
1292 if (is_paging(vcpu) && in set_efer()
1293 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1298 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1300 kvm_x86_ops->set_efer(vcpu, efer); in set_efer()
1304 kvm_mmu_reset_context(vcpu); in set_efer()
1320 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_set_msr() argument
1328 if (is_noncanonical_address(msr->data, vcpu)) in kvm_set_msr()
1345 msr->data = get_canonical(msr->data, vcpu_virt_addr_bits(vcpu)); in kvm_set_msr()
1347 return kvm_x86_ops->set_msr(vcpu, msr); in kvm_set_msr()
1354 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_get_msr() argument
1361 r = kvm_get_msr(vcpu, &msr); in do_get_msr()
1369 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) in do_set_msr() argument
1376 return kvm_set_msr(vcpu, &msr); in do_set_msr()
1423 void kvm_set_pending_timer(struct kvm_vcpu *vcpu) in kvm_set_pending_timer() argument
1428 * the physical CPU that is running vcpu. in kvm_set_pending_timer()
1430 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); in kvm_set_pending_timer()
1528 static int set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale) in set_tsc_khz() argument
1534 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
1541 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
1542 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
1560 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
1564 static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) in kvm_set_tsc_khz() argument
1572 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
1578 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
1579 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
1580 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
1594 return set_tsc_khz(vcpu, user_tsc_khz, use_scaling); in kvm_set_tsc_khz()
1597 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns) in compute_guest_tsc() argument
1599 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
1600 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
1601 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
1602 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
1611 static void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) in kvm_track_tsc_matching() argument
1615 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
1619 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
1631 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_track_tsc_matching()
1633 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
1634 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
1639 static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset) in update_ia32_tsc_adjust_msr() argument
1641 u64 curr_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in update_ia32_tsc_adjust_msr()
1642 vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset; in update_ia32_tsc_adjust_msr()
1660 u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc) in kvm_scale_tsc() argument
1663 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
1672 static u64 kvm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc) in kvm_compute_tsc_offset() argument
1676 tsc = kvm_scale_tsc(vcpu, rdtsc()); in kvm_compute_tsc_offset()
1681 u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc) in kvm_read_l1_tsc() argument
1683 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in kvm_read_l1_tsc()
1685 return tsc_offset + kvm_scale_tsc(vcpu, host_tsc); in kvm_read_l1_tsc()
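kvm_scale_tsc() and kvm_read_l1_tsc() above apply a fixed-point ratio to the host TSC. The sketch below shows the arithmetic with a 128-bit intermediate; the helper name and the use of unsigned __int128 are choices of this sketch (the kernel uses mul_u64_u64_shr()), and the number of fractional bits is per-vendor (48 on VMX and 32 on SVM, if memory serves).

#include <stdint.h>

static inline uint64_t mul_u64_u64_shr_sketch(uint64_t a, uint64_t b, unsigned shift)
{
    return (uint64_t)(((unsigned __int128)a * b) >> shift);
}

/* guest_tsc = host_tsc * ratio / 2^frac_bits; ratio == 1 << frac_bits
 * means the guest TSC runs at the host frequency (no scaling). */
static uint64_t scale_tsc_sketch(uint64_t host_tsc, uint64_t ratio, unsigned frac_bits)
{
    return mul_u64_u64_shr_sketch(host_tsc, ratio, frac_bits);
}

/* What kvm_read_l1_tsc() computes: the scaled host TSC plus the L1 offset. */
static uint64_t read_l1_tsc_sketch(uint64_t host_tsc, uint64_t ratio,
                                   unsigned frac_bits, int64_t tsc_offset)
{
    return scale_tsc_sketch(host_tsc, ratio, frac_bits) + (uint64_t)tsc_offset;
}

/* Example: a 1.5 GHz guest on a 3.0 GHz host with 48 fractional bits uses
 * ratio = (1500000ull << 48) / 3000000 = 1ull << 47, i.e. half speed. */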
1689 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) in kvm_vcpu_write_tsc_offset() argument
1691 vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
1707 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_write_tsc() argument
1709 struct kvm *kvm = vcpu->kvm; in kvm_write_tsc()
1718 offset = kvm_compute_tsc_offset(vcpu, data); in kvm_write_tsc()
1722 if (vcpu->arch.virtual_tsc_khz) { in kvm_write_tsc()
1725 * detection of vcpu initialization -- need to sync in kvm_write_tsc()
1732 nsec_to_cycles(vcpu, elapsed); in kvm_write_tsc()
1733 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_write_tsc()
1751 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_write_tsc()
1756 u64 delta = nsec_to_cycles(vcpu, elapsed); in kvm_write_tsc()
1758 offset = kvm_compute_tsc_offset(vcpu, data); in kvm_write_tsc()
1762 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_write_tsc()
1788 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_write_tsc()
1790 vcpu->arch.last_guest_tsc = data; in kvm_write_tsc()
1792 /* Keep track of which generation this VCPU has synchronized to */ in kvm_write_tsc()
1793 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_write_tsc()
1794 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_write_tsc()
1795 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_write_tsc()
1797 if (!msr->host_initiated && guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) in kvm_write_tsc()
1798 update_ia32_tsc_adjust_msr(vcpu, offset); in kvm_write_tsc()
1800 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_write_tsc()
1810 kvm_track_tsc_matching(vcpu); in kvm_write_tsc()
1816 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, in adjust_tsc_offset_guest() argument
1819 u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); in adjust_tsc_offset_guest()
1820 kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); in adjust_tsc_offset_guest()
1823 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) in adjust_tsc_offset_host() argument
1825 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
1827 adjustment = kvm_scale_tsc(vcpu, (u64) adjustment); in adjust_tsc_offset_host()
1828 adjust_tsc_offset_guest(vcpu, adjustment); in adjust_tsc_offset_host()
2033 struct kvm_vcpu *vcpu; in kvm_gen_update_masterclock() local
2041 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
2042 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_gen_update_masterclock()
2045 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_gen_update_masterclock()
2046 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_gen_update_masterclock()
2086 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page() local
2089 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2093 /* This VCPU is paused, but it's legal for a guest to read another in kvm_setup_pvclock_page()
2094 * VCPU's kvmclock, so we really have to follow the specification where in kvm_setup_pvclock_page()
2104 * and third write. The vcpu->pv_time cache is still valid, because the in kvm_setup_pvclock_page()
2112 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2113 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2114 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2115 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2120 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2122 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2123 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2124 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2127 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2129 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2130 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2131 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2135 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2136 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2137 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2138 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
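kvm_setup_pvclock_page() above bumps hv_clock.version to an odd value, writes the payload, then bumps it again: a seqcount-style protocol the guest must honour when reading its kvmclock page. A guest-side read loop might look like the sketch below; the reduced struct and the plain compiler barriers stand in for the real pvclock_vcpu_time_info layout and the required read barriers.

#include <stdint.h>

struct toy_pvclock {                 /* reduced stand-in for the shared page */
    volatile uint32_t version;       /* odd while the host is updating it    */
    volatile uint64_t tsc_timestamp;
    volatile uint64_t system_time;
    volatile uint32_t tsc_to_system_mul;
    volatile int8_t   tsc_shift;
    volatile uint8_t  flags;
};

static void read_pvclock(const struct toy_pvclock *clk,
                         uint64_t *tsc_timestamp, uint64_t *system_time)
{
    uint32_t v1, v2;

    do {
        v1 = clk->version;
        __asm__ __volatile__("" ::: "memory");   /* real code: read barrier  */
        *tsc_timestamp = clk->tsc_timestamp;
        *system_time = clk->system_time;
        __asm__ __volatile__("" ::: "memory");
        v2 = clk->version;
        /* Retry if an update was in flight (odd) or completed in between.   */
    } while ((v1 & 1) || v1 != v2);
}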
2144 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update() local
2185 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
2191 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
2206 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
2208 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
2209 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
2210 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
2213 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
2214 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
2215 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
2222 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
2224 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
2227 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
2232 * kvmclock updates which are isolated to a given vcpu, such as
2233 * vcpu->cpu migration, should not allow system_timestamp from
2235 * correction applies to one vcpu's system_timestamp but not
2241 * The time for a remote vcpu to update its kvmclock is bound
2254 struct kvm_vcpu *vcpu; in kvmclock_update_fn() local
2256 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_update_fn()
2257 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_update_fn()
2258 kvm_vcpu_kick(vcpu); in kvmclock_update_fn()
2288 static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in set_msr_mce() argument
2290 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
2297 vcpu->arch.mcg_status = data; in set_msr_mce()
2305 vcpu->arch.mcg_ctl = data; in set_msr_mce()
2325 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
2333 static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data) in xen_hvm_config() argument
2335 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
2336 int lm = is_long_mode(vcpu); in xen_hvm_config()
2355 if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) in xen_hvm_config()
2364 static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) in kvm_pv_enable_async_pf() argument
2372 vcpu->arch.apf.msr_val = data; in kvm_pv_enable_async_pf()
2375 kvm_clear_async_pf_completion_queue(vcpu); in kvm_pv_enable_async_pf()
2376 kvm_async_pf_hash_reset(vcpu); in kvm_pv_enable_async_pf()
2380 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2384 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2385 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
2386 kvm_async_pf_wakeup_all(vcpu); in kvm_pv_enable_async_pf()
2390 static void kvmclock_reset(struct kvm_vcpu *vcpu) in kvmclock_reset() argument
2392 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
2395 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa) in kvm_vcpu_flush_tlb() argument
2397 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb()
2398 kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa); in kvm_vcpu_flush_tlb()
2401 static void record_steal_time(struct kvm_vcpu *vcpu) in record_steal_time() argument
2406 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
2410 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, in record_steal_time()
2411 &map, &vcpu->arch.st.cache, false)) in record_steal_time()
2415 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in record_steal_time()
2422 kvm_vcpu_flush_tlb(vcpu, false); in record_steal_time()
2424 vcpu->arch.st.preempted = 0; in record_steal_time()
2434 vcpu->arch.st.last_steal; in record_steal_time()
2435 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
2441 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); in record_steal_time()
2444 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_set_msr_common() argument
2462 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
2467 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
2470 return set_efer(vcpu, msr_info); in kvm_set_msr_common()
2477 vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", in kvm_set_msr_common()
2484 vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: " in kvm_set_msr_common()
2498 vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n", in kvm_set_msr_common()
2502 return kvm_mtrr_set_msr(vcpu, msr, data); in kvm_set_msr_common()
2504 return kvm_set_apic_base(vcpu, msr_info); in kvm_set_msr_common()
2506 return kvm_x2apic_msr_write(vcpu, msr, data); in kvm_set_msr_common()
2508 kvm_set_lapic_tscdeadline_msr(vcpu, data); in kvm_set_msr_common()
2511 if (guest_cpuid_has(vcpu, X86_FEATURE_TSC_ADJUST)) { in kvm_set_msr_common()
2513 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
2514 adjust_tsc_offset_guest(vcpu, adj); in kvm_set_msr_common()
2516 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
2520 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
2525 vcpu->arch.smbase = data; in kvm_set_msr_common()
2528 kvm_write_tsc(vcpu, msr_info); in kvm_set_msr_common()
2533 vcpu->arch.smi_count = data; in kvm_set_msr_common()
2537 vcpu->kvm->arch.wall_clock = data; in kvm_set_msr_common()
2538 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
2542 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_set_msr_common()
2544 kvmclock_reset(vcpu); in kvm_set_msr_common()
2546 if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) { in kvm_set_msr_common()
2550 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_set_msr_common()
2555 vcpu->arch.time = data; in kvm_set_msr_common()
2556 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_set_msr_common()
2562 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_set_msr_common()
2563 &vcpu->arch.pv_time, data & ~1ULL, in kvm_set_msr_common()
2565 vcpu->arch.pv_time_enabled = false; in kvm_set_msr_common()
2567 vcpu->arch.pv_time_enabled = true; in kvm_set_msr_common()
2572 if (kvm_pv_enable_async_pf(vcpu, data)) in kvm_set_msr_common()
2583 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
2588 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_set_msr_common()
2592 if (kvm_lapic_enable_pv_eoi(vcpu, data, sizeof(u8))) in kvm_set_msr_common()
2599 return set_msr_mce(vcpu, msr_info); in kvm_set_msr_common()
2606 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
2607 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
2610 vcpu_unimpl(vcpu, "disabled perfctr wrmsr: " in kvm_set_msr_common()
2630 return kvm_hv_set_msr_common(vcpu, msr, data, in kvm_set_msr_common()
2637 vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", in kvm_set_msr_common()
2641 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
2643 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
2646 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_set_msr_common()
2648 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
2653 cpuid_fault_enabled(vcpu))) in kvm_set_msr_common()
2655 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
2660 !supports_cpuid_fault(vcpu))) in kvm_set_msr_common()
2662 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
2665 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
2666 return xen_hvm_config(vcpu, data); in kvm_set_msr_common()
2667 if (kvm_pmu_is_valid_msr(vcpu, msr)) in kvm_set_msr_common()
2668 return kvm_pmu_set_msr(vcpu, msr_info); in kvm_set_msr_common()
2670 vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_set_msr_common()
2675 vcpu_unimpl(vcpu, in kvm_set_msr_common()
2691 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) in kvm_get_msr() argument
2693 return kvm_x86_ops->get_msr(vcpu, msr); in kvm_get_msr()
2697 static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) in get_msr_mce() argument
2700 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
2709 data = vcpu->arch.mcg_cap; in get_msr_mce()
2714 data = vcpu->arch.mcg_ctl; in get_msr_mce()
2717 data = vcpu->arch.mcg_status; in get_msr_mce()
2726 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
2735 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) in kvm_get_msr_common() argument
2764 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
2765 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2769 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
2773 !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES)) in kvm_get_msr_common()
2775 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
2778 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; in kvm_get_msr_common()
2782 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2801 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
2804 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2807 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
2810 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
2813 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
2818 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
2821 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
2830 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
2834 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
2838 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
2841 msr_info->data = vcpu->arch.apf.msr_val; in kvm_get_msr_common()
2844 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
2847 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
2855 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
2876 return kvm_hv_get_msr_common(vcpu, in kvm_get_msr_common()
2894 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
2896 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
2899 if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW)) in kvm_get_msr_common()
2901 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
2905 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
2907 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
2910 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
2913 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
2914 return kvm_pmu_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
2916 vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n", in kvm_get_msr_common()
2921 vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", in kvm_get_msr_common()
2936 static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, in __msr_io() argument
2938 int (*do_msr)(struct kvm_vcpu *vcpu, in __msr_io() argument
2944 if (do_msr(vcpu, entries[i].index, &entries[i].data)) in __msr_io()
2955 static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs, in msr_io() argument
2956 int (*do_msr)(struct kvm_vcpu *vcpu, in msr_io() argument
2980 r = n = __msr_io(vcpu, &msrs, entries, do_msr); in msr_io()
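do_get_msr()/do_set_msr() and msr_io() above are the kernel ends of the KVM_GET_MSRS/KVM_SET_MSRS vCPU ioctls. A minimal user-space counterpart, assuming a vCPU fd already obtained from KVM_CREATE_VCPU, could look like this:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdlib.h>

/* Read a single MSR from the guest state held by KVM. Returns 0 on success.
 * KVM_GET_MSRS returns the number of entries processed, so anything other
 * than 1 here means the MSR was rejected. */
static int read_guest_msr(int vcpu_fd, uint32_t index, uint64_t *value)
{
    struct kvm_msrs *msrs;
    int processed;

    msrs = calloc(1, sizeof(*msrs) + sizeof(struct kvm_msr_entry));
    if (!msrs)
        return -1;

    msrs->nmsrs = 1;
    msrs->entries[0].index = index;

    processed = ioctl(vcpu_fd, KVM_GET_MSRS, msrs);
    if (processed == 1)
        *value = msrs->entries[0].data;

    free(msrs);
    return processed == 1 ? 0 : -1;
}

Writing is symmetric: fill entries[0].data and issue KVM_SET_MSRS, which ends up in do_set_msr() above.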
3218 static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) in need_emulate_wbinvd() argument
3220 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
3223 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_vcpu_load() argument
3226 if (need_emulate_wbinvd(vcpu)) { in kvm_arch_vcpu_load()
3228 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
3229 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
3230 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
3234 kvm_x86_ops->vcpu_load(vcpu, cpu); in kvm_arch_vcpu_load()
3237 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
3238 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
3239 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
3240 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
3243 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
3244 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
3245 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
3250 u64 offset = kvm_compute_tsc_offset(vcpu, in kvm_arch_vcpu_load()
3251 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
3252 kvm_vcpu_write_tsc_offset(vcpu, offset); in kvm_arch_vcpu_load()
3253 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
3256 if (kvm_lapic_hv_timer_in_use(vcpu)) in kvm_arch_vcpu_load()
3257 kvm_lapic_restart_hv_timer(vcpu); in kvm_arch_vcpu_load()
3261 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
3263 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
3264 kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu); in kvm_arch_vcpu_load()
3265 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
3266 kvm_make_request(KVM_REQ_MIGRATE_TIMER, vcpu); in kvm_arch_vcpu_load()
3267 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
3270 kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); in kvm_arch_vcpu_load()
3273 static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu) in kvm_steal_time_set_preempted() argument
3278 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
3281 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
3284 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, in kvm_steal_time_set_preempted()
3285 &vcpu->arch.st.cache, true)) in kvm_steal_time_set_preempted()
3289 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in kvm_steal_time_set_preempted()
3291 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
3293 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); in kvm_steal_time_set_preempted()
3296 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_put() argument
3300 if (vcpu->preempted) in kvm_arch_vcpu_put()
3301 vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu); in kvm_arch_vcpu_put()
3316 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
3317 kvm_steal_time_set_preempted(vcpu); in kvm_arch_vcpu_put()
3318 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
3320 kvm_x86_ops->vcpu_put(vcpu); in kvm_arch_vcpu_put()
3321 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
3330 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_get_lapic() argument
3333 if (vcpu->arch.apicv_active) in kvm_vcpu_ioctl_get_lapic()
3334 kvm_x86_ops->sync_pir_to_irr(vcpu); in kvm_vcpu_ioctl_get_lapic()
3336 return kvm_apic_get_state(vcpu, s); in kvm_vcpu_ioctl_get_lapic()
3339 static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_set_lapic() argument
3344 r = kvm_apic_set_state(vcpu, s); in kvm_vcpu_ioctl_set_lapic()
3347 update_cr8_intercept(vcpu); in kvm_vcpu_ioctl_set_lapic()
3352 static int kvm_cpu_accept_dm_intr(struct kvm_vcpu *vcpu) in kvm_cpu_accept_dm_intr() argument
3354 return (!lapic_in_kernel(vcpu) || in kvm_cpu_accept_dm_intr()
3355 kvm_apic_accept_pic_intr(vcpu)); in kvm_cpu_accept_dm_intr()
3364 static int kvm_vcpu_ready_for_interrupt_injection(struct kvm_vcpu *vcpu) in kvm_vcpu_ready_for_interrupt_injection() argument
3366 return kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
3367 !kvm_cpu_has_interrupt(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
3368 !kvm_event_needs_reinjection(vcpu) && in kvm_vcpu_ready_for_interrupt_injection()
3369 kvm_cpu_accept_dm_intr(vcpu); in kvm_vcpu_ready_for_interrupt_injection()
3372 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_interrupt() argument
3378 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
3379 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
3380 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
3388 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
3391 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
3394 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
3395 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_interrupt()
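kvm_vcpu_ioctl_interrupt() above backs the KVM_INTERRUPT ioctl, which user space uses to inject an external interrupt when it emulates the interrupt controller itself. A minimal caller, assuming no in-kernel irqchip and an existing vCPU fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Queue external interrupt 'vector' on a vCPU. Only valid when the PIC/APIC
 * are emulated in user space; with an in-kernel irqchip the call is
 * rejected, as the handler above shows. */
static int inject_external_interrupt(int vcpu_fd, unsigned int vector)
{
    struct kvm_interrupt irq = { .irq = vector };

    return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}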
3399 static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_nmi() argument
3401 kvm_inject_nmi(vcpu); in kvm_vcpu_ioctl_nmi()
3406 static int kvm_vcpu_ioctl_smi(struct kvm_vcpu *vcpu) in kvm_vcpu_ioctl_smi() argument
3408 kvm_make_request(KVM_REQ_SMI, vcpu); in kvm_vcpu_ioctl_smi()
3413 static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu, in vcpu_ioctl_tpr_access_reporting() argument
3418 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
3422 static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_setup_mce() argument
3434 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
3437 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
3440 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
3443 kvm_x86_ops->setup_mce(vcpu); in kvm_vcpu_ioctl_x86_setup_mce()
3448 static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_mce() argument
3451 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
3453 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
3462 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
3472 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
3473 !kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) { in kvm_vcpu_ioctl_x86_set_mce()
3474 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); in kvm_vcpu_ioctl_x86_set_mce()
3481 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
3483 kvm_queue_exception(vcpu, MC_VECTOR); in kvm_vcpu_ioctl_x86_set_mce()
3496 static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_vcpu_events() argument
3499 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3506 (vcpu->arch.exception.pending || in kvm_vcpu_ioctl_x86_get_vcpu_events()
3507 vcpu->arch.exception.injected) && in kvm_vcpu_ioctl_x86_get_vcpu_events()
3508 !kvm_exception_is_soft(vcpu->arch.exception.nr); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3509 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3510 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3512 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3515 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3516 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3518 events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3520 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3521 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3522 events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3527 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3528 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
3530 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3531 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
3539 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags);
3541 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events() argument
3552 is_guest_mode(vcpu))) in kvm_vcpu_ioctl_x86_set_vcpu_events()
3558 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
3561 process_nmi(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3562 vcpu->arch.exception.injected = false; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3563 vcpu->arch.exception.pending = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3564 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3565 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3566 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3568 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3569 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3570 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3572 kvm_x86_ops->set_interrupt_shadow(vcpu, in kvm_vcpu_ioctl_x86_set_vcpu_events()
3575 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3577 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3578 kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3581 lapic_in_kernel(vcpu)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
3582 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3585 u32 hflags = vcpu->arch.hflags; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3590 kvm_set_hflags(vcpu, hflags); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3592 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3596 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3598 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
3599 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
3601 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3603 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3608 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
3613 static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_debugregs() argument
3618 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
3619 kvm_get_dr(vcpu, 6, &val); in kvm_vcpu_ioctl_x86_get_debugregs()
3621 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
3626 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_debugregs() argument
3637 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
3638 kvm_update_dr0123(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3639 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
3640 kvm_update_dr6(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
3641 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
3642 kvm_update_dr7(vcpu); in kvm_vcpu_ioctl_x86_set_debugregs()
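The two handlers above implement KVM_GET_DEBUGREGS and KVM_SET_DEBUGREGS. A round-trip from user space, assuming an existing vCPU fd and a host that exposes KVM_CAP_DEBUGREGS, is a thin wrapper over struct kvm_debugregs:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>

/* Fetch the guest debug registers, replace DR7, and write them back.
 * new_dr7 is whatever value the caller wants the guest to observe. */
static int set_guest_dr7(int vcpu_fd, uint64_t new_dr7)
{
    struct kvm_debugregs regs;

    if (ioctl(vcpu_fd, KVM_GET_DEBUGREGS, &regs) < 0)
        return -1;

    regs.dr7 = new_dr7;

    return ioctl(vcpu_fd, KVM_SET_DEBUGREGS, &regs);
}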
3649 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu) in fill_xsave() argument
3651 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; in fill_xsave()
3662 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; in fill_xsave()
3680 memcpy(dest + offset, &vcpu->arch.pkru, in fill_xsave()
3681 sizeof(vcpu->arch.pkru)); in fill_xsave()
3691 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src) in load_xsave() argument
3693 struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave; in load_xsave()
3723 memcpy(&vcpu->arch.pkru, src + offset, in load_xsave()
3724 sizeof(vcpu->arch.pkru)); in load_xsave()
3733 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xsave() argument
3738 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
3741 &vcpu->arch.guest_fpu.state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
3750 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xsave() argument
3766 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
3771 memcpy(&vcpu->arch.guest_fpu.state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
3777 static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_get_xcrs() argument
3788 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
3791 static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_x86_set_xcrs() argument
3805 r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, in kvm_vcpu_ioctl_x86_set_xcrs()
3820 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu) in kvm_set_guest_paused() argument
3822 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
3824 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
3825 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_set_guest_paused()
3829 static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, in kvm_vcpu_ioctl_enable_cap() argument
3840 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
3842 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
3852 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl() local
3862 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl()
3868 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
3875 r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
3886 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
3894 r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic); in kvm_arch_vcpu_ioctl()
3903 r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); in kvm_arch_vcpu_ioctl()
3907 r = kvm_vcpu_ioctl_nmi(vcpu); in kvm_arch_vcpu_ioctl()
3911 r = kvm_vcpu_ioctl_smi(vcpu); in kvm_arch_vcpu_ioctl()
3921 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
3931 r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
3942 r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid, in kvm_arch_vcpu_ioctl()
3953 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
3954 r = msr_io(vcpu, argp, do_get_msr, 1); in kvm_arch_vcpu_ioctl()
3955 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
3959 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
3960 r = msr_io(vcpu, argp, do_set_msr, 0); in kvm_arch_vcpu_ioctl()
3961 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
3970 r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac); in kvm_arch_vcpu_ioctl()
3984 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_ioctl()
3989 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
3990 r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); in kvm_arch_vcpu_ioctl()
3991 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4000 r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap); in kvm_arch_vcpu_ioctl()
4009 r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); in kvm_arch_vcpu_ioctl()
4015 kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
4030 r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events); in kvm_arch_vcpu_ioctl()
4036 kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
4053 r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs); in kvm_arch_vcpu_ioctl()
4062 kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
4077 r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave); in kvm_arch_vcpu_ioctl()
4086 kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
4102 r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs); in kvm_arch_vcpu_ioctl()
4117 if (!kvm_set_tsc_khz(vcpu, user_tsc_khz)) in kvm_arch_vcpu_ioctl()
4123 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
4127 r = kvm_set_guest_paused(vcpu); in kvm_arch_vcpu_ioctl()
4136 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); in kvm_arch_vcpu_ioctl()
4152 r = kvm_x86_ops->get_nested_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
4193 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4194 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
4195 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4204 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl()
4208 vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) in kvm_arch_vcpu_fault() argument
4886 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, in vcpu_mmio_write() argument
4894 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_write()
4895 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
4896 && kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_write()
4907 static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v) in vcpu_mmio_read() argument
4914 if (!(lapic_in_kernel(vcpu) && in vcpu_mmio_read()
4915 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
4917 && kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, n, v)) in vcpu_mmio_read()
4929 static void kvm_set_segment(struct kvm_vcpu *vcpu, in kvm_set_segment() argument
4932 kvm_x86_ops->set_segment(vcpu, var, seg); in kvm_set_segment()
4935 void kvm_get_segment(struct kvm_vcpu *vcpu, in kvm_get_segment() argument
4938 kvm_x86_ops->get_segment(vcpu, var, seg); in kvm_get_segment()
4941 gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access, in translate_nested_gpa() argument
4946 BUG_ON(!mmu_is_nested(vcpu)); in translate_nested_gpa()
4950 t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
4955 gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_read() argument
4958 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_read()
4959 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
4962 gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_fetch() argument
4965 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_fetch()
4967 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
4970 gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_write() argument
4973 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_mmu_gva_to_gpa_write()
4975 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
4979 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, in kvm_mmu_gva_to_gpa_system() argument
4982 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
4986 struct kvm_vcpu *vcpu, u32 access, in kvm_read_guest_virt_helper() argument
4993 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
5001 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, data, in kvm_read_guest_virt_helper()
5021 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_fetch_guest_virt() local
5022 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_fetch_guest_virt()
5027 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
5035 ret = kvm_vcpu_read_guest_page(vcpu, gpa >> PAGE_SHIFT, val, in kvm_fetch_guest_virt()
5043 int kvm_read_guest_virt(struct kvm_vcpu *vcpu, in kvm_read_guest_virt() argument
5047 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; in kvm_read_guest_virt()
5056 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, in kvm_read_guest_virt()
5065 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_std() local
5068 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) in emulator_read_std()
5071 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, exception); in emulator_read_std()
5077 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in kvm_read_guest_phys_system() local
5078 int r = kvm_vcpu_read_guest(vcpu, addr, val, bytes); in kvm_read_guest_phys_system()
5084 struct kvm_vcpu *vcpu, u32 access, in kvm_write_guest_virt_helper() argument
5091 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
5100 ret = kvm_vcpu_write_guest(vcpu, gpa, data, towrite); in kvm_write_guest_virt_helper()
5118 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_write_std() local
5121 if (!system && kvm_x86_ops->get_cpl(vcpu) == 3) in emulator_write_std()
5124 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in emulator_write_std()
5128 int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val, in kvm_write_guest_virt_system() argument
5132 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
5141 return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, in kvm_write_guest_virt_system()
5146 int handle_ud(struct kvm_vcpu *vcpu) in handle_ud() argument
5154 kvm_read_guest_virt(vcpu, kvm_get_linear_rip(vcpu), in handle_ud()
5157 kvm_rip_write(vcpu, kvm_rip_read(vcpu) + sizeof(sig)); in handle_ud()
5161 er = kvm_emulate_instruction(vcpu, emul_type); in handle_ud()
5165 kvm_queue_exception(vcpu, UD_VECTOR); in handle_ud()
5170 static int vcpu_is_mmio_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_is_mmio_gpa() argument
5177 if (vcpu_match_mmio_gpa(vcpu, gpa)) { in vcpu_is_mmio_gpa()
5185 static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, in vcpu_mmio_gva_to_gpa() argument
5189 u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) in vcpu_mmio_gva_to_gpa()
5197 if (vcpu_match_mmio_gva(vcpu, gva) in vcpu_mmio_gva_to_gpa()
5198 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
5199 vcpu->arch.access, 0, access)) { in vcpu_mmio_gva_to_gpa()
5200 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
5206 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
5211 return vcpu_is_mmio_gpa(vcpu, gva, *gpa, write); in vcpu_mmio_gva_to_gpa()
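vcpu_mmio_gva_to_gpa() above first consults a one-entry cache of the last MMIO translation (vcpu->arch.mmio_gva / mmio_gfn, validated by vcpu_match_mmio_gva() plus a permission check), so repeated accesses to the same emulated MMIO page avoid a full gva_to_gpa() walk; only on a miss does it fall back to walk_mmu->gva_to_gpa() and re-classify the result with vcpu_is_mmio_gpa(). Below is a rough standalone model of that one-entry translation cache; the structure and function names are invented for the sketch, they are not the kernel's.

    /* One-entry MMIO translation cache, modelled after vcpu_mmio_gva_to_gpa(). */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_MASK  (~((uint64_t)(1 << PAGE_SHIFT) - 1))

    struct mmio_cache {
        uint64_t gva_page;   /* page-aligned guest virtual address      */
        uint64_t gfn;        /* guest frame number it translated to     */
        unsigned int gen;    /* generation when the entry was filled    */
    };

    static bool mmio_cache_lookup(const struct mmio_cache *c, unsigned int cur_gen,
                                  uint64_t gva, uint64_t *gpa)
    {
        if (c->gen != cur_gen)                  /* cache was invalidated */
            return false;
        if ((gva & PAGE_MASK) != c->gva_page)   /* different page        */
            return false;

        /* Hit: combine the cached frame with the in-page offset. */
        *gpa = (c->gfn << PAGE_SHIFT) | (gva & ~PAGE_MASK);
        return true;
    }

    int main(void)
    {
        struct mmio_cache c = { .gva_page = 0xfee00000, .gfn = 0xfee00, .gen = 7 };
        uint64_t gpa;

        if (mmio_cache_lookup(&c, 7, 0xfee00300, &gpa))
            printf("cached translation: %#llx\n", (unsigned long long)gpa);
        return 0;
    }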
5214 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa, in emulator_write_phys() argument
5219 ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes); in emulator_write_phys()
5222 kvm_page_track_write(vcpu, gpa, val, bytes); in emulator_write_phys()
5227 int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
5229 int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
5231 int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
5233 int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
5238 static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) in read_prepare() argument
5240 if (vcpu->mmio_read_completed) { in read_prepare()
5242 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
5243 vcpu->mmio_read_completed = 0; in read_prepare()
5250 static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in read_emulate() argument
5253 return !kvm_vcpu_read_guest(vcpu, gpa, val, bytes); in read_emulate()
5256 static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa, in write_emulate() argument
5259 return emulator_write_phys(vcpu, gpa, val, bytes); in write_emulate()
5262 static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val) in write_mmio() argument
5265 return vcpu_mmio_write(vcpu, gpa, bytes, val); in write_mmio()
5268 static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in read_exit_mmio() argument
5275 static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, in write_exit_mmio() argument
5278 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
5280 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
5301 struct kvm_vcpu *vcpu, in emulator_read_write_onepage() argument
5308 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
5317 if (vcpu->arch.gpa_available && in emulator_read_write_onepage()
5319 (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
5320 gpa = vcpu->arch.gpa_val; in emulator_read_write_onepage()
5321 ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write); in emulator_read_write_onepage()
5323 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); in emulator_read_write_onepage()
5328 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
5334 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
5342 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
5343 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
5356 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_read_write() local
5361 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
5364 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
5372 vcpu, ops); in emulator_read_write()
5384 vcpu, ops); in emulator_read_write()
5388 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
5391 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
5393 vcpu->mmio_needed = 1; in emulator_read_write()
5394 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
5396 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
5397 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
5398 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
5399 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
5401 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
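emulator_read_write() and emulator_read_write_onepage() above resolve the access to a GPA, try the in-kernel handlers first (read_write_emulate(), then read_write_mmio(), which dispatches over KVM_MMIO_BUS), and only if nothing claims the address do they queue mmio_fragments, set vcpu->run->exit_reason = KVM_EXIT_MMIO and bounce to userspace. A sketch of the userspace half follows; mmio_device_read()/mmio_device_write() are hypothetical VMM helpers, not KVM API.

    /* Userspace side of a KVM_EXIT_MMIO exit, called from the VMM's KVM_RUN
     * loop with the mmap'ed struct kvm_run. */
    #include <linux/kvm.h>
    #include <stdint.h>
    #include <string.h>

    uint64_t mmio_device_read(uint64_t addr, unsigned len);
    void mmio_device_write(uint64_t addr, const void *data, unsigned len);

    static void handle_mmio_exit(struct kvm_run *run)
    {
        if (run->mmio.is_write) {
            /* Guest stored to an emulated register: consume the data. */
            mmio_device_write(run->mmio.phys_addr, run->mmio.data,
                              run->mmio.len);
        } else {
            /* Guest loaded from an emulated register: fill in the result;
             * KVM copies it back into the guest on the next KVM_RUN. */
            uint64_t val = mmio_device_read(run->mmio.phys_addr,
                                            run->mmio.len);
            memcpy(run->mmio.data, &val, run->mmio.len);
        }
    }

Because kvm_run->mmio.data holds at most 8 bytes, wide or page-crossing accesses arrive as several consecutive exits; see complete_emulated_mmio() further down in this listing.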
5441 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_cmpxchg_emulated() local
5451 gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL); in emulator_cmpxchg_emulated()
5460 page = kvm_vcpu_gfn_to_page(vcpu, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
5488 kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT); in emulator_cmpxchg_emulated()
5489 kvm_page_track_write(vcpu, gpa, new, bytes); in emulator_cmpxchg_emulated()
5499 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) in kernel_pio() argument
5503 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
5504 if (vcpu->arch.pio.in) in kernel_pio()
5505 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
5506 vcpu->arch.pio.size, pd); in kernel_pio()
5508 r = kvm_io_bus_write(vcpu, KVM_PIO_BUS, in kernel_pio()
5509 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
5513 pd += vcpu->arch.pio.size; in kernel_pio()
5518 static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size, in emulator_pio_in_out() argument
5522 vcpu->arch.pio.port = port; in emulator_pio_in_out()
5523 vcpu->arch.pio.in = in; in emulator_pio_in_out()
5524 vcpu->arch.pio.count = count; in emulator_pio_in_out()
5525 vcpu->arch.pio.size = size; in emulator_pio_in_out()
5527 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
5528 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
5532 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
5533 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
5534 vcpu->run->io.size = size; in emulator_pio_in_out()
5535 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
5536 vcpu->run->io.count = count; in emulator_pio_in_out()
5537 vcpu->run->io.port = port; in emulator_pio_in_out()
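emulator_pio_in_out() above first offers the port to in-kernel devices via kernel_pio(); if none handles it, the data is staged in the per-vCPU pio page and vcpu->run->io is filled in so KVM_RUN returns to userspace with KVM_EXIT_IO. Note that the payload is not inside struct kvm_run itself: it lives run->io.data_offset bytes past the start of the mmap'ed vCPU region. A sketch of the userspace side; serial_out() is a hypothetical VMM helper.

    /* Userspace side of a KVM_EXIT_IO exit.  'count' covers string
     * instructions such as REP OUTSB. */
    #include <linux/kvm.h>
    #include <stdint.h>

    void serial_out(uint16_t port, const uint8_t *data, unsigned size);

    static void handle_io_exit(struct kvm_run *run)
    {
        uint8_t *data = (uint8_t *)run + run->io.data_offset;
        uint32_t i;

        for (i = 0; i < run->io.count; i++, data += run->io.size) {
            if (run->io.direction == KVM_EXIT_IO_OUT)
                serial_out(run->io.port, data, run->io.size);
            /* For KVM_EXIT_IO_IN the handler would instead write 'size'
             * bytes into 'data'; KVM picks them up when KVM_RUN is
             * called again. */
        }
    }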
5546 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_in_emulated() local
5549 if (vcpu->arch.pio.count) in emulator_pio_in_emulated()
5552 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in_emulated()
5554 ret = emulator_pio_in_out(vcpu, size, port, val, count, true); in emulator_pio_in_emulated()
5557 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in_emulated()
5558 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in_emulated()
5559 vcpu->arch.pio.count = 0; in emulator_pio_in_emulated()
5570 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_pio_out_emulated() local
5572 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out_emulated()
5573 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out_emulated()
5574 return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false); in emulator_pio_out_emulated()
5577 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg) in get_segment_base() argument
5579 return kvm_x86_ops->get_segment_base(vcpu, seg); in get_segment_base()
5587 static int kvm_emulate_wbinvd_noskip(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd_noskip() argument
5589 if (!need_emulate_wbinvd(vcpu)) in kvm_emulate_wbinvd_noskip()
5595 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
5596 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
5599 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
5605 int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu) in kvm_emulate_wbinvd() argument
5607 kvm_emulate_wbinvd_noskip(vcpu); in kvm_emulate_wbinvd()
5608 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_wbinvd()
5639 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_cr() local
5644 value = kvm_read_cr0(vcpu); in emulator_get_cr()
5647 value = vcpu->arch.cr2; in emulator_get_cr()
5650 value = kvm_read_cr3(vcpu); in emulator_get_cr()
5653 value = kvm_read_cr4(vcpu); in emulator_get_cr()
5656 value = kvm_get_cr8(vcpu); in emulator_get_cr()
5668 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_cr() local
5673 res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val)); in emulator_set_cr()
5676 vcpu->arch.cr2 = val; in emulator_set_cr()
5679 res = kvm_set_cr3(vcpu, val); in emulator_set_cr()
5682 res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val)); in emulator_set_cr()
5685 res = kvm_set_cr8(vcpu, val); in emulator_set_cr()
5766 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_segment() local
5788 kvm_set_segment(vcpu, &var, seg); in emulator_set_segment()
5821 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_get_smbase() local
5823 return vcpu->arch.smbase; in emulator_get_smbase()
5828 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_set_smbase() local
5830 vcpu->arch.smbase = smbase; in emulator_set_smbase()
5935 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) in toggle_interruptibility() argument
5937 u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); in toggle_interruptibility()
5948 kvm_x86_ops->set_interrupt_shadow(vcpu, mask); in toggle_interruptibility()
5950 kvm_make_request(KVM_REQ_EVENT, vcpu); in toggle_interruptibility()
5954 static bool inject_emulated_exception(struct kvm_vcpu *vcpu) in inject_emulated_exception() argument
5956 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in inject_emulated_exception()
5958 return kvm_propagate_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
5961 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
5964 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
5968 static void init_emulate_ctxt(struct kvm_vcpu *vcpu) in init_emulate_ctxt() argument
5970 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
5973 kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); in init_emulate_ctxt()
5975 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
5978 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
5979 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
5981 (cs_l && is_long_mode(vcpu)) ? X86EMUL_MODE_PROT64 : in init_emulate_ctxt()
5989 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
5992 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip) in kvm_inject_realmode_interrupt() argument
5994 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
5997 init_emulate_ctxt(vcpu); in kvm_inject_realmode_interrupt()
6008 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
6009 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
6015 static int handle_emulation_failure(struct kvm_vcpu *vcpu, int emulation_type) in handle_emulation_failure() argument
6019 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
6020 trace_kvm_emulate_insn_failed(vcpu); in handle_emulation_failure()
6025 if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { in handle_emulation_failure()
6026 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
6027 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
6028 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
6032 kvm_queue_exception(vcpu, UD_VECTOR); in handle_emulation_failure()
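handle_emulation_failure() above shows what happens when the instruction emulator gives up: a #UD is queued for the guest, and failures at CPL 0 outside nested guest mode are additionally surfaced to userspace as KVM_EXIT_INTERNAL_ERROR with suberror KVM_INTERNAL_ERROR_EMULATION. Userspace usually just reports such exits; a minimal handler for the mmap'ed kvm_run:

    /* Minimal userspace reaction to an emulation failure reported by KVM. */
    #include <linux/kvm.h>
    #include <stdio.h>

    static int handle_internal_error(struct kvm_run *run)
    {
        if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION)
            fprintf(stderr, "KVM: instruction emulation failed\n");
        else
            fprintf(stderr, "KVM: internal error, suberror %u\n",
                    run->internal.suberror);
        return -1;      /* stop the vCPU loop */
    }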
6037 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in reexecute_instruction() argument
6047 if (WARN_ON_ONCE(is_guest_mode(vcpu))) in reexecute_instruction()
6050 if (!vcpu->arch.mmu.direct_map) { in reexecute_instruction()
6055 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in reexecute_instruction()
6071 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
6083 if (vcpu->arch.mmu.direct_map) { in reexecute_instruction()
6086 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
6087 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
6088 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
6091 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
6101 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
6114 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in retry_instruction() local
6117 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
6118 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
6133 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
6138 if (WARN_ON_ONCE(is_guest_mode(vcpu))) in retry_instruction()
6147 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
6148 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
6150 if (!vcpu->arch.mmu.direct_map) in retry_instruction()
6151 gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2_or_gpa, NULL); in retry_instruction()
6153 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
6158 static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
6159 static int complete_emulated_pio(struct kvm_vcpu *vcpu);
6161 static void kvm_smm_changed(struct kvm_vcpu *vcpu) in kvm_smm_changed() argument
6163 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
6165 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
6168 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_smm_changed()
6171 kvm_mmu_reset_context(vcpu); in kvm_smm_changed()
6174 static void kvm_set_hflags(struct kvm_vcpu *vcpu, unsigned emul_flags) in kvm_set_hflags() argument
6176 unsigned changed = vcpu->arch.hflags ^ emul_flags; in kvm_set_hflags()
6178 vcpu->arch.hflags = emul_flags; in kvm_set_hflags()
6181 kvm_smm_changed(vcpu); in kvm_set_hflags()
6199 static void kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu, int *r) in kvm_vcpu_do_singlestep() argument
6201 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
6203 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
6205 kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; in kvm_vcpu_do_singlestep()
6215 vcpu->arch.dr6 &= ~15; in kvm_vcpu_do_singlestep()
6216 vcpu->arch.dr6 |= DR6_BS | DR6_RTM; in kvm_vcpu_do_singlestep()
6217 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_vcpu_do_singlestep()
6221 int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu) in kvm_skip_emulated_instruction() argument
6223 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_skip_emulated_instruction()
6226 kvm_x86_ops->skip_emulated_instruction(vcpu); in kvm_skip_emulated_instruction()
6237 kvm_vcpu_do_singlestep(vcpu, &r); in kvm_skip_emulated_instruction()
6242 static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) in kvm_vcpu_check_breakpoint() argument
6244 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_breakpoint()
6245 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
6246 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_breakpoint()
6247 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
6249 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
6250 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
6262 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
6263 !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { in kvm_vcpu_check_breakpoint()
6264 unsigned long eip = kvm_get_linear_rip(vcpu); in kvm_vcpu_check_breakpoint()
6266 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
6267 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
6270 vcpu->arch.dr6 &= ~15; in kvm_vcpu_check_breakpoint()
6271 vcpu->arch.dr6 |= dr6 | DR6_RTM; in kvm_vcpu_check_breakpoint()
6272 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_vcpu_check_breakpoint()
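kvm_vcpu_do_singlestep() and kvm_vcpu_check_breakpoint() above implement the two sides of guest debugging: breakpoints and single-step requested by userspace (vcpu->guest_debug, the KVM_GUESTDBG_* flags) become KVM_EXIT_DEBUG exits, while hits on the guest's own DR7 breakpoints are injected back as #DB with the matching DR6 bits. Userspace arms its side with the KVM_SET_GUEST_DEBUG ioctl; a sketch that enables single-stepping plus one hardware execute breakpoint (the DR7 encoding below, local enable for DR0 with length/type 0, is standard x86 but is an assumption of this sketch, so verify it against the SDM before relying on it):

    /* Arm single-step and one hardware breakpoint on a vCPU from userspace.
     * 'vcpu_fd' is the fd returned by KVM_CREATE_VCPU. */
    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int arm_guest_debug(int vcpu_fd, __u64 bp_addr)
    {
        struct kvm_guest_debug dbg;

        memset(&dbg, 0, sizeof(dbg));
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
                      KVM_GUESTDBG_USE_HW_BP;
        dbg.arch.debugreg[0] = bp_addr;   /* DR0: linear address to trap   */
        dbg.arch.debugreg[7] = 0x1;       /* DR7: local-enable DR0, execute */

        return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }

When the breakpoint or single-step fires, KVM_RUN returns with run->exit_reason == KVM_EXIT_DEBUG and the details (exception number, pc, dr6, dr7) in run->debug.arch.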
6312 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, in x86_emulate_instruction() argument
6316 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
6318 bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
6320 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
6326 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
6327 kvm_clear_exception_queue(vcpu); in x86_emulate_instruction()
6330 init_emulate_ctxt(vcpu); in x86_emulate_instruction()
6339 kvm_vcpu_check_breakpoint(vcpu, &r)) in x86_emulate_instruction()
6351 trace_kvm_emulate_insn_start(vcpu); in x86_emulate_instruction()
6352 ++vcpu->stat.insn_emulation; in x86_emulate_instruction()
6356 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, in x86_emulate_instruction()
6366 inject_emulated_exception(vcpu); in x86_emulate_instruction()
6371 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
6380 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
6382 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
6391 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
6392 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
6406 if (reexecute_instruction(vcpu, cr2_or_gpa, write_fault_to_spt, in x86_emulate_instruction()
6410 return handle_emulation_failure(vcpu, emulation_type); in x86_emulate_instruction()
6415 if (inject_emulated_exception(vcpu)) in x86_emulate_instruction()
6417 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
6418 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
6420 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
6423 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
6426 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
6427 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
6430 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
6437 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); in x86_emulate_instruction()
6438 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
6439 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
6442 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
6444 kvm_vcpu_do_singlestep(vcpu, &r); in x86_emulate_instruction()
6445 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
6455 kvm_make_request(KVM_REQ_EVENT, vcpu); in x86_emulate_instruction()
6457 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
6462 int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type) in kvm_emulate_instruction() argument
6464 return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0); in kvm_emulate_instruction()
6468 int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, in kvm_emulate_instruction_from_buffer() argument
6471 return x86_emulate_instruction(vcpu, 0, 0, insn, insn_len); in kvm_emulate_instruction_from_buffer()
6475 static int complete_fast_pio_out_port_0x7e(struct kvm_vcpu *vcpu) in complete_fast_pio_out_port_0x7e() argument
6477 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
6481 static int complete_fast_pio_out(struct kvm_vcpu *vcpu) in complete_fast_pio_out() argument
6483 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
6485 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
6488 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_out()
6491 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_out() argument
6494 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_fast_pio_out()
6495 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt, in kvm_fast_pio_out()
6505 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
6506 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
6508 kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio_out()
6510 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
6511 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
6516 static int complete_fast_pio_in(struct kvm_vcpu *vcpu) in complete_fast_pio_in() argument
6521 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
6523 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
6524 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
6529 val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) in complete_fast_pio_in()
6533 * Since vcpu->arch.pio.count == 1 let emulator_pio_in_emulated perform in complete_fast_pio_in()
6536 emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, vcpu->arch.pio.size, in complete_fast_pio_in()
6537 vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
6538 kvm_register_write(vcpu, VCPU_REGS_RAX, val); in complete_fast_pio_in()
6540 return kvm_skip_emulated_instruction(vcpu); in complete_fast_pio_in()
6543 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size, in kvm_fast_pio_in() argument
6550 val = (size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX) : 0; in kvm_fast_pio_in()
6552 ret = emulator_pio_in_emulated(&vcpu->arch.emulate_ctxt, size, port, in kvm_fast_pio_in()
6555 kvm_register_write(vcpu, VCPU_REGS_RAX, val); in kvm_fast_pio_in()
6559 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
6560 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
6565 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in) in kvm_fast_pio() argument
6570 ret = kvm_fast_pio_in(vcpu, size, port); in kvm_fast_pio()
6572 ret = kvm_fast_pio_out(vcpu, size, port); in kvm_fast_pio()
6573 return ret && kvm_skip_emulated_instruction(vcpu); in kvm_fast_pio()
6601 struct kvm_vcpu *vcpu; in kvm_hyperv_tsc_notifier() local
6622 kvm_for_each_vcpu(cpu, vcpu, kvm) in kvm_hyperv_tsc_notifier()
6623 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_hyperv_tsc_notifier()
6625 kvm_for_each_vcpu(cpu, vcpu, kvm) in kvm_hyperv_tsc_notifier()
6626 kvm_clear_request(KVM_REQ_MCLOCK_INPROGRESS, vcpu); in kvm_hyperv_tsc_notifier()
6639 struct kvm_vcpu *vcpu; in kvmclock_cpufreq_notifier() local
6653 * the TSC for each VCPU. We must flag these local variables in kvmclock_cpufreq_notifier()
6672 * anytime after the setting of the VCPU's request bit, the in kvmclock_cpufreq_notifier()
6690 kvm_for_each_vcpu(i, vcpu, kvm) { in kvmclock_cpufreq_notifier()
6691 if (vcpu->cpu != freq->cpu) in kvmclock_cpufreq_notifier()
6693 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvmclock_cpufreq_notifier()
6694 if (vcpu->cpu != raw_smp_processor_id()) in kvmclock_cpufreq_notifier()
6792 struct kvm_vcpu *vcpu; in pvclock_gtod_update_fn() local
6797 kvm_for_each_vcpu(i, vcpu, kvm) in pvclock_gtod_update_fn()
6798 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in pvclock_gtod_update_fn()
6913 int kvm_vcpu_halt(struct kvm_vcpu *vcpu) in kvm_vcpu_halt() argument
6915 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
6916 if (lapic_in_kernel(vcpu)) { in kvm_vcpu_halt()
6917 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
6920 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
6926 int kvm_emulate_halt(struct kvm_vcpu *vcpu) in kvm_emulate_halt() argument
6928 int ret = kvm_skip_emulated_instruction(vcpu); in kvm_emulate_halt()
6933 return kvm_vcpu_halt(vcpu) && ret; in kvm_emulate_halt()
6938 static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, in kvm_pv_clock_pairing() argument
6954 clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); in kvm_pv_clock_pairing()
6959 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
6968 * kvm_pv_kick_cpu_op: Kick a vcpu.
6970 * @apicid - apicid of vcpu to be kicked.
6986 void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu) in kvm_vcpu_deactivate_apicv() argument
6988 vcpu->arch.apicv_active = false; in kvm_vcpu_deactivate_apicv()
6989 kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu); in kvm_vcpu_deactivate_apicv()
6992 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) in kvm_emulate_hypercall() argument
6997 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
6998 return kvm_hv_hypercall(vcpu); in kvm_emulate_hypercall()
7000 nr = kvm_register_read(vcpu, VCPU_REGS_RAX); in kvm_emulate_hypercall()
7001 a0 = kvm_register_read(vcpu, VCPU_REGS_RBX); in kvm_emulate_hypercall()
7002 a1 = kvm_register_read(vcpu, VCPU_REGS_RCX); in kvm_emulate_hypercall()
7003 a2 = kvm_register_read(vcpu, VCPU_REGS_RDX); in kvm_emulate_hypercall()
7004 a3 = kvm_register_read(vcpu, VCPU_REGS_RSI); in kvm_emulate_hypercall()
7008 op_64_bit = is_64_bit_mode(vcpu); in kvm_emulate_hypercall()
7017 if (kvm_x86_ops->get_cpl(vcpu) != 0) { in kvm_emulate_hypercall()
7027 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
7032 ret = kvm_pv_clock_pairing(vcpu, a0, a1); in kvm_emulate_hypercall()
7036 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
7045 kvm_register_write(vcpu, VCPU_REGS_RAX, ret); in kvm_emulate_hypercall()
7047 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
7048 return kvm_skip_emulated_instruction(vcpu); in kvm_emulate_hypercall()
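kvm_emulate_hypercall() above defines the x86 KVM hypercall ABI: the call number in RAX, up to four arguments in RBX, RCX, RDX and RSI, the return value written back to RAX, non-ring-0 callers rejected, and the upper 32 bits masked off outside 64-bit mode. Below is a guest-side sketch using the Intel VMCALL encoding; emulator_fix_hypercall()/patch_hypercall() just after this exist so that a guest using the other vendor's opcode (AMD's VMMCALL) gets its instruction patched to the native one when it traps. The KVM_HC_* numbers come from uapi/linux/kvm_para.h; the wrapper names here are illustrative.

    /* Guest-side KVM hypercall sketch (VMCALL encoding); must run at guest
     * CPL 0.  This mirrors the register convention visible above. */
    #include <linux/kvm_para.h>     /* KVM_HC_* hypercall numbers */

    static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
                                      unsigned long p2)
    {
        long ret;

        asm volatile("vmcall"
                     : "=a"(ret)
                     : "a"(nr), "b"(p1), "c"(p2)
                     : "memory");
        return ret;
    }

    /* Example: kick a halted vCPU, as used by PV spinlocks; per the lines
     * above, a0 is a flags word and a1 the target APIC ID. */
    static inline void kvm_kick_cpu(unsigned long flags, unsigned long apicid)
    {
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
    }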
7054 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); in emulator_fix_hypercall() local
7056 unsigned long rip = kvm_rip_read(vcpu); in emulator_fix_hypercall()
7058 kvm_x86_ops->patch_hypercall(vcpu, instruction); in emulator_fix_hypercall()
7064 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu) in dm_request_for_irq_injection() argument
7066 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
7067 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
7070 static void post_kvm_run_save(struct kvm_vcpu *vcpu) in post_kvm_run_save() argument
7072 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
7074 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
7075 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
7076 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
7077 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
7079 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
7080 kvm_vcpu_ready_for_interrupt_injection(vcpu); in post_kvm_run_save()
7083 static void update_cr8_intercept(struct kvm_vcpu *vcpu) in update_cr8_intercept() argument
7090 if (!lapic_in_kernel(vcpu)) in update_cr8_intercept()
7093 if (vcpu->arch.apicv_active) in update_cr8_intercept()
7096 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
7097 max_irr = kvm_lapic_find_highest_irr(vcpu); in update_cr8_intercept()
7104 tpr = kvm_lapic_get_cr8(vcpu); in update_cr8_intercept()
7106 kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr); in update_cr8_intercept()
7109 static int inject_pending_event(struct kvm_vcpu *vcpu) in inject_pending_event() argument
7115 if (vcpu->arch.exception.injected) in inject_pending_event()
7116 kvm_x86_ops->queue_exception(vcpu); in inject_pending_event()
7131 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
7132 if (vcpu->arch.nmi_injected) in inject_pending_event()
7133 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
7134 else if (vcpu->arch.interrupt.injected) in inject_pending_event()
7135 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
7144 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
7145 r = kvm_x86_ops->check_nested_events(vcpu); in inject_pending_event()
7151 if (vcpu->arch.exception.pending) { in inject_pending_event()
7152 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
7153 vcpu->arch.exception.has_error_code, in inject_pending_event()
7154 vcpu->arch.exception.error_code); in inject_pending_event()
7156 WARN_ON_ONCE(vcpu->arch.exception.injected); in inject_pending_event()
7157 vcpu->arch.exception.pending = false; in inject_pending_event()
7158 vcpu->arch.exception.injected = true; in inject_pending_event()
7160 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
7161 __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | in inject_pending_event()
7164 if (vcpu->arch.exception.nr == DB_VECTOR && in inject_pending_event()
7165 (vcpu->arch.dr7 & DR7_GD)) { in inject_pending_event()
7166 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
7167 kvm_update_dr7(vcpu); in inject_pending_event()
7170 kvm_x86_ops->queue_exception(vcpu); in inject_pending_event()
7174 if (kvm_event_needs_reinjection(vcpu)) in inject_pending_event()
7177 if (vcpu->arch.smi_pending && !is_smm(vcpu) && in inject_pending_event()
7178 kvm_x86_ops->smi_allowed(vcpu)) { in inject_pending_event()
7179 vcpu->arch.smi_pending = false; in inject_pending_event()
7180 ++vcpu->arch.smi_count; in inject_pending_event()
7181 enter_smm(vcpu); in inject_pending_event()
7182 } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) { in inject_pending_event()
7183 --vcpu->arch.nmi_pending; in inject_pending_event()
7184 vcpu->arch.nmi_injected = true; in inject_pending_event()
7185 kvm_x86_ops->set_nmi(vcpu); in inject_pending_event()
7186 } else if (kvm_cpu_has_injectable_intr(vcpu)) { in inject_pending_event()
7194 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { in inject_pending_event()
7195 r = kvm_x86_ops->check_nested_events(vcpu); in inject_pending_event()
7199 if (kvm_x86_ops->interrupt_allowed(vcpu)) { in inject_pending_event()
7200 kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), in inject_pending_event()
7202 kvm_x86_ops->set_irq(vcpu); in inject_pending_event()
7209 static void process_nmi(struct kvm_vcpu *vcpu) in process_nmi() argument
7218 if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
7221 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
7222 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
7223 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_nmi()
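process_nmi() above drains the atomically queued NMI count into nmi_pending and then clamps it the way hardware would: at most one NMI can be latched while another is being serviced, so the limit drops to 1 under the condition shown (NMIs masked or one already injected) and is otherwise 2. A tiny standalone model of that collapse:

    /* Model of the NMI collapsing done by process_nmi(): producers bump an
     * atomic counter, the vCPU folds it into nmi_pending and clamps it. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint nmi_queued;      /* written from injection paths */
    static unsigned int nmi_pending;    /* consumed at guest entry      */

    static void process_nmi_model(int nmi_masked_or_injected)
    {
        unsigned int limit = nmi_masked_or_injected ? 1 : 2;

        nmi_pending += atomic_exchange(&nmi_queued, 0);
        if (nmi_pending > limit)
            nmi_pending = limit;
    }

    int main(void)
    {
        atomic_fetch_add(&nmi_queued, 5);   /* five NMIs raced in */
        process_nmi_model(0);
        printf("pending after collapse: %u\n", nmi_pending);   /* prints 2 */
        return 0;
    }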
7240 static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n) in enter_smm_save_seg_32() argument
7245 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_32()
7259 static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n) in enter_smm_save_seg_64() argument
7265 kvm_get_segment(vcpu, &seg, n); in enter_smm_save_seg_64()
7276 static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf) in enter_smm_save_state_32() argument
7283 put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu)); in enter_smm_save_state_32()
7284 put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu)); in enter_smm_save_state_32()
7285 put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu)); in enter_smm_save_state_32()
7286 put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu)); in enter_smm_save_state_32()
7289 put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read(vcpu, i)); in enter_smm_save_state_32()
7291 kvm_get_dr(vcpu, 6, &val); in enter_smm_save_state_32()
7293 kvm_get_dr(vcpu, 7, &val); in enter_smm_save_state_32()
7296 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in enter_smm_save_state_32()
7302 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in enter_smm_save_state_32()
7308 kvm_x86_ops->get_gdt(vcpu, &dt); in enter_smm_save_state_32()
7312 kvm_x86_ops->get_idt(vcpu, &dt); in enter_smm_save_state_32()
7317 enter_smm_save_seg_32(vcpu, buf, i); in enter_smm_save_state_32()
7319 put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu)); in enter_smm_save_state_32()
7323 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
7327 static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf) in enter_smm_save_state_64() argument
7335 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in enter_smm_save_state_64()
7337 put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu)); in enter_smm_save_state_64()
7338 put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu)); in enter_smm_save_state_64()
7340 kvm_get_dr(vcpu, 6, &val); in enter_smm_save_state_64()
7342 kvm_get_dr(vcpu, 7, &val); in enter_smm_save_state_64()
7345 put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu)); in enter_smm_save_state_64()
7346 put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu)); in enter_smm_save_state_64()
7347 put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu)); in enter_smm_save_state_64()
7349 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
7354 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
7356 kvm_get_segment(vcpu, &seg, VCPU_SREG_TR); in enter_smm_save_state_64()
7362 kvm_x86_ops->get_idt(vcpu, &dt); in enter_smm_save_state_64()
7366 kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR); in enter_smm_save_state_64()
7372 kvm_x86_ops->get_gdt(vcpu, &dt); in enter_smm_save_state_64()
7377 enter_smm_save_seg_64(vcpu, buf, i); in enter_smm_save_state_64()
7381 static void enter_smm(struct kvm_vcpu *vcpu) in enter_smm() argument
7388 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in enter_smm()
7391 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
7392 enter_smm_save_state_64(vcpu, buf); in enter_smm()
7395 enter_smm_save_state_32(vcpu, buf); in enter_smm()
7399 * vCPU state (e.g. leave guest mode) after we've saved the state into in enter_smm()
7402 kvm_x86_ops->pre_enter_smm(vcpu, buf); in enter_smm()
7404 vcpu->arch.hflags |= HF_SMM_MASK; in enter_smm()
7405 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
7407 if (kvm_x86_ops->get_nmi_mask(vcpu)) in enter_smm()
7408 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
7410 kvm_x86_ops->set_nmi_mask(vcpu, true); in enter_smm()
7412 kvm_set_rflags(vcpu, X86_EFLAGS_FIXED); in enter_smm()
7413 kvm_rip_write(vcpu, 0x8000); in enter_smm()
7415 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
7416 kvm_x86_ops->set_cr0(vcpu, cr0); in enter_smm()
7417 vcpu->arch.cr0 = cr0; in enter_smm()
7419 kvm_x86_ops->set_cr4(vcpu, 0); in enter_smm()
7423 kvm_x86_ops->set_idt(vcpu, &dt); in enter_smm()
7425 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); in enter_smm()
7427 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
7428 cs.base = vcpu->arch.smbase; in enter_smm()
7445 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in enter_smm()
7446 kvm_set_segment(vcpu, &ds, VCPU_SREG_DS); in enter_smm()
7447 kvm_set_segment(vcpu, &ds, VCPU_SREG_ES); in enter_smm()
7448 kvm_set_segment(vcpu, &ds, VCPU_SREG_FS); in enter_smm()
7449 kvm_set_segment(vcpu, &ds, VCPU_SREG_GS); in enter_smm()
7450 kvm_set_segment(vcpu, &ds, VCPU_SREG_SS); in enter_smm()
7453 if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) in enter_smm()
7454 kvm_x86_ops->set_efer(vcpu, 0); in enter_smm()
7457 kvm_update_cpuid(vcpu); in enter_smm()
7458 kvm_mmu_reset_context(vcpu); in enter_smm()
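enter_smm() and the enter_smm_save_state_{32,64}() helpers above serialize the vCPU state into a 512-byte staging buffer that is written to SMRAM at smbase + 0xfe00 (architectural offsets 0x7e00..0x7fff of the 64 KiB SMRAM image), then reset the vCPU to the SMM entry state: CS base = smbase, RIP = 0x8000, RFLAGS reduced to the fixed bit, paging and protection cleared. The put_smstate() calls index that buffer by architectural offset; below is a hedged model of the address math, with the macro defined here for illustration (it is not the kernel's definition) and only the offsets that appear in the lines above.

    /* Illustrative model of how the SMRAM state-save area is addressed. */
    #include <stdint.h>
    #include <string.h>

    #define SMRAM_SAVE_BASE 0x7e00u   /* first architectural offset in 'buf' */

    /* Store 'val' of the given width at architectural offset 'off'. */
    #define put_smstate(type, buf, off, val) \
        (*(type *)((buf) + (off) - SMRAM_SAVE_BASE) = (val))

    static void save_state_32_sketch(uint8_t buf[512], uint32_t cr0,
                                     uint32_t cr3, uint32_t rflags,
                                     uint32_t rip)
    {
        memset(buf, 0, 512);
        put_smstate(uint32_t, buf, 0x7ffc, cr0);
        put_smstate(uint32_t, buf, 0x7ff8, cr3);
        put_smstate(uint32_t, buf, 0x7ff4, rflags);
        put_smstate(uint32_t, buf, 0x7ff0, rip);
        /* ...remaining registers at their architectural offsets...         */
        /* The caller then copies buf to guest memory at smbase + 0xfe00,   */
        /* as kvm_vcpu_write_guest() does in enter_smm() above.             */
    }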
7461 static void process_smi(struct kvm_vcpu *vcpu) in process_smi() argument
7463 vcpu->arch.smi_pending = true; in process_smi()
7464 kvm_make_request(KVM_REQ_EVENT, vcpu); in process_smi()
7472 static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) in vcpu_scan_ioapic() argument
7474 if (!kvm_apic_present(vcpu)) in vcpu_scan_ioapic()
7477 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
7479 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
7480 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
7482 if (vcpu->arch.apicv_active) in vcpu_scan_ioapic()
7483 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_scan_ioapic()
7484 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
7485 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
7488 if (is_guest_mode(vcpu)) in vcpu_scan_ioapic()
7489 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
7491 kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu); in vcpu_scan_ioapic()
7494 static void vcpu_load_eoi_exitmap(struct kvm_vcpu *vcpu) in vcpu_load_eoi_exitmap() argument
7498 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
7501 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
7502 vcpu_to_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
7503 kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap); in vcpu_load_eoi_exitmap()
7520 void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu) in kvm_vcpu_reload_apic_access_page() argument
7524 if (!lapic_in_kernel(vcpu)) in kvm_vcpu_reload_apic_access_page()
7530 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in kvm_vcpu_reload_apic_access_page()
7533 kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page)); in kvm_vcpu_reload_apic_access_page()
7543 void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu) in __kvm_request_immediate_exit() argument
7545 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
7554 static int vcpu_enter_guest(struct kvm_vcpu *vcpu) in vcpu_enter_guest() argument
7558 dm_request_for_irq_injection(vcpu) && in vcpu_enter_guest()
7559 kvm_cpu_accept_dm_intr(vcpu); in vcpu_enter_guest()
7563 if (kvm_request_pending(vcpu)) { in vcpu_enter_guest()
7564 if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) in vcpu_enter_guest()
7565 kvm_x86_ops->get_vmcs12_pages(vcpu); in vcpu_enter_guest()
7566 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) in vcpu_enter_guest()
7567 kvm_mmu_unload(vcpu); in vcpu_enter_guest()
7568 if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu)) in vcpu_enter_guest()
7569 __kvm_migrate_timers(vcpu); in vcpu_enter_guest()
7570 if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
7571 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
7572 if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu)) in vcpu_enter_guest()
7573 kvm_gen_kvmclock_update(vcpu); in vcpu_enter_guest()
7574 if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) { in vcpu_enter_guest()
7575 r = kvm_guest_time_update(vcpu); in vcpu_enter_guest()
7579 if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu)) in vcpu_enter_guest()
7580 kvm_mmu_sync_roots(vcpu); in vcpu_enter_guest()
7581 if (kvm_check_request(KVM_REQ_LOAD_CR3, vcpu)) in vcpu_enter_guest()
7582 kvm_mmu_load_cr3(vcpu); in vcpu_enter_guest()
7583 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) in vcpu_enter_guest()
7584 kvm_vcpu_flush_tlb(vcpu, true); in vcpu_enter_guest()
7585 if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) { in vcpu_enter_guest()
7586 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
7590 if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) { in vcpu_enter_guest()
7591 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
7592 vcpu->mmio_needed = 0; in vcpu_enter_guest()
7596 if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) { in vcpu_enter_guest()
7598 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
7602 if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) in vcpu_enter_guest()
7603 record_steal_time(vcpu); in vcpu_enter_guest()
7604 if (kvm_check_request(KVM_REQ_SMI, vcpu)) in vcpu_enter_guest()
7605 process_smi(vcpu); in vcpu_enter_guest()
7606 if (kvm_check_request(KVM_REQ_NMI, vcpu)) in vcpu_enter_guest()
7607 process_nmi(vcpu); in vcpu_enter_guest()
7608 if (kvm_check_request(KVM_REQ_PMU, vcpu)) in vcpu_enter_guest()
7609 kvm_pmu_handle_event(vcpu); in vcpu_enter_guest()
7610 if (kvm_check_request(KVM_REQ_PMI, vcpu)) in vcpu_enter_guest()
7611 kvm_pmu_deliver_pmi(vcpu); in vcpu_enter_guest()
7612 if (kvm_check_request(KVM_REQ_IOAPIC_EOI_EXIT, vcpu)) { in vcpu_enter_guest()
7613 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
7614 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
7615 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
7616 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
7617 vcpu->run->eoi.vector = in vcpu_enter_guest()
7618 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
7623 if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) in vcpu_enter_guest()
7624 vcpu_scan_ioapic(vcpu); in vcpu_enter_guest()
7625 if (kvm_check_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu)) in vcpu_enter_guest()
7626 vcpu_load_eoi_exitmap(vcpu); in vcpu_enter_guest()
7627 if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu)) in vcpu_enter_guest()
7628 kvm_vcpu_reload_apic_access_page(vcpu); in vcpu_enter_guest()
7629 if (kvm_check_request(KVM_REQ_HV_CRASH, vcpu)) { in vcpu_enter_guest()
7630 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
7631 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
7635 if (kvm_check_request(KVM_REQ_HV_RESET, vcpu)) { in vcpu_enter_guest()
7636 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
7637 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
7641 if (kvm_check_request(KVM_REQ_HV_EXIT, vcpu)) { in vcpu_enter_guest()
7642 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
7643 vcpu->run->hyperv = vcpu->arch.hyperv.exit; in vcpu_enter_guest()
7653 if (kvm_check_request(KVM_REQ_HV_STIMER, vcpu)) in vcpu_enter_guest()
7654 kvm_hv_process_stimers(vcpu); in vcpu_enter_guest()
7657 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { in vcpu_enter_guest()
7658 ++vcpu->stat.req_event; in vcpu_enter_guest()
7659 kvm_apic_accept_events(vcpu); in vcpu_enter_guest()
7660 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
7665 if (inject_pending_event(vcpu) != 0) in vcpu_enter_guest()
7682 if (vcpu->arch.smi_pending && !is_smm(vcpu)) in vcpu_enter_guest()
7683 if (!kvm_x86_ops->enable_smi_window(vcpu)) in vcpu_enter_guest()
7685 if (vcpu->arch.nmi_pending) in vcpu_enter_guest()
7686 kvm_x86_ops->enable_nmi_window(vcpu); in vcpu_enter_guest()
7687 if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win) in vcpu_enter_guest()
7688 kvm_x86_ops->enable_irq_window(vcpu); in vcpu_enter_guest()
7689 WARN_ON(vcpu->arch.exception.pending); in vcpu_enter_guest()
7692 if (kvm_lapic_enabled(vcpu)) { in vcpu_enter_guest()
7693 update_cr8_intercept(vcpu); in vcpu_enter_guest()
7694 kvm_lapic_sync_to_vapic(vcpu); in vcpu_enter_guest()
7698 r = kvm_mmu_reload(vcpu); in vcpu_enter_guest()
7705 kvm_x86_ops->prepare_guest_switch(vcpu); in vcpu_enter_guest()
7713 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
7715 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
7726 * tables done while the VCPU is running. Please see the comment in vcpu_enter_guest()
7735 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) in vcpu_enter_guest()
7736 kvm_x86_ops->sync_pir_to_irr(vcpu); in vcpu_enter_guest()
7738 if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) in vcpu_enter_guest()
7740 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
7744 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
7750 kvm_make_request(KVM_REQ_EVENT, vcpu); in vcpu_enter_guest()
7751 kvm_x86_ops->request_immediate_exit(vcpu); in vcpu_enter_guest()
7754 trace_kvm_entry(vcpu->vcpu_id); in vcpu_enter_guest()
7756 wait_lapic_expire(vcpu); in vcpu_enter_guest()
7759 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
7761 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
7762 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
7763 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
7764 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
7765 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
7766 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
7769 kvm_x86_ops->run(vcpu); in vcpu_enter_guest()
7777 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
7778 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
7779 kvm_x86_ops->sync_dirty_debug_regs(vcpu); in vcpu_enter_guest()
7780 kvm_update_dr0123(vcpu); in vcpu_enter_guest()
7781 kvm_update_dr6(vcpu); in vcpu_enter_guest()
7782 kvm_update_dr7(vcpu); in vcpu_enter_guest()
7783 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
7796 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
7798 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
7801 kvm_before_interrupt(vcpu); in vcpu_enter_guest()
7802 kvm_x86_ops->handle_external_intr(vcpu); in vcpu_enter_guest()
7803 kvm_after_interrupt(vcpu); in vcpu_enter_guest()
7805 ++vcpu->stat.exits; in vcpu_enter_guest()
7812 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
7818 unsigned long rip = kvm_rip_read(vcpu); in vcpu_enter_guest()
7822 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
7823 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in vcpu_enter_guest()
7825 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
7826 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
7828 vcpu->arch.gpa_available = false; in vcpu_enter_guest()
7829 r = kvm_x86_ops->handle_exit(vcpu); in vcpu_enter_guest()
7833 kvm_x86_ops->cancel_injection(vcpu); in vcpu_enter_guest()
7834 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
7835 kvm_lapic_sync_from_vapic(vcpu); in vcpu_enter_guest()
7840 static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu) in vcpu_block() argument
7842 if (!kvm_arch_vcpu_runnable(vcpu) && in vcpu_block()
7843 (!kvm_x86_ops->pre_block || kvm_x86_ops->pre_block(vcpu) == 0)) { in vcpu_block()
7844 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
7845 kvm_vcpu_block(vcpu); in vcpu_block()
7846 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
7849 kvm_x86_ops->post_block(vcpu); in vcpu_block()
7851 if (!kvm_check_request(KVM_REQ_UNHALT, vcpu)) in vcpu_block()
7855 kvm_apic_accept_events(vcpu); in vcpu_block()
7856 switch(vcpu->arch.mp_state) { in vcpu_block()
7858 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
7859 vcpu->arch.mp_state = in vcpu_block()
7862 vcpu->arch.apf.halted = false; in vcpu_block()
7873 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu) in kvm_vcpu_running() argument
7875 if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) in kvm_vcpu_running()
7876 kvm_x86_ops->check_nested_events(vcpu); in kvm_vcpu_running()
7878 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
7879 !vcpu->arch.apf.halted); in kvm_vcpu_running()
7882 static int vcpu_run(struct kvm_vcpu *vcpu) in vcpu_run() argument
7885 struct kvm *kvm = vcpu->kvm; in vcpu_run()
7887 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
7888 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
7891 if (kvm_vcpu_running(vcpu)) { in vcpu_run()
7892 r = vcpu_enter_guest(vcpu); in vcpu_run()
7894 r = vcpu_block(kvm, vcpu); in vcpu_run()
7900 kvm_clear_request(KVM_REQ_PENDING_TIMER, vcpu); in vcpu_run()
7901 if (kvm_cpu_has_pending_timer(vcpu)) in vcpu_run()
7902 kvm_inject_pending_timer_irqs(vcpu); in vcpu_run()
7904 if (dm_request_for_irq_injection(vcpu) && in vcpu_run()
7905 kvm_vcpu_ready_for_interrupt_injection(vcpu)) { in vcpu_run()
7907 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
7908 ++vcpu->stat.request_irq_exits; in vcpu_run()
7912 kvm_check_async_pf_completion(vcpu); in vcpu_run()
7916 vcpu->run->exit_reason = KVM_EXIT_INTR; in vcpu_run()
7917 ++vcpu->stat.signal_exits; in vcpu_run()
7921 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
7923 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
7927 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
7932 static inline int complete_emulated_io(struct kvm_vcpu *vcpu) in complete_emulated_io() argument
7935 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
7936 r = kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE); in complete_emulated_io()
7937 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
7943 static int complete_emulated_pio(struct kvm_vcpu *vcpu) in complete_emulated_pio() argument
7945 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
7947 return complete_emulated_io(vcpu); in complete_emulated_pio()
7968 static int complete_emulated_mmio(struct kvm_vcpu *vcpu) in complete_emulated_mmio() argument
7970 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
7974 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
7977 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
7979 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
7985 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
7993 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
7994 vcpu->mmio_needed = 0; in complete_emulated_mmio()
7997 if (vcpu->mmio_is_write) in complete_emulated_mmio()
7999 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
8000 return complete_emulated_io(vcpu); in complete_emulated_mmio()
8005 if (vcpu->mmio_is_write) in complete_emulated_mmio()
8008 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
8009 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
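complete_emulated_mmio() above resumes a guest access after userspace has serviced one KVM_EXIT_MMIO: it copies the data for the current fragment, advances mmio_cur_fragment, and either schedules another exit (re-arming complete_userspace_io) or re-enters the emulator once every fragment is done. Fragments come from emulator_read_write_onepage() splitting the access at page boundaries, and each fragment is delivered at most 8 bytes per exit. A standalone model of that splitting follows; the structure names are invented for the sketch, and at most two fragments are assumed (one page crossing), matching KVM_MAX_MMIO_FRAGMENTS.

    /* Model: one fragment per page touched, each delivered <= 8 bytes at a
     * time (the capacity of kvm_run->mmio.data). */
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    struct frag { uint64_t gpa; uint32_t len; };

    static unsigned split_into_fragments(uint64_t gpa, uint32_t len,
                                         struct frag frags[2])
    {
        unsigned n = 0;
        uint32_t first = PAGE_SIZE - (gpa & (PAGE_SIZE - 1));

        if (first > len)
            first = len;
        frags[n++] = (struct frag){ gpa, first };
        if (len > first)                      /* access crossed a page */
            frags[n++] = (struct frag){ gpa + first, len - first };
        return n;
    }

    int main(void)
    {
        struct frag frags[2];
        unsigned n = split_into_fragments(0x100ffc, 8, frags);

        for (unsigned i = 0; i < n; i++) {
            for (uint32_t off = 0; off < frags[i].len; off += 8) {
                uint32_t chunk = frags[i].len - off;
                if (chunk > 8)
                    chunk = 8;
                printf("exit: gpa=%#llx len=%u\n",
                       (unsigned long long)(frags[i].gpa + off), chunk);
            }
        }
        return 0;
    }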
8014 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) in kvm_load_guest_fpu() argument
8017 copy_fpregs_to_fpstate(&vcpu->arch.user_fpu); in kvm_load_guest_fpu()
8019 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state, in kvm_load_guest_fpu()
8026 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) in kvm_put_guest_fpu() argument
8029 copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
8030 copy_kernel_to_fpregs(&vcpu->arch.user_fpu.state); in kvm_put_guest_fpu()
8032 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
8036 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) in kvm_arch_vcpu_ioctl_run() argument
8040 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_run()
8041 kvm_sigset_activate(vcpu); in kvm_arch_vcpu_ioctl_run()
8042 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
8044 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
8049 kvm_vcpu_block(vcpu); in kvm_arch_vcpu_ioctl_run()
8050 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_run()
8051 kvm_clear_request(KVM_REQ_UNHALT, vcpu); in kvm_arch_vcpu_ioctl_run()
8055 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
8056 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
8061 if (vcpu->run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) { in kvm_arch_vcpu_ioctl_run()
8066 if (vcpu->run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
8067 r = sync_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
8073 if (!lapic_in_kernel(vcpu)) { in kvm_arch_vcpu_ioctl_run()
8074 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
8080 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
8081 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
8082 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
8083 r = cui(vcpu); in kvm_arch_vcpu_ioctl_run()
8087 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
8092 r = vcpu_run(vcpu); in kvm_arch_vcpu_ioctl_run()
8095 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_run()
8096 if (vcpu->run->kvm_valid_regs) in kvm_arch_vcpu_ioctl_run()
8097 store_regs(vcpu); in kvm_arch_vcpu_ioctl_run()
8098 post_kvm_run_save(vcpu); in kvm_arch_vcpu_ioctl_run()
8099 kvm_sigset_deactivate(vcpu); in kvm_arch_vcpu_ioctl_run()
8101 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_run()
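kvm_arch_vcpu_ioctl_run() above is the kernel side of the KVM_RUN ioctl: it loads the guest FPU, optionally syncs registers through kvm_run, finishes any pending userspace I/O via complete_userspace_io, runs the vCPU loop, and writes the exit state back into the shared kvm_run page. The matching userspace skeleton looks roughly like the sketch below (setup of memory regions and registers, plus most error handling, omitted); handle_io_exit(), handle_mmio_exit() and handle_internal_error() refer to the earlier sketches.

    /* Minimal userspace vCPU loop around KVM_RUN. */
    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>

    void handle_io_exit(struct kvm_run *run);
    void handle_mmio_exit(struct kvm_run *run);
    int handle_internal_error(struct kvm_run *run);

    int run_vcpu(int kvm_fd, int vcpu_fd)
    {
        int mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, vcpu_fd, 0);

        for (;;) {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                return -1;              /* e.g. -EINTR on a signal */

            switch (run->exit_reason) {
            case KVM_EXIT_IO:
                handle_io_exit(run);
                break;
            case KVM_EXIT_MMIO:
                handle_mmio_exit(run);
                break;
            case KVM_EXIT_HLT:
                return 0;               /* guest executed HLT */
            case KVM_EXIT_INTERNAL_ERROR:
                return handle_internal_error(run);
            default:
                fprintf(stderr, "unhandled exit %u\n", run->exit_reason);
                return -1;
            }
        }
    }

KVM_EXIT_HLT is only seen when the local APIC is emulated in userspace; with an in-kernel LAPIC, kvm_vcpu_halt() above simply parks the vCPU in KVM_MP_STATE_HALTED instead.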
8105 static void __get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __get_regs() argument
8107 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
8111 * back from emulation context to vcpu. Userspace shouldn't do in __get_regs()
8115 emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt); in __get_regs()
8116 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
8118 regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX); in __get_regs()
8119 regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX); in __get_regs()
8120 regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX); in __get_regs()
8121 regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX); in __get_regs()
8122 regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI); in __get_regs()
8123 regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI); in __get_regs()
8124 regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); in __get_regs()
8125 regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP); in __get_regs()
8127 regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8); in __get_regs()
8128 regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9); in __get_regs()
8129 regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10); in __get_regs()
8130 regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11); in __get_regs()
8131 regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12); in __get_regs()
8132 regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13); in __get_regs()
8133 regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14); in __get_regs()
8134 regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15); in __get_regs()
8137 regs->rip = kvm_rip_read(vcpu); in __get_regs()
8138 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
8141 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_get_regs() argument
8143 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
8144 __get_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_get_regs()
8145 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_regs()
8149 static void __set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in __set_regs() argument
8151 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
8152 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
8154 kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax); in __set_regs()
8155 kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx); in __set_regs()
8156 kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx); in __set_regs()
8157 kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx); in __set_regs()
8158 kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi); in __set_regs()
8159 kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi); in __set_regs()
8160 kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp); in __set_regs()
8161 kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp); in __set_regs()
8163 kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8); in __set_regs()
8164 kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9); in __set_regs()
8165 kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10); in __set_regs()
8166 kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11); in __set_regs()
8167 kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12); in __set_regs()
8168 kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13); in __set_regs()
8169 kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14); in __set_regs()
8170 kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15); in __set_regs()
8173 kvm_rip_write(vcpu, regs->rip); in __set_regs()
8174 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
8176 vcpu->arch.exception.pending = false; in __set_regs()
8178 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_regs()
8181 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) in kvm_arch_vcpu_ioctl_set_regs() argument
8183 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
8184 __set_regs(vcpu, regs); in kvm_arch_vcpu_ioctl_set_regs()
8185 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_regs()
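__get_regs() and __set_regs() above back the KVM_GET_REGS and KVM_SET_REGS ioctls; note from the lines shown that setting registers flags the emulator's register cache for resync, forces RFLAGS bit 1 (X86_EFLAGS_FIXED), drops any pending exception and raises KVM_REQ_EVENT. A typical userspace use, pointing a vCPU at its entry point:

    /* Set the initial instruction pointer of a vCPU via KVM_SET_REGS.
     * Segment state (CS base etc.) is configured separately through
     * KVM_SET_SREGS, which is not shown here. */
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_entry_point(int vcpu_fd, __u64 rip, __u64 rsp)
    {
        struct kvm_regs regs;

        if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
            return -1;

        regs.rip = rip;
        regs.rsp = rsp;
        regs.rflags = 0x2;      /* bit 1 is architecturally always set */

        return ioctl(vcpu_fd, KVM_SET_REGS, &regs);
    }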
8189 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) in kvm_get_cs_db_l_bits() argument
8193 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_get_cs_db_l_bits()
8199 static void __get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __get_sregs() argument
8203 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs()
8204 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs()
8205 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs()
8206 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs()
8207 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs()
8208 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs()
8210 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs()
8211 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs()
8213 kvm_x86_ops->get_idt(vcpu, &dt); in __get_sregs()
8216 kvm_x86_ops->get_gdt(vcpu, &dt); in __get_sregs()
8220 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs()
8221 sregs->cr2 = vcpu->arch.cr2; in __get_sregs()
8222 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs()
8223 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs()
8224 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs()
8225 sregs->efer = vcpu->arch.efer; in __get_sregs()
8226 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs()
8230 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
8231 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
8235 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_sregs() argument
8238 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
8239 __get_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_get_sregs()
8240 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_sregs()
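
__get_sregs() fills the companion KVM_GET_SREGS structure with the segment registers, descriptor tables, control registers and the injected-interrupt bitmap. A short sketch that dumps a few of those fields (same vcpu_fd and includes as the KVM_GET_REGS example above, plus <stdio.h>):

struct kvm_sregs sregs;

if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) == 0)
        printf("cr0=%llx cr4=%llx efer=%llx cs.base=%llx apic_base=%llx\n",
               sregs.cr0, sregs.cr4, sregs.efer,
               sregs.cs.base, sregs.apic_base);
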
8244 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_get_mpstate() argument
8247 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
8249 kvm_load_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
8251 kvm_apic_accept_events(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
8252 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
8253 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
8256 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
8259 kvm_put_guest_fpu(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
8260 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_mpstate()
8264 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_mpstate() argument
8269 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
8271 if (!lapic_in_kernel(vcpu) && in kvm_arch_vcpu_ioctl_set_mpstate()
8276 if ((is_smm(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
8282 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
8283 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
8285 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
8286 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
8290 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_mpstate()
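
The two mp_state handlers sit behind KVM_GET_MP_STATE and KVM_SET_MP_STATE. As the lapic_in_kernel() check above suggests, states other than RUNNABLE are only meaningful with an in-kernel local APIC, and a SIPI_RECEIVED request is recorded as INIT_RECEIVED plus a pending SIPI event. Usage from userspace is a simple pair of ioctls (same assumptions as the earlier sketches):

struct kvm_mp_state mp;

ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp);          /* e.g. KVM_MP_STATE_HALTED */
mp.mp_state = KVM_MP_STATE_RUNNABLE;
ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);          /* mark the vCPU runnable again */
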
8294 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index, in kvm_task_switch() argument
8297 struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; in kvm_task_switch()
8300 init_emulate_ctxt(vcpu); in kvm_task_switch()
8308 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
8309 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
8310 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_task_switch()
8315 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in kvm_valid_sregs() argument
8335 return kvm_valid_cr4(vcpu, sregs->cr4); in kvm_valid_sregs()
8338 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) in __set_sregs() argument
8347 if (kvm_valid_sregs(vcpu, sregs)) in __set_sregs()
8352 if (kvm_set_apic_base(vcpu, &apic_base_msr)) in __set_sregs()
8357 kvm_x86_ops->set_idt(vcpu, &dt); in __set_sregs()
8360 kvm_x86_ops->set_gdt(vcpu, &dt); in __set_sregs()
8362 vcpu->arch.cr2 = sregs->cr2; in __set_sregs()
8363 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs()
8364 vcpu->arch.cr3 = sregs->cr3; in __set_sregs()
8365 __set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail); in __set_sregs()
8367 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs()
8369 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs()
8370 kvm_x86_ops->set_efer(vcpu, sregs->efer); in __set_sregs()
8372 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs()
8373 kvm_x86_ops->set_cr0(vcpu, sregs->cr0); in __set_sregs()
8374 vcpu->arch.cr0 = sregs->cr0; in __set_sregs()
8376 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs()
8377 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & in __set_sregs()
8379 kvm_x86_ops->set_cr4(vcpu, sregs->cr4); in __set_sregs()
8381 kvm_update_cpuid(vcpu); in __set_sregs()
8383 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs()
8384 if (is_pae_paging(vcpu)) { in __set_sregs()
8385 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs()
8388 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs()
8391 kvm_mmu_reset_context(vcpu); in __set_sregs()
8397 kvm_queue_interrupt(vcpu, pending_vec, false); in __set_sregs()
8401 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs()
8402 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs()
8403 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs()
8404 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs()
8405 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs()
8406 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs()
8408 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs()
8409 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs()
8411 update_cr8_intercept(vcpu); in __set_sregs()
8413 /* Older userspace won't unhalt the vcpu on reset. */ in __set_sregs()
8414 if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 && in __set_sregs()
8416 !is_protmode(vcpu)) in __set_sregs()
8417 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs()
8419 kvm_make_request(KVM_REQ_EVENT, vcpu); in __set_sregs()
8426 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_sregs() argument
8431 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
8432 ret = __set_sregs(vcpu, sregs); in kvm_arch_vcpu_ioctl_set_sregs()
8433 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_sregs()
8437 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_set_guest_debug() argument
8443 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
8447 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
8450 kvm_queue_exception(vcpu, DB_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
8452 kvm_queue_exception(vcpu, BP_VECTOR); in kvm_arch_vcpu_ioctl_set_guest_debug()
8459 rflags = kvm_get_rflags(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
8461 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
8462 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
8463 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
8465 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
8467 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
8468 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
8471 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
8473 kvm_update_dr7(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
8475 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
8476 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
8477 get_segment_base(vcpu, VCPU_SREG_CS); in kvm_arch_vcpu_ioctl_set_guest_debug()
8483 kvm_set_rflags(vcpu, rflags); in kvm_arch_vcpu_ioctl_set_guest_debug()
8485 kvm_x86_ops->update_bp_intercept(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
8490 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_guest_debug()
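
kvm_arch_vcpu_ioctl_set_guest_debug() is the backend of KVM_SET_GUEST_DEBUG: it can queue a #DB/#BP into the guest, arm single-stepping via RFLAGS, or load the hardware breakpoints from dbg->arch.debugreg[]. A userspace sketch that single-steps the guest (each subsequent KVM_RUN then exits with KVM_EXIT_DEBUG):

struct kvm_guest_debug dbg = {
        .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
};

ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);

/* Hardware-breakpoint variant: set .control to
 * KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP and fill
 * dbg.arch.debugreg[0..3] plus the DR7 image in debugreg[7]. */
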
8497 int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, in kvm_arch_vcpu_ioctl_translate() argument
8504 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_translate()
8506 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
8507 gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL); in kvm_arch_vcpu_ioctl_translate()
8508 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
8514 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_translate()
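
KVM_TRANSLATE walks the current guest page tables (kvm_mmu_gva_to_gpa_system() above) for a single linear address; tr.valid is clear if the walk failed. The address below is just a placeholder:

struct kvm_translation tr = {
        .linear_address = 0xffffffff81000000ULL,   /* arbitrary example GVA */
};

ioctl(vcpu_fd, KVM_TRANSLATE, &tr);
if (tr.valid)
        printf("gpa = 0x%llx\n", tr.physical_address);
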
8518 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_get_fpu() argument
8522 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
8524 fxsave = &vcpu->arch.guest_fpu.state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
8534 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_get_fpu()
8538 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) in kvm_arch_vcpu_ioctl_set_fpu() argument
8542 vcpu_load(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
8544 fxsave = &vcpu->arch.guest_fpu.state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
8555 vcpu_put(vcpu); in kvm_arch_vcpu_ioctl_set_fpu()
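
KVM_GET_FPU/KVM_SET_FPU expose the guest's FXSAVE image (the fxsave pointer above) as a struct kvm_fpu. A quick sanity-check sketch:

struct kvm_fpu fpu;

if (ioctl(vcpu_fd, KVM_GET_FPU, &fpu) == 0)
        printf("fcw=%04x mxcsr=%08x\n", fpu.fcw, fpu.mxcsr);
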
8559 static void store_regs(struct kvm_vcpu *vcpu) in store_regs() argument
8563 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
8564 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
8566 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
8567 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
8569 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
8571 vcpu, &vcpu->run->s.regs.events); in store_regs()
8574 static int sync_regs(struct kvm_vcpu *vcpu) in sync_regs() argument
8576 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) in sync_regs()
8579 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
8580 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
8581 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
8583 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
8584 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
8586 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
8588 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
8590 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
8592 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
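
store_regs()/sync_regs() implement the KVM_CAP_SYNC_REGS fast path: instead of issuing GET/SET ioctls around every exit, userspace asks KVM to mirror register state in the shared, mmap()ed kvm_run page. A sketch, where run points at that mapping and the capability is assumed to have been confirmed via KVM_CHECK_EXTENSION:

run->kvm_valid_regs = KVM_SYNC_X86_REGS;   /* keep s.regs.regs up to date  */
ioctl(vcpu_fd, KVM_RUN, 0);

run->s.regs.regs.rax = 0;                  /* edit in place ...            */
run->kvm_dirty_regs = KVM_SYNC_X86_REGS;   /* ... and let sync_regs() load it */
ioctl(vcpu_fd, KVM_RUN, 0);
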
8598 static void fx_init(struct kvm_vcpu *vcpu) in fx_init() argument
8600 fpstate_init(&vcpu->arch.guest_fpu.state); in fx_init()
8602 vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv = in fx_init()
8608 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
8610 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
8613 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_free() argument
8615 void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask; in kvm_arch_vcpu_free()
8616 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; in kvm_arch_vcpu_free()
8620 kvmclock_reset(vcpu); in kvm_arch_vcpu_free()
8622 kvm_x86_ops->vcpu_free(vcpu); in kvm_arch_vcpu_free()
8629 struct kvm_vcpu *vcpu; in kvm_arch_vcpu_create() local
8636 vcpu = kvm_x86_ops->vcpu_create(kvm, id); in kvm_arch_vcpu_create()
8638 return vcpu; in kvm_arch_vcpu_create()
8641 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_setup() argument
8643 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_setup()
8644 kvm_vcpu_mtrr_init(vcpu); in kvm_arch_vcpu_setup()
8645 vcpu_load(vcpu); in kvm_arch_vcpu_setup()
8646 kvm_vcpu_reset(vcpu, false); in kvm_arch_vcpu_setup()
8647 kvm_mmu_setup(vcpu); in kvm_arch_vcpu_setup()
8648 vcpu_put(vcpu); in kvm_arch_vcpu_setup()
8652 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_postcreate() argument
8655 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
8657 kvm_hv_vcpu_postcreate(vcpu); in kvm_arch_vcpu_postcreate()
8659 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
8661 vcpu_load(vcpu); in kvm_arch_vcpu_postcreate()
8665 kvm_write_tsc(vcpu, &msr); in kvm_arch_vcpu_postcreate()
8666 vcpu_put(vcpu); in kvm_arch_vcpu_postcreate()
8667 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
8676 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_destroy() argument
8678 kvm_arch_vcpu_free(vcpu); in kvm_arch_vcpu_destroy()
8681 void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) in kvm_vcpu_reset() argument
8683 kvm_lapic_reset(vcpu, init_event); in kvm_vcpu_reset()
8685 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
8687 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
8688 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
8689 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
8690 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
8691 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
8692 kvm_clear_interrupt_queue(vcpu); in kvm_vcpu_reset()
8693 kvm_clear_exception_queue(vcpu); in kvm_vcpu_reset()
8694 vcpu->arch.exception.pending = false; in kvm_vcpu_reset()
8696 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
8697 kvm_update_dr0123(vcpu); in kvm_vcpu_reset()
8698 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
8699 kvm_update_dr6(vcpu); in kvm_vcpu_reset()
8700 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
8701 kvm_update_dr7(vcpu); in kvm_vcpu_reset()
8703 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
8705 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_vcpu_reset()
8706 vcpu->arch.apf.msr_val = 0; in kvm_vcpu_reset()
8707 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
8709 kvmclock_reset(vcpu); in kvm_vcpu_reset()
8711 kvm_clear_async_pf_completion_queue(vcpu); in kvm_vcpu_reset()
8712 kvm_async_pf_hash_reset(vcpu); in kvm_vcpu_reset()
8713 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
8723 kvm_put_guest_fpu(vcpu); in kvm_vcpu_reset()
8724 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave, in kvm_vcpu_reset()
8728 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu.state.xsave, in kvm_vcpu_reset()
8733 kvm_load_guest_fpu(vcpu); in kvm_vcpu_reset()
8737 kvm_pmu_reset(vcpu); in kvm_vcpu_reset()
8738 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
8740 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_vcpu_reset()
8741 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
8743 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
8746 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
8747 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
8748 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
8750 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
8752 kvm_x86_ops->vcpu_reset(vcpu, init_event); in kvm_vcpu_reset()
8755 void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector) in kvm_vcpu_deliver_sipi_vector() argument
8759 kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
8762 kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); in kvm_vcpu_deliver_sipi_vector()
8763 kvm_rip_write(vcpu, 0); in kvm_vcpu_deliver_sipi_vector()
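
For reference, the SIPI delivery above follows the architectural startup-IPI behaviour: for vector 0x12, CS.selector becomes 0x1200 (vector << 8), CS.base becomes 0x12000 (vector << 12) and RIP is written to 0, so the target vCPU starts fetching at physical address 0x12000. (The selector/base assignments themselves do not appear in this match listing because those lines do not mention vcpu.)
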
8769 struct kvm_vcpu *vcpu; in kvm_arch_hardware_enable() local
8784 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
8785 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
8786 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
8787 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
8789 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
8790 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
8807 * adjustment to TSC in each VCPU. When the VCPU later gets loaded, in kvm_arch_hardware_enable()
8809 * adjustments, in case multiple suspend cycles happen before some VCPU in kvm_arch_hardware_enable()
8837 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arch_hardware_enable()
8838 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
8839 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
8840 kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); in kvm_arch_hardware_enable()
8901 bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_reset_bsp() argument
8903 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
8907 bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu) in kvm_vcpu_is_bsp() argument
8909 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
8915 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_init() argument
8920 vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv(vcpu); in kvm_arch_vcpu_init()
8921 vcpu->arch.emulate_ctxt.ops = &emulate_ops; in kvm_arch_vcpu_init()
8922 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_init()
8923 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_init()
8925 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_init()
8932 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_init()
8934 kvm_set_tsc_khz(vcpu, max_tsc_khz); in kvm_arch_vcpu_init()
8936 r = kvm_mmu_create(vcpu); in kvm_arch_vcpu_init()
8940 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_init()
8941 r = kvm_create_lapic(vcpu); in kvm_arch_vcpu_init()
8947 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_init()
8949 if (!vcpu->arch.mce_banks) { in kvm_arch_vcpu_init()
8953 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_init()
8955 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) { in kvm_arch_vcpu_init()
8960 fx_init(vcpu); in kvm_arch_vcpu_init()
8962 vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; in kvm_arch_vcpu_init()
8964 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_init()
8966 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_init()
8968 kvm_async_pf_hash_reset(vcpu); in kvm_arch_vcpu_init()
8969 kvm_pmu_init(vcpu); in kvm_arch_vcpu_init()
8971 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_init()
8972 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_init()
8974 kvm_hv_vcpu_init(vcpu); in kvm_arch_vcpu_init()
8979 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_init()
8981 kvm_free_lapic(vcpu); in kvm_arch_vcpu_init()
8983 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_init()
8985 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_init()
8990 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_uninit() argument
8994 kvm_hv_vcpu_uninit(vcpu); in kvm_arch_vcpu_uninit()
8995 kvm_pmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
8996 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_uninit()
8997 kvm_free_lapic(vcpu); in kvm_arch_vcpu_uninit()
8998 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_uninit()
8999 kvm_mmu_destroy(vcpu); in kvm_arch_vcpu_uninit()
9000 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_uninit()
9001 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_uninit()
9002 if (!lapic_in_kernel(vcpu)) in kvm_arch_vcpu_uninit()
9006 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) in kvm_arch_sched_in() argument
9008 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
9009 kvm_x86_ops->sched_in(vcpu, cpu); in kvm_arch_sched_in()
9057 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu) in kvm_unload_vcpu_mmu() argument
9059 vcpu_load(vcpu); in kvm_unload_vcpu_mmu()
9060 kvm_mmu_unload(vcpu); in kvm_unload_vcpu_mmu()
9061 vcpu_put(vcpu); in kvm_unload_vcpu_mmu()
9067 struct kvm_vcpu *vcpu; in kvm_free_vcpus() local
9072 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_free_vcpus()
9073 kvm_clear_async_pf_completion_queue(vcpu); in kvm_free_vcpus()
9074 kvm_unload_vcpu_mmu(vcpu); in kvm_free_vcpus()
9076 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_free_vcpus()
9077 kvm_arch_vcpu_free(vcpu); in kvm_free_vcpus()
9283 struct kvm_vcpu *vcpu; in kvm_arch_memslots_updated() local
9293 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arch_memslots_updated()
9294 kvm_vcpu_kick(vcpu); in kvm_arch_memslots_updated()
9415 static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) in kvm_guest_apic_has_interrupt() argument
9417 return (is_guest_mode(vcpu) && in kvm_guest_apic_has_interrupt()
9419 kvm_x86_ops->guest_apic_has_interrupt(vcpu)); in kvm_guest_apic_has_interrupt()
9422 static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) in kvm_vcpu_has_events() argument
9424 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
9427 if (kvm_apic_has_events(vcpu)) in kvm_vcpu_has_events()
9430 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
9433 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
9436 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_vcpu_has_events()
9437 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
9438 kvm_x86_ops->nmi_allowed(vcpu))) in kvm_vcpu_has_events()
9441 if (kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_vcpu_has_events()
9442 (vcpu->arch.smi_pending && !is_smm(vcpu))) in kvm_vcpu_has_events()
9445 if (kvm_arch_interrupt_allowed(vcpu) && in kvm_vcpu_has_events()
9446 (kvm_cpu_has_interrupt(vcpu) || in kvm_vcpu_has_events()
9447 kvm_guest_apic_has_interrupt(vcpu))) in kvm_vcpu_has_events()
9450 if (kvm_hv_has_stimer_pending(vcpu)) in kvm_vcpu_has_events()
9456 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_runnable() argument
9458 return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu); in kvm_arch_vcpu_runnable()
9461 bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu) in kvm_arch_dy_runnable() argument
9463 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
9466 if (kvm_test_request(KVM_REQ_NMI, vcpu) || in kvm_arch_dy_runnable()
9467 kvm_test_request(KVM_REQ_SMI, vcpu) || in kvm_arch_dy_runnable()
9468 kvm_test_request(KVM_REQ_EVENT, vcpu)) in kvm_arch_dy_runnable()
9471 if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
9477 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_in_kernel() argument
9479 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
9482 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) in kvm_arch_vcpu_should_kick() argument
9484 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; in kvm_arch_vcpu_should_kick()
9487 int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) in kvm_arch_interrupt_allowed() argument
9489 return kvm_x86_ops->interrupt_allowed(vcpu); in kvm_arch_interrupt_allowed()
9492 unsigned long kvm_get_linear_rip(struct kvm_vcpu *vcpu) in kvm_get_linear_rip() argument
9494 if (is_64_bit_mode(vcpu)) in kvm_get_linear_rip()
9495 return kvm_rip_read(vcpu); in kvm_get_linear_rip()
9496 return (u32)(get_segment_base(vcpu, VCPU_SREG_CS) + in kvm_get_linear_rip()
9497 kvm_rip_read(vcpu)); in kvm_get_linear_rip()
9501 bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip) in kvm_is_linear_rip() argument
9503 return kvm_get_linear_rip(vcpu) == linear_rip; in kvm_is_linear_rip()
9507 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) in kvm_get_rflags() argument
9511 rflags = kvm_x86_ops->get_rflags(vcpu); in kvm_get_rflags()
9512 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
9518 static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in __kvm_set_rflags() argument
9520 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
9521 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
9523 kvm_x86_ops->set_rflags(vcpu, rflags); in __kvm_set_rflags()
9526 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) in kvm_set_rflags() argument
9528 __kvm_set_rflags(vcpu, rflags); in kvm_set_rflags()
9529 kvm_make_request(KVM_REQ_EVENT, vcpu); in kvm_set_rflags()
9533 void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work) in kvm_arch_async_page_ready() argument
9537 if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
9541 r = kvm_mmu_reload(vcpu); in kvm_arch_async_page_ready()
9545 if (!vcpu->arch.mmu.direct_map && in kvm_arch_async_page_ready()
9546 work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu)) in kvm_arch_async_page_ready()
9549 vcpu->arch.mmu.page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
9562 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn() argument
9566 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
9569 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
9572 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot() argument
9578 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
9579 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
9585 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn() argument
9587 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
9590 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn() argument
9594 i = j = kvm_async_pf_gfn_slot(vcpu, gfn); in kvm_del_async_pf_gfn()
9596 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
9599 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
9601 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
9608 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
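
kvm_add_async_pf_gfn(), kvm_async_pf_gfn_slot() and kvm_del_async_pf_gfn() manage a small (64-entry) open-addressed hash table over vcpu->arch.apf.gfns[]: ~0 marks an empty slot, collisions probe linearly, and deletion re-slots displaced entries so later lookups still terminate at an empty slot. A standalone illustration of the probing scheme, with an intentionally simplified stand-in hash and no deletion:

/* Standalone illustration (not kernel code): hash_gfn() stands in for
 * the kernel's hash_32()-based function; everything else mirrors the
 * insert/lookup structure shown above. */
#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64U

static uint64_t table[NSLOTS];

static uint32_t hash_gfn(uint64_t gfn)
{
        return (uint32_t)(gfn * 2654435761u) % NSLOTS;
}

static void add_gfn(uint64_t gfn)
{
        uint32_t key = hash_gfn(gfn);

        while (table[key] != ~0ULL)             /* find a free slot */
                key = (key + 1) % NSLOTS;
        table[key] = gfn;
}

static uint32_t slot_of(uint64_t gfn)
{
        uint32_t key = hash_gfn(gfn);
        unsigned int i;

        /* stop at the gfn itself, an empty slot, or after a full sweep */
        for (i = 0; i < NSLOTS &&
                    table[key] != gfn && table[key] != ~0ULL; i++)
                key = (key + 1) % NSLOTS;
        return key;
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < NSLOTS; i++)
                table[i] = ~0ULL;               /* cf. kvm_async_pf_hash_reset() */

        add_gfn(0x1234);
        add_gfn(0x1234 + NSLOTS);               /* same bucket, probes forward */
        printf("collided entry found: %s\n",
               table[slot_of(0x1234 + NSLOTS)] == 0x1234 + NSLOTS ? "yes" : "no");
        return 0;
}
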
9613 static int apf_put_user(struct kvm_vcpu *vcpu, u32 val) in apf_put_user() argument
9616 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val, in apf_put_user()
9620 static int apf_get_user(struct kvm_vcpu *vcpu, u32 *val) in apf_get_user() argument
9623 return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, val, in apf_get_user()
9627 void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_not_present() argument
9633 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
9635 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) || in kvm_arch_async_page_not_present()
9636 (vcpu->arch.apf.send_user_only && in kvm_arch_async_page_not_present()
9637 kvm_x86_ops->get_cpl(vcpu) == 0)) in kvm_arch_async_page_not_present()
9638 kvm_make_request(KVM_REQ_APF_HALT, vcpu); in kvm_arch_async_page_not_present()
9639 else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) { in kvm_arch_async_page_not_present()
9646 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_not_present()
9650 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, in kvm_arch_async_page_present() argument
9659 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
9662 if (vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED && in kvm_arch_async_page_present()
9663 !apf_get_user(vcpu, &val)) { in kvm_arch_async_page_present()
9665 vcpu->arch.exception.pending && in kvm_arch_async_page_present()
9666 vcpu->arch.exception.nr == PF_VECTOR && in kvm_arch_async_page_present()
9667 !apf_put_user(vcpu, 0)) { in kvm_arch_async_page_present()
9668 vcpu->arch.exception.injected = false; in kvm_arch_async_page_present()
9669 vcpu->arch.exception.pending = false; in kvm_arch_async_page_present()
9670 vcpu->arch.exception.nr = 0; in kvm_arch_async_page_present()
9671 vcpu->arch.exception.has_error_code = false; in kvm_arch_async_page_present()
9672 vcpu->arch.exception.error_code = 0; in kvm_arch_async_page_present()
9673 } else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { in kvm_arch_async_page_present()
9680 kvm_inject_page_fault(vcpu, &fault); in kvm_arch_async_page_present()
9683 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
9684 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
9687 bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) in kvm_arch_can_inject_async_page_present() argument
9689 if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED)) in kvm_arch_can_inject_async_page_present()
9692 return kvm_can_do_async_pf(vcpu); in kvm_arch_can_inject_async_page_present()
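
kvm_arch_can_inject_async_page_present() keys off the same enable bit the guest sets when it opts in. On the Linux guest side the opt-in is a single MSR write pointing the host at a per-CPU reason word; a loose kernel-side sketch, simplified from the guest's kvm_guest_cpu_init() with all error handling and CPU-hotplug plumbing dropped:

#include <asm/kvm_para.h>       /* MSR_KVM_ASYNC_PF_EN, KVM_ASYNC_PF_ENABLED */
#include <asm/msr.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);

static void enable_async_pf_sketch(void)
{
        u64 pa = __pa(this_cpu_ptr(&apf_reason));

        /* Hand the physical address of the reason word to the host and
         * flip the enable bit; the host then writes PAGE_NOT_PRESENT /
         * PAGE_READY tokens into it, as apf_put_user() above shows. */
        wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
}
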