
Lines Matching +full:pic +full:- +full:base +full:- +full:vec

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
16 * Ben-Ami Yassour <benami@il.ibm.com>
43 #include <linux/intel-iommu.h>
45 #include <linux/user-return-notifier.h>
59 #include <linux/entry-kvm.h>
88 ((struct kvm_vcpu *)(ctxt)->vcpu)
91 * - enable syscall by default because it's emulated by KVM
92 * - enable LME and LMA by default on 64-bit KVM
140 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
145 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
146 * adaptive tuning starting from a default advancement of 1000ns. '0' disables
147 * advancement entirely. Any other value is used as-is and disables adaptive tuning.
150 static int __read_mostly lapic_timer_advance_ns = -1;
163 int __read_mostly pi_inject_timer = -1;
288 size - useroffset, NULL); in kvm_alloc_emulator_cache()
297 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
313 if (msrs->registered) { in kvm_on_user_return()
314 msrs->registered = false; in kvm_on_user_return()
319 values = &msrs->values[slot]; in kvm_on_user_return()
320 if (values->host != values->curr) { in kvm_on_user_return()
321 wrmsrl(user_return_msrs_global.msrs[slot], values->host); in kvm_on_user_return()
322 values->curr = values->host; in kvm_on_user_return()
361 msrs->values[i].host = value; in kvm_user_return_msr_cpu_online()
362 msrs->values[i].curr = value; in kvm_user_return_msr_cpu_online()
372 value = (value & mask) | (msrs->values[slot].host & ~mask); in kvm_set_user_return_msr()
373 if (value == msrs->values[slot].curr) in kvm_set_user_return_msr()
379 msrs->values[slot].curr = value; in kvm_set_user_return_msr()
380 if (!msrs->registered) { in kvm_set_user_return_msr()
381 msrs->urn.on_user_return = kvm_on_user_return; in kvm_set_user_return_msr()
382 user_return_notifier_register(&msrs->urn); in kvm_set_user_return_msr()
383 msrs->registered = true; in kvm_set_user_return_msr()
394 if (msrs->registered) in drop_user_return_notifiers()
395 kvm_on_user_return(&msrs->urn); in drop_user_return_notifiers()
400 return vcpu->arch.apic_base; in kvm_get_apic_base()
413 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); in kvm_set_apic_base()
417 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) in kvm_set_apic_base()
419 if (!msr_info->host_initiated) { in kvm_set_apic_base()
426 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
427 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
487 unsigned nr = vcpu->arch.exception.nr; in kvm_deliver_exception_payload()
488 bool has_payload = vcpu->arch.exception.has_payload; in kvm_deliver_exception_payload()
489 unsigned long payload = vcpu->arch.exception.payload; in kvm_deliver_exception_payload()
497 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
501 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
505 vcpu->arch.dr6 |= DR6_RTM; in kvm_deliver_exception_payload()
506 vcpu->arch.dr6 |= payload; in kvm_deliver_exception_payload()
515 vcpu->arch.dr6 ^= payload & DR6_RTM; in kvm_deliver_exception_payload()
523 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
526 vcpu->arch.cr2 = payload; in kvm_deliver_exception_payload()
530 vcpu->arch.exception.has_payload = false; in kvm_deliver_exception_payload()
531 vcpu->arch.exception.payload = 0; in kvm_deliver_exception_payload()
544 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
548 * On vmentry, vcpu->arch.exception.pending is only in kvm_multiple_exception()
555 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
556 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
566 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
567 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
569 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
570 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
571 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
572 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
573 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
580 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
582 /* triple fault -> shutdown */ in kvm_multiple_exception()
591 * Generate a double fault per SDM Table 5-5, setting exception.pending so the #DF below is queued for (re)injection. in kvm_multiple_exception()
595 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
596 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
597 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
598 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
599 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
600 vcpu->arch.exception.has_payload = false; in kvm_multiple_exception()
601 vcpu->arch.exception.payload = 0; in kvm_multiple_exception()
604 * replace the previous exception with the new one, in the hope that instruction re-execution will regenerate the lost exception. in kvm_multiple_exception()
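
The #DF escalation above follows the exception classes of SDM Table 5-5: two contributory faults, or a second fault raised while delivering #PF, merge into #DF, and a fault during #DF delivery is a triple fault. The classifier this function relies on presumably looks like the following sketch (vector constants from <asm/kvm_host.h>):

	#define EXCPT_BENIGN		0
	#define EXCPT_CONTRIBUTORY	1
	#define EXCPT_PF		2

	static int exception_class(int vector)
	{
		switch (vector) {
		case PF_VECTOR:
			return EXCPT_PF;
		case DE_VECTOR: case TS_VECTOR: case NP_VECTOR:
		case SS_VECTOR: case GP_VECTOR:
			return EXCPT_CONTRIBUTORY;
		default:
			return EXCPT_BENIGN;
		}
	}
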
648 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
649 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
650 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
651 if (vcpu->arch.exception.nested_apf) { in kvm_inject_page_fault()
652 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
653 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
655 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
656 fault->address); in kvm_inject_page_fault()
665 WARN_ON_ONCE(fault->vector != PF_VECTOR); in kvm_inject_emulated_page_fault()
667 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
668 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
674 if ((fault->error_code & PFERR_PRESENT_MASK) && in kvm_inject_emulated_page_fault()
675 !(fault->error_code & PFERR_RSVD_MASK)) in kvm_inject_emulated_page_fault()
676 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
677 fault_mmu->root_hpa); in kvm_inject_emulated_page_fault()
679 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
680 return fault->nested_page_fault; in kvm_inject_emulated_page_fault()
686 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
740 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
742 return -EFAULT; in kvm_read_guest_page_mmu()
753 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
769 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; in load_pdptrs()
772 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs()
790 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs()
801 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
813 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); in pdptrs_changed()
819 return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
845 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
856 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
858 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
875 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
876 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
877 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
893 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
894 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
896 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
897 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
898 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
903 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && in kvm_load_guest_xsave_state()
904 vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_guest_xsave_state()
905 __write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
913 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { in kvm_load_host_xsave_state()
914 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
915 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
916 __write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
921 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
924 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
925 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
935 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
951 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
965 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
986 return -EINVAL; in kvm_valid_cr4()
988 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in kvm_valid_cr4()
989 return -EINVAL; in kvm_valid_cr4()
1012 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
1060 (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) in kvm_set_cr3()
1063 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
1067 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1081 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1091 return vcpu->arch.cr8; in kvm_get_cr8()
1099 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1101 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1102 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
1110 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1111 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1113 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1115 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1117 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1132 size_t size = ARRAY_SIZE(vcpu->arch.db); in __kvm_set_dr()
1136 vcpu->arch.db[array_index_nospec(dr, size)] = val; in __kvm_set_dr()
1137 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
1138 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
1143 return -1; /* #GP */ in __kvm_set_dr()
1144 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
1149 return -1; /* #GP */ in __kvm_set_dr()
1150 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
1170 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1174 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1178 *val = vcpu->arch.dr6; in kvm_get_dr()
1182 *val = vcpu->arch.dr7; in kvm_get_dr()
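
Both debug-register accessors sanitize the index with array_index_nospec(): the architectural bounds check alone does not constrain speculative execution, so a mispredicted branch could still read out of bounds. The idiom in isolation, as a sketch:

	#include <linux/nospec.h>

	/* idx is guest-influenced; clamp it under speculation before the
	 * table lookup, even though the range check has already run. */
	static unsigned long table_read(unsigned long *table, size_t size,
					size_t idx)
	{
		if (idx >= size)
			return 0;
		return table[array_index_nospec(idx, size)];
	}
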
1212 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
1335 * List of msr numbers which are used to expose MSR-based features that
1428 switch (msr->index) { in kvm_get_msr_feature()
1430 msr->data = kvm_get_arch_capabilities(); in kvm_get_msr_feature()
1433 rdmsrl_safe(msr->index, &msr->data); in kvm_get_msr_feature()
1493 u64 old_efer = vcpu->arch.efer; in set_efer()
1494 u64 efer = msr_info->data; in set_efer()
1500 if (!msr_info->host_initiated) { in set_efer()
1505 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1510 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1535 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1544 idx = srcu_read_lock(&kvm->srcu); in kvm_msr_allowed()
1546 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); in kvm_msr_allowed()
1552 allowed = msr_filter->default_allow; in kvm_msr_allowed()
1553 ranges = msr_filter->ranges; in kvm_msr_allowed()
1555 for (i = 0; i < msr_filter->count; i++) { in kvm_msr_allowed()
1556 u32 start = ranges[i].base; in kvm_msr_allowed()
1562 allowed = !!test_bit(index - start, bitmap); in kvm_msr_allowed()
1568 srcu_read_unlock(&kvm->srcu, idx); in kvm_msr_allowed()
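
kvm_msr_allowed() above walks a set of ranges, each carrying one bit per MSR, and falls back to the filter's default when no range matches. A stand-alone sketch of that lookup (hypothetical types, SRCU omitted):

	#include <linux/bitops.h>
	#include <linux/types.h>

	struct msr_range {
		u32 base;		/* first MSR covered */
		u32 nmsrs;		/* number of MSRs covered */
		unsigned long *bitmap;	/* one allow/deny bit per MSR */
	};

	static bool msr_allowed(const struct msr_range *r, int count,
				u32 index, bool default_allow)
	{
		int i;

		for (i = 0; i < count; i++, r++) {
			if (index >= r->base && index - r->base < r->nmsrs)
				return test_bit(index - r->base, r->bitmap);
		}
		return default_allow;
	}
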
1577 * Returns 0 on success, non-0 otherwise.
1601 * a #GP is raised if a non-canonical address is written on Intel but not on in __kvm_set_msr()
1602 * AMD (which ignores the top 32-bits, because it does in __kvm_set_msr()
1603 * not implement 64-bit SYSENTER). in __kvm_set_msr()
1605 * 64-bit code should hence be able to write a non-canonical value on AMD; in __kvm_set_msr()
1607 * making the address canonical ensures that vmentry does not fail on Intel after writing a non-canonical in __kvm_set_msr()
1609 * value, and that there is no #GP in the guest when AMD invokes 64-bit SYSENTER. in __kvm_set_msr()
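
The canonicalization this comment describes is a plain sign extension of the top implemented virtual-address bit. A sketch for the 48-bit case (the kernel parameterizes the width; like the kernel, this relies on arithmetic right shift of signed values):

	/* Replicate bit 47 into bits 48-63 so the address is canonical. */
	static u64 make_canonical_48(u64 la)
	{
		return (u64)(((s64)la << 16) >> 16);
	}
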
1636 * Returns 0 on success, non-0 otherwise.
1686 if (vcpu->run->msr.error) { in complete_emulated_msr()
1690 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_emulated_msr()
1691 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_emulated_msr()
1727 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1730 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1731 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1732 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
1733 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
1734 vcpu->run->msr.index = index; in kvm_msr_user_space()
1735 vcpu->run->msr.data = data; in kvm_msr_user_space()
1736 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
1776 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
1777 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
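
RDMSR returns its 64-bit result split across EDX:EAX, which is exactly what the two masked writes above implement. For illustration:

	/* Split a value the way kvm_emulate_rdmsr() does, and rejoin it
	 * the way the WRMSR path would. */
	static void split_msr(u64 data, u32 *eax, u32 *edx)
	{
		*eax = data & -1u;		/* low 32 bits -> EAX */
		*edx = (data >> 32) & -1u;	/* high 32 bits -> EDX */
	}

	static u64 join_msr(u32 eax, u32 edx)
	{
		return ((u64)edx << 32) | eax;
	}
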
1813 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
1820 * i.e. sending an IPI; sending the IPI early in the VM-Exit flow reduces the latency of virtual IPIs by avoiding the expensive guest-to-host transition.
1827 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
1836 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1837 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1838 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); in handle_fastpath_set_x2apic_icr_irqoff()
1927 write_seqcount_begin(&vdata->seq); in update_pvclock_gtod()
1930 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; in update_pvclock_gtod()
1931 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; in update_pvclock_gtod()
1932 vdata->clock.mask = tk->tkr_mono.mask; in update_pvclock_gtod()
1933 vdata->clock.mult = tk->tkr_mono.mult; in update_pvclock_gtod()
1934 vdata->clock.shift = tk->tkr_mono.shift; in update_pvclock_gtod()
1935 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; in update_pvclock_gtod()
1936 vdata->clock.offset = tk->tkr_mono.base; in update_pvclock_gtod()
1938 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; in update_pvclock_gtod()
1939 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; in update_pvclock_gtod()
1940 vdata->raw_clock.mask = tk->tkr_raw.mask; in update_pvclock_gtod()
1941 vdata->raw_clock.mult = tk->tkr_raw.mult; in update_pvclock_gtod()
1942 vdata->raw_clock.shift = tk->tkr_raw.shift; in update_pvclock_gtod()
1943 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; in update_pvclock_gtod()
1944 vdata->raw_clock.offset = tk->tkr_raw.base; in update_pvclock_gtod()
1946 vdata->wall_time_sec = tk->xtime_sec; in update_pvclock_gtod()
1948 vdata->offs_boot = tk->offs_boot; in update_pvclock_gtod()
1950 write_seqcount_end(&vdata->seq); in update_pvclock_gtod()
1973 kvm->arch.wall_clock = wall_clock; in kvm_write_wall_clock()
1995 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); in kvm_write_wall_clock()
2010 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2012 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2013 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
2016 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
2019 vcpu->arch.time = system_time; in kvm_write_system_time()
2023 vcpu->arch.pv_time_enabled = false; in kvm_write_system_time()
2027 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_write_system_time()
2028 &vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2030 vcpu->arch.pv_time_enabled = true; in kvm_write_system_time()
2053 shift--; in kvm_get_time_scale()
2089 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
2096 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2097 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2101 return -1; in set_tsc_khz()
2105 /* TSC scaling required - calculate ratio */ in set_tsc_khz()
2110 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", in set_tsc_khz()
2112 return -1; in set_tsc_khz()
2115 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
2127 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
2128 return -1; in kvm_set_tsc_khz()
2133 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2134 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2135 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2143 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); in kvm_set_tsc_khz()
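
thresh_lo and thresh_hi bracket the host frequency by tsc_tolerance_ppm; requested guest frequencies outside that window force catchup mode. A sketch of the linear ppm adjustment, assuming adjust_tsc_khz() scales by (1000000 + ppm) / 1000000, which is what a parts-per-million tolerance implies:

	/* Widen or narrow a kHz value by ppm parts per million. */
	static u32 adjust_khz_by_ppm(u32 khz, s32 ppm)
	{
		return (u64)khz * (1000000 + ppm) / 1000000;
	}

	/* e.g. tsc_khz = 2000000 with the default 250 ppm tolerance gives
	 * a window of [1999500, 2000500] kHz. */
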
2154 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2155 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2156 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2157 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
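
compute_guest_tsc() extrapolates the guest TSC from elapsed nanoseconds with a (mult, shift) pair. A sketch of what pvclock_scale_delta() computes, assuming a power-of-two pre-shift followed by a 0.32 fixed-point multiply:

	#include <linux/math64.h>

	static u64 scale_delta(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;	/* pre-scale into range */
		else
			delta <<= shift;
		/* 64x32 multiply, keep bits [95:32] of the product */
		return mul_u64_u32_shr(delta, mul_frac, 32);
	}
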
2170 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2173 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2174 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2184 if (ka->use_master_clock || in kvm_track_tsc_matching()
2185 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) in kvm_track_tsc_matching()
2188 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2189 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2190 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2197 * The most significant 64-N bits (mult) of ratio represent the integral part,
2200 * and the low N bits (frac) the fractional part, of the fixed point number (mult + frac * 2^(-N)).
2212 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
2227 return target_tsc - tsc; in kvm_compute_tsc_offset()
2232 return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc); in kvm_read_l1_tsc()
2238 vcpu->arch.l1_tsc_offset = offset; in kvm_vcpu_write_tsc_offset()
2239 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
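
Applying the (64-N).N fixed-point ratio described a few lines up takes one 64x64 multiply with a 128-bit intermediate, then drops the N fraction bits. A sketch using the generic helper (in KVM, N is the vendor-defined kvm_tsc_scaling_ratio_frac_bits):

	#include <linux/math64.h>

	/* tsc * (mult + frac * 2^-N) without losing the high bits. */
	static u64 scale_tsc(u64 tsc, u64 ratio, unsigned int frac_bits)
	{
		return mul_u64_u64_shr(tsc, ratio, frac_bits);
	}
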
2246 * TSC is marked unstable when we're running on Hyper-V, in kvm_check_tsc_unstable()
2257 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2264 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2267 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2269 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2272 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2278 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2280 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2298 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2300 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2307 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_synchronize_tsc()
2316 * These values are tracked in kvm->arch.cur_xxx variables. in kvm_synchronize_tsc()
2318 kvm->arch.cur_tsc_generation++; in kvm_synchronize_tsc()
2319 kvm->arch.cur_tsc_nsec = ns; in kvm_synchronize_tsc()
2320 kvm->arch.cur_tsc_write = data; in kvm_synchronize_tsc()
2321 kvm->arch.cur_tsc_offset = offset; in kvm_synchronize_tsc()
2329 kvm->arch.last_tsc_nsec = ns; in kvm_synchronize_tsc()
2330 kvm->arch.last_tsc_write = data; in kvm_synchronize_tsc()
2331 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_synchronize_tsc()
2333 vcpu->arch.last_guest_tsc = data; in kvm_synchronize_tsc()
2336 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_synchronize_tsc()
2337 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_synchronize_tsc()
2338 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_synchronize_tsc()
2341 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2343 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2345 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_synchronize_tsc()
2347 kvm->arch.nr_vcpus_matched_tsc++; in kvm_synchronize_tsc()
2351 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2357 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2363 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2397 switch (clock->vclock_mode) { in vgettsc()
2404 v = (tsc_pg_val - clock->cycle_last) & in vgettsc()
2405 clock->mask; in vgettsc()
2414 v = (*tsc_timestamp - clock->cycle_last) & in vgettsc()
2415 clock->mask; in vgettsc()
2424 return v * clock->mult; in vgettsc()
2435 seq = read_seqcount_begin(&gtod->seq); in do_monotonic_raw()
2436 ns = gtod->raw_clock.base_cycles; in do_monotonic_raw()
2437 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); in do_monotonic_raw()
2438 ns >>= gtod->raw_clock.shift; in do_monotonic_raw()
2439 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); in do_monotonic_raw()
2440 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_monotonic_raw()
2454 seq = read_seqcount_begin(&gtod->seq); in do_realtime()
2455 ts->tv_sec = gtod->wall_time_sec; in do_realtime()
2456 ns = gtod->clock.base_cycles; in do_realtime()
2457 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_realtime()
2458 ns >>= gtod->clock.shift; in do_realtime()
2459 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_realtime()
2461 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); in do_realtime()
2462 ts->tv_nsec = ns; in do_realtime()
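
update_pvclock_gtod() above is the writer side, and do_monotonic_raw()/do_realtime() the reader side, of one seqcount. A minimal sketch of the protocol with a hypothetical two-field payload (writers must already be serialized against each other, per the kernel's seqcount rules):

	#include <linux/seqlock.h>

	static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
	static u64 demo_a, demo_b;

	static void demo_write(u64 a, u64 b)
	{
		write_seqcount_begin(&demo_seq);	/* count becomes odd */
		demo_a = a;
		demo_b = b;
		write_seqcount_end(&demo_seq);		/* count even again */
	}

	static u64 demo_read(void)
	{
		unsigned int seq;
		u64 a, b;

		do {
			seq = read_seqcount_begin(&demo_seq); /* waits out odd counts */
			a = demo_a;
			b = demo_b;
		} while (read_seqcount_retry(&demo_seq, seq));
		return a + b;
	}
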
2506 * 4. VCPU0: ret0 = timespec0 + (rdtsc - tsc0)
2507 * 5. VCPU1: ret1 = timespec1 + (rdtsc - tsc1)
2508 *           ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2512 * Since the ret0 update is visible to VCPU1, monotonicity (ret0 < ret1) requires:
2513 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2515 * - 0 < N - M => M < N
2534 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
2538 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
2539 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
2546 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
2547 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
2549 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
2550 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
2551 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
2553 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
2557 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
2572 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
2574 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2586 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2592 struct kvm_arch *ka = &kvm->arch; in get_kvmclock_ns()
2596 spin_lock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2597 if (!ka->use_master_clock) { in get_kvmclock_ns()
2598 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2599 return get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2602 hv_clock.tsc_timestamp = ka->master_cycle_now; in get_kvmclock_ns()
2603 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in get_kvmclock_ns()
2604 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2615 ret = get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2624 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page()
2627 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2642 * and third write. The vcpu->pv_time cache is still valid, because the in kvm_setup_pvclock_page()
2650 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2651 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2652 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2653 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2658 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2660 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2661 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2662 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2665 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2667 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2668 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2669 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2673 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2674 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2675 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2676 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
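
The version dance above (bump to odd, publish the payload, bump back to even) pairs with a guest-side reader that retries until it sees a stable, even version. A sketch of such a reader (hypothetical helper; the real guest code lives elsewhere):

	#include <linux/compiler.h>
	#include <asm/barrier.h>
	#include <asm/pvclock-abi.h>

	static u64 read_system_time(const struct pvclock_vcpu_time_info *src)
	{
		u32 version;
		u64 st;

		for (;;) {
			version = READ_ONCE(src->version);
			if (version & 1) {	/* host update in flight */
				cpu_relax();
				continue;
			}
			rmb();
			st = src->system_time;
			rmb();
			if (READ_ONCE(src->version) == version)
				return st;	/* stable snapshot */
		}
	}
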
2682 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
2683 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
2696 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2697 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
2699 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
2700 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
2702 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2723 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
2729 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
2732 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); in kvm_guest_time_update()
2744 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
2746 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
2747 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
2748 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
2751 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
2752 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
2753 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
2760 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
2762 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
2764 if (v == kvm_get_vcpu(v->kvm, 0)) in kvm_guest_time_update()
2765 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
2771 * vcpu->cpu migration, should not allow system_timestamp from
2777 * We need to rate-limit these requests though, as they can
2780 * by the delay we use to rate-limit the updates.
2802 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update()
2805 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
2821 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
2822 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
2833 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
2840 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
2842 u32 msr = msr_info->index; in set_msr_mce()
2843 u64 data = msr_info->data; in set_msr_mce()
2847 vcpu->arch.mcg_status = data; in set_msr_mce()
2851 (data || !msr_info->host_initiated)) in set_msr_mce()
2855 vcpu->arch.mcg_ctl = data; in set_msr_mce()
2861 msr - MSR_IA32_MC0_CTL, in set_msr_mce()
2862 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in set_msr_mce()
2871 return -1; in set_msr_mce()
2874 if (!msr_info->host_initiated && in set_msr_mce()
2877 return -1; in set_msr_mce()
2880 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
2890 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
2892 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
2893 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
2894 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
2895 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
2918 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
2940 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
2948 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2952 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2953 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
2962 /* Bits 8-63 are reserved */ in kvm_pv_enable_async_pf_int()
2969 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
2971 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
2978 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
2979 vcpu->arch.time = 0; in kvmclock_reset()
2984 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
2990 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
2999 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3002 /* -EAGAIN is returned in atomic context so we can just return. */ in record_steal_time()
3003 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, in record_steal_time()
3004 &map, &vcpu->arch.st.cache, false)) in record_steal_time()
3008 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in record_steal_time()
3015 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3016 st->preempted & KVM_VCPU_FLUSH_TLB); in record_steal_time()
3017 if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) in record_steal_time()
3020 st->preempted = 0; in record_steal_time()
3023 vcpu->arch.st.preempted = 0; in record_steal_time()
3025 if (st->version & 1) in record_steal_time()
3026 st->version += 1; /* first time write, random junk */ in record_steal_time()
3028 st->version += 1; in record_steal_time()
3032 st->steal += current->sched_info.run_delay - in record_steal_time()
3033 vcpu->arch.st.last_steal; in record_steal_time()
3034 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3038 st->version += 1; in record_steal_time()
3040 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); in record_steal_time()
3046 u32 msr = msr_info->index; in kvm_set_msr_common()
3047 u64 data = msr_info->data; in kvm_set_msr_common()
3060 if (msr_info->host_initiated) in kvm_set_msr_common()
3061 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3064 if (!msr_info->host_initiated) in kvm_set_msr_common()
3066 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3071 if (!msr_info->host_initiated) in kvm_set_msr_common()
3078 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3091 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3107 /* We support the non-activated case already */ in kvm_set_msr_common()
3110 /* Values other than LBR and BTF are vendor-specific, thus reserved and should throw a #GP. */ in kvm_set_msr_common()
3128 if (!msr_info->host_initiated) { in kvm_set_msr_common()
3129 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3136 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3140 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3141 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { in kvm_set_msr_common()
3144 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3147 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3151 if (!msr_info->host_initiated) in kvm_set_msr_common()
3153 vcpu->arch.smbase = data; in kvm_set_msr_common()
3156 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3159 if (msr_info->host_initiated) { in kvm_set_msr_common()
3162 u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3164 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3168 if (!msr_info->host_initiated && in kvm_set_msr_common()
3178 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3182 if (!msr_info->host_initiated) in kvm_set_msr_common()
3184 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3190 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3196 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3202 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3208 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3228 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3242 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3263 if (data & (-1ULL << 1)) in kvm_set_msr_common()
3266 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3271 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
3291 * all pre-dating SVM, but a recommended workaround from in kvm_set_msr_common()
3307 msr_info->host_initiated); in kvm_set_msr_common()
3309 /* Drop writes to this legacy MSR -- see the rdmsr counterpart for further detail. */ in kvm_set_msr_common()
3319 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3324 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3327 if (!msr_info->host_initiated || in kvm_set_msr_common()
3331 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3338 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3341 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
3354 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3363 data = vcpu->arch.mcg_cap; in get_msr_mce()
3368 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3371 data = vcpu->arch.mcg_status; in get_msr_mce()
3377 msr - MSR_IA32_MC0_CTL, in get_msr_mce()
3378 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in get_msr_mce()
3380 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3391 switch (msr_info->index) { in kvm_get_msr_common()
3414 * so for existing CPU-specific MSRs. in kvm_get_msr_common()
3421 msr_info->data = 0; in kvm_get_msr_common()
3428 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3430 msr_info->data = 0; in kvm_get_msr_common()
3433 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3436 if (!msr_info->host_initiated && in kvm_get_msr_common()
3439 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
3442 if (!msr_info->host_initiated && in kvm_get_msr_common()
3445 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
3448 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
3457 * return L1's TSC value to ensure backwards-compatible behavior for migration. in kvm_get_msr_common()
3460 u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : in kvm_get_msr_common()
3461 vcpu->arch.tsc_offset; in kvm_get_msr_common()
3463 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; in kvm_get_msr_common()
3468 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3470 msr_info->data = 3; in kvm_get_msr_common()
3484 msr_info->data = 1 << 24; in kvm_get_msr_common()
3487 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
3490 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3492 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
3495 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
3498 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
3501 if (!msr_info->host_initiated) in kvm_get_msr_common()
3503 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
3506 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
3510 msr_info->data = 1000ULL; in kvm_get_msr_common()
3512 msr_info->data |= (((uint64_t)4ULL) << 40); in kvm_get_msr_common()
3515 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
3521 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3527 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3533 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3539 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3545 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
3551 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
3557 msr_info->data = 0; in kvm_get_msr_common()
3563 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
3569 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
3575 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
3582 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
3583 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
3584 msr_info->host_initiated); in kvm_get_msr_common()
3586 if (!msr_info->host_initiated && in kvm_get_msr_common()
3589 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
3593 * Provide expected ramp-up count for K7. All other in kvm_get_msr_common()
3601 msr_info->data = 0x20000000; in kvm_get_msr_common()
3613 msr_info->index, &msr_info->data, in kvm_get_msr_common()
3614 msr_info->host_initiated); in kvm_get_msr_common()
3626 msr_info->data = 0xbe702111; in kvm_get_msr_common()
3631 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
3636 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
3639 if (!msr_info->host_initiated && in kvm_get_msr_common()
3640 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
3642 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
3645 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
3648 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
3651 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3671 for (i = 0; i < msrs->nmsrs; ++i) in __msr_io()
3693 r = -EFAULT; in msr_io()
3697 r = -E2BIG; in msr_io()
3702 entries = memdup_user(user_msrs->entries, size); in msr_io()
3712 r = -EFAULT; in msr_io()
3713 if (writeback && copy_to_user(user_msrs->entries, entries, size)) in msr_io()
3845 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
3846 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3852 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
3879 r = -EFAULT; in kvm_arch_dev_ioctl()
3886 r = -E2BIG; in kvm_arch_dev_ioctl()
3889 r = -EFAULT; in kvm_arch_dev_ioctl()
3890 if (copy_to_user(user_msr_list->indices, &msrs_to_save, in kvm_arch_dev_ioctl()
3893 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, in kvm_arch_dev_ioctl()
3905 r = -EFAULT; in kvm_arch_dev_ioctl()
3909 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, in kvm_arch_dev_ioctl()
3914 r = -EFAULT; in kvm_arch_dev_ioctl()
3921 r = -EFAULT; in kvm_arch_dev_ioctl()
3932 r = -EFAULT; in kvm_arch_dev_ioctl()
3939 r = -E2BIG; in kvm_arch_dev_ioctl()
3942 r = -EFAULT; in kvm_arch_dev_ioctl()
3943 if (copy_to_user(user_msr_list->indices, &msr_based_features, in kvm_arch_dev_ioctl()
3953 r = -EINVAL; in kvm_arch_dev_ioctl()
3967 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
3975 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
3976 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
3977 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
3984 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
3987 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
3988 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
3989 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
3993 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
3994 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
3995 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4001 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4003 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4011 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
4013 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4015 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4017 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4029 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
4033 * preempted if and only if the VM-Exit was due to a host interrupt. in kvm_steal_time_set_preempted()
4035 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
4036 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
4040 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
4041 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4044 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4047 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, in kvm_steal_time_set_preempted()
4048 &vcpu->arch.st.cache, true)) in kvm_steal_time_set_preempted()
4052 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in kvm_steal_time_set_preempted()
4054 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4056 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); in kvm_steal_time_set_preempted()
4063 if (vcpu->preempted) in kvm_arch_vcpu_put()
4064 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4079 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4081 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4084 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4096 if (vcpu->arch.apicv_active) in kvm_vcpu_ioctl_get_lapic()
4138 * instruction boundary and with no events half-injected. in kvm_vcpu_ready_for_interrupt_injection()
4143 !vcpu->arch.exception.pending); in kvm_vcpu_ready_for_interrupt_injection()
4149 if (irq->irq >= KVM_NR_INTERRUPTS) in kvm_vcpu_ioctl_interrupt()
4150 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
4152 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4153 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4159 * With in-kernel LAPIC, we only use this to inject EXTINT, so in kvm_vcpu_ioctl_interrupt()
4160 * fail for in-kernel 8259. in kvm_vcpu_ioctl_interrupt()
4162 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4163 return -ENXIO; in kvm_vcpu_ioctl_interrupt()
4165 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4166 return -EEXIST; in kvm_vcpu_ioctl_interrupt()
4168 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
4190 if (tac->flags) in vcpu_ioctl_tpr_access_reporting()
4191 return -EINVAL; in vcpu_ioctl_tpr_access_reporting()
4192 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4202 r = -EINVAL; in kvm_vcpu_ioctl_x86_setup_mce()
4208 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4211 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4214 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4224 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4226 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4228 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) in kvm_vcpu_ioctl_x86_set_mce()
4229 return -EINVAL; in kvm_vcpu_ioctl_x86_set_mce()
4234 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && in kvm_vcpu_ioctl_x86_set_mce()
4235 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4237 banks += 4 * mce->bank; in kvm_vcpu_ioctl_x86_set_mce()
4242 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4244 if (mce->status & MCI_STATUS_UC) { in kvm_vcpu_ioctl_x86_set_mce()
4245 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
4251 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4252 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4253 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4254 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
4255 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4260 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4261 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4262 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4263 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4281 * modified under nVMX). Unless the per-VM capability, in kvm_vcpu_ioctl_x86_get_vcpu_events()
4288 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
4289 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4298 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
4299 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4300 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4302 events->exception.injected = vcpu->arch.exception.injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4303 events->exception.pending = vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4309 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4310 events->exception.injected |= in kvm_vcpu_ioctl_x86_get_vcpu_events()
4311 vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4313 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4314 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4315 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4316 events->exception_has_payload = vcpu->arch.exception.has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4317 events->exception_payload = vcpu->arch.exception.payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4319 events->interrupt.injected = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4320 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4321 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4322 events->interrupt.soft = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4323 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4325 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4326 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4327 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4328 events->nmi.pad = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4330 events->sipi_vector = 0; /* never valid when reporting to user space */ in kvm_vcpu_ioctl_x86_get_vcpu_events()
4332 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4333 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4334 events->smi.smm_inside_nmi = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4335 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4336 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4338 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_get_vcpu_events()
4341 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4342 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4344 memset(&events->reserved, 0, sizeof(events->reserved)); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4352 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_set_vcpu_events()
4357 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4359 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4360 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4361 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4362 if (events->exception.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4363 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4365 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4367 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4368 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4371 if ((events->exception.injected || events->exception.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4372 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4373 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4376 if (events->flags & KVM_VCPUEVENT_VALID_SMM && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4377 (events->smi.smm || events->smi.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4378 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4379 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4382 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4383 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4384 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4385 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4386 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4387 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4388 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4390 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4391 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4392 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4393 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4395 events->interrupt.shadow); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4397 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4398 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4399 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4400 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4402 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4404 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4406 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4407 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4408 if (events->smi.smm) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4409 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4411 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4413 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4417 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4419 if (events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4420 if (events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4421 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4423 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4427 if (events->smi.latched_init) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4428 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4430 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4444 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
4446 dbgregs->dr6 = val; in kvm_vcpu_ioctl_x86_get_debugregs()
4447 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
4448 dbgregs->flags = 0; in kvm_vcpu_ioctl_x86_get_debugregs()
4449 memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved)); in kvm_vcpu_ioctl_x86_get_debugregs()
4455 if (dbgregs->flags) in kvm_vcpu_ioctl_x86_set_debugregs()
4456 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4458 if (dbgregs->dr6 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4459 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4460 if (dbgregs->dr7 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4461 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4463 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
4465 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
4466 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
4476 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in fill_xsave()
4477 u64 xstate_bv = xsave->header.xfeatures; in fill_xsave()
4487 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; in fill_xsave()
4492 * non-compacted offset. in fill_xsave()
4496 u64 xfeature_mask = valid & -valid; in fill_xsave()
4497 int xfeature_nr = fls64(xfeature_mask) - 1; in fill_xsave()
4505 memcpy(dest + offset, &vcpu->arch.pkru, in fill_xsave()
4506 sizeof(vcpu->arch.pkru)); in fill_xsave()
4512 valid -= xfeature_mask; in fill_xsave()
4518 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in load_xsave()
4529 xsave->header.xfeatures = xstate_bv; in load_xsave()
4531 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; in load_xsave()
4534 * Copy each region from the non-compacted offset to the in load_xsave()
4539 u64 xfeature_mask = valid & -valid; in load_xsave()
4540 int xfeature_nr = fls64(xfeature_mask) - 1; in load_xsave()
4548 memcpy(&vcpu->arch.pkru, src + offset, in load_xsave()
4549 sizeof(vcpu->arch.pkru)); in load_xsave()
4554 valid -= xfeature_mask; in load_xsave()
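
Both xsave walks above peel one feature per iteration: valid & -valid isolates the lowest set bit, and fls64() - 1 converts that bit to a feature number. In isolation:

	#include <linux/bitops.h>

	static void walk_xfeatures(u64 valid)
	{
		while (valid) {
			u64 mask = valid & -valid;	/* lowest set bit */
			int nr = fls64(mask) - 1;	/* its bit index */

			/* valid = 0x2C0 visits nr = 6, 7, 9 in turn */
			(void)nr;
			valid -= mask;
		}
	}
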
4563 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
4565 memcpy(guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
4566 &vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
4568 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = in kvm_vcpu_ioctl_x86_get_xsave()
4579 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4580 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4589 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4590 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
4594 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4595 memcpy(&vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
4596 guest_xsave->region, sizeof(struct fxregs_state)); in kvm_vcpu_ioctl_x86_set_xsave()
4605 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4609 guest_xcrs->nr_xcrs = 1; in kvm_vcpu_ioctl_x86_get_xcrs()
4610 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4611 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
4612 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
4621 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4623 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) in kvm_vcpu_ioctl_x86_set_xcrs()
4624 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4626 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
4628 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { in kvm_vcpu_ioctl_x86_set_xcrs()
4630 guest_xcrs->xcrs[i].value); in kvm_vcpu_ioctl_x86_set_xcrs()
4634 r = -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4646 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
4647 return -EINVAL; in kvm_set_guest_paused()
4648 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
4660 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4661 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4663 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4665 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
4666 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4670 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
4671 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4672 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
4675 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4676 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4677 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4679 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4682 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
4687 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4692 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4693 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
4699 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4706 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4721 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4727 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4733 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4740 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4755 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4773 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4776 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4783 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4787 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4794 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4798 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4801 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4808 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4810 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4814 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4816 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4822 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4828 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4838 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4841 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4844 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4846 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4852 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4861 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4872 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4881 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4893 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4903 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4913 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4919 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4937 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4943 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4963 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4979 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
4989 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4999 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5000 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
5003 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); in kvm_arch_vcpu_ioctl()
5004 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5005 if (get_user(user_data_size, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5008 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5014 if (put_user(r, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5015 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5017 r = -E2BIG; in kvm_arch_vcpu_ioctl()
5029 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5030 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
5033 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5037 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5052 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5053 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5054 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5061 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5066 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5070 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5077 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5095 if (addr > (unsigned int)(-3 * PAGE_SIZE)) in kvm_vm_ioctl_set_tss_addr()
5096 return -EINVAL; in kvm_vm_ioctl_set_tss_addr()
5111 return -EINVAL; in kvm_vm_ioctl_set_nr_mmu_pages()
5113 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5116 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
5118 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5124 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
5129 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip() local
5133 switch (chip->chip_id) { in kvm_vm_ioctl_get_irqchip()
5135 memcpy(&chip->chip.pic, &pic->pics[0], in kvm_vm_ioctl_get_irqchip()
5139 memcpy(&chip->chip.pic, &pic->pics[1], in kvm_vm_ioctl_get_irqchip()
5143 kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
5146 r = -EINVAL; in kvm_vm_ioctl_get_irqchip()
5154 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip() local
5158 switch (chip->chip_id) { in kvm_vm_ioctl_set_irqchip()
5160 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5161 memcpy(&pic->pics[0], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5163 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5166 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5167 memcpy(&pic->pics[1], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5169 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5172 kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
5175 r = -EINVAL; in kvm_vm_ioctl_set_irqchip()
5178 kvm_pic_update_irq(pic); in kvm_vm_ioctl_set_irqchip()
5184 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; in kvm_vm_ioctl_get_pit()
5186 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); in kvm_vm_ioctl_get_pit()
5188 mutex_lock(&kps->lock); in kvm_vm_ioctl_get_pit()
5189 memcpy(ps, &kps->channels, sizeof(*ps)); in kvm_vm_ioctl_get_pit()
5190 mutex_unlock(&kps->lock); in kvm_vm_ioctl_get_pit()
5197 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit()
5199 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5200 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); in kvm_vm_ioctl_set_pit()
5202 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
5203 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5209 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5210 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
5211 sizeof(ps->channels)); in kvm_vm_ioctl_get_pit2()
5212 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
5213 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5214 memset(&ps->reserved, 0, sizeof(ps->reserved)); in kvm_vm_ioctl_get_pit2()
5223 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2()
5225 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5226 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5227 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5230 memcpy(&pit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
5231 sizeof(pit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
5232 pit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
5234 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
5236 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5243 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject()
5245 /* pit->pit_state.lock was overloaded to prevent userspace from getting an inconsistent state after running multiple KVM_REINJECT_CONTROL ioctls in parallel. Use a separate lock if that ioctl isn't rare. */ in kvm_vm_ioctl_reinject()
5249 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5250 kvm_pit_set_reinject(pit, control->pit_reinject); in kvm_vm_ioctl_reinject()
5251 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5259 * Flush potentially hardware-cached dirty pages to dirty_bitmap. in kvm_arch_sync_dirty_log()
5269 return -ENXIO; in kvm_vm_ioctl_irq_line()
5271 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
5272 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
5282 if (cap->flags) in kvm_vm_ioctl_enable_cap()
5283 return -EINVAL; in kvm_vm_ioctl_enable_cap()
5285 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
5287 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5291 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5292 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5293 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
5295 r = -EEXIST; in kvm_vm_ioctl_enable_cap()
5298 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
5305 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
5306 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5309 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5313 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5314 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
5317 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
5318 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
5319 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
5320 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
5325 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5326 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) in kvm_vm_ioctl_enable_cap()
5329 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && in kvm_vm_ioctl_enable_cap()
5331 kvm->arch.mwait_in_guest = true; in kvm_vm_ioctl_enable_cap()
5332 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) in kvm_vm_ioctl_enable_cap()
5333 kvm->arch.hlt_in_guest = true; in kvm_vm_ioctl_enable_cap()
5334 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) in kvm_vm_ioctl_enable_cap()
5335 kvm->arch.pause_in_guest = true; in kvm_vm_ioctl_enable_cap()
5336 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) in kvm_vm_ioctl_enable_cap()
5337 kvm->arch.cstate_in_guest = true; in kvm_vm_ioctl_enable_cap()
5341 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5345 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5349 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5353 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5367 msr_filter->default_allow = default_allow; in kvm_alloc_msr_filter()
5378 for (i = 0; i < msr_filter->count; i++) in kvm_free_msr_filter()
5379 kfree(msr_filter->ranges[i].bitmap); in kvm_free_msr_filter()
5392 if (!user_range->nmsrs) in kvm_add_msr_filter()
5395 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); in kvm_add_msr_filter()
5397 return -EINVAL; in kvm_add_msr_filter()
5399 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); in kvm_add_msr_filter()
5404 .flags = user_range->flags, in kvm_add_msr_filter()
5405 .base = user_range->base, in kvm_add_msr_filter()
5406 .nmsrs = user_range->nmsrs, in kvm_add_msr_filter()
5411 r = -EINVAL; in kvm_add_msr_filter()
5416 r = -EINVAL; in kvm_add_msr_filter()
5421 msr_filter->ranges[msr_filter->count] = range; in kvm_add_msr_filter()
5422 msr_filter->count++; in kvm_add_msr_filter()
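Each filter range added above carries a base MSR index, a count, and a bitmap holding one bit per MSR. A standalone sketch of the lookup such a range supports (names and the allow/deny meaning of a set bit are illustrative; in KVM the interpretation depends on the range flags):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct msr_range {			/* illustrative, not the kernel struct */
	uint32_t base;			/* first MSR index covered */
	uint32_t nmsrs;			/* number of bits in the bitmap */
	const uint8_t *bitmap;		/* one bit per MSR from base */
};

static bool range_has_msr(const struct msr_range *r, uint32_t msr)
{
	uint32_t off = msr - r->base;	/* unsigned wrap rejects msr < base */

	if (off >= r->nmsrs)
		return false;		/* not covered by this range */
	return r->bitmap[off / 8] & (1u << (off % 8));
}

int main(void)
{
	const uint8_t bits[2] = { 0x05, 0x00 };	/* base+0 and base+2 set */
	struct msr_range r = { .base = 0xc0000080, .nmsrs = 16, .bitmap = bits };

	printf("%d %d\n", range_has_msr(&r, 0xc0000080),	/* 1 */
	       range_has_msr(&r, 0xc0000081));			/* 0 */
	return 0;
}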
5441 return -EFAULT; in kvm_vm_ioctl_set_msr_filter()
5448 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
5452 return -ENOMEM; in kvm_vm_ioctl_set_msr_filter()
5462 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
5464 /* The per-VM filter is protected by kvm->lock... */ in kvm_vm_ioctl_set_msr_filter()
5465 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); in kvm_vm_ioctl_set_msr_filter()
5467 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); in kvm_vm_ioctl_set_msr_filter()
5468 synchronize_srcu(&kvm->srcu); in kvm_vm_ioctl_set_msr_filter()
5473 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
5481 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
5483 int r = -ENOTTY; in kvm_arch_vm_ioctl()
5485 * This union makes it completely explicit to gcc-3.x in kvm_arch_vm_ioctl()
5502 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5503 r = -EINVAL; in kvm_arch_vm_ioctl()
5504 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5506 r = -EFAULT; in kvm_arch_vm_ioctl()
5511 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5521 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5523 r = -EEXIST; in kvm_arch_vm_ioctl()
5527 r = -EINVAL; in kvm_arch_vm_ioctl()
5528 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5547 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ in kvm_arch_vm_ioctl()
5549 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
5551 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5558 r = -EFAULT; in kvm_arch_vm_ioctl()
5563 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5564 r = -EEXIST; in kvm_arch_vm_ioctl()
5565 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5567 r = -ENOMEM; in kvm_arch_vm_ioctl()
5568 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
5569 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5572 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5575 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
5584 r = -ENXIO; in kvm_arch_vm_ioctl()
5590 r = -EFAULT; in kvm_arch_vm_ioctl()
5599 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
5608 r = -ENXIO; in kvm_arch_vm_ioctl()
5617 r = -EFAULT; in kvm_arch_vm_ioctl()
5620 r = -ENXIO; in kvm_arch_vm_ioctl()
5621 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5626 r = -EFAULT; in kvm_arch_vm_ioctl()
5633 r = -EFAULT; in kvm_arch_vm_ioctl()
5636 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5637 r = -ENXIO; in kvm_arch_vm_ioctl()
5638 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5642 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5646 r = -ENXIO; in kvm_arch_vm_ioctl()
5647 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5652 r = -EFAULT; in kvm_arch_vm_ioctl()
5659 r = -EFAULT; in kvm_arch_vm_ioctl()
5662 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5663 r = -ENXIO; in kvm_arch_vm_ioctl()
5664 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5668 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5673 r = -EFAULT; in kvm_arch_vm_ioctl()
5676 r = -ENXIO; in kvm_arch_vm_ioctl()
5677 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5684 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5685 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5686 r = -EBUSY; in kvm_arch_vm_ioctl()
5688 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
5689 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5693 r = -EFAULT; in kvm_arch_vm_ioctl()
5696 r = -EINVAL; in kvm_arch_vm_ioctl()
5699 memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); in kvm_arch_vm_ioctl()
5707 r = -EFAULT; in kvm_arch_vm_ioctl()
5711 r = -EINVAL; in kvm_arch_vm_ioctl()
5723 kvm->arch.kvmclock_offset += user_ns.clock - now_ns; in kvm_arch_vm_ioctl()
5733 user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; in kvm_arch_vm_ioctl()
5736 r = -EFAULT; in kvm_arch_vm_ioctl()
5743 r = -ENOTTY; in kvm_arch_vm_ioctl()
5751 r = -EFAULT; in kvm_arch_vm_ioctl()
5755 r = -ENOTTY; in kvm_arch_vm_ioctl()
5763 r = -EFAULT; in kvm_arch_vm_ioctl()
5767 r = -ENOTTY; in kvm_arch_vm_ioctl()
5775 r = -EFAULT; in kvm_arch_vm_ioctl()
5788 r = -ENOTTY; in kvm_arch_vm_ioctl()
5849 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= in kvm_init_msr_list()
5854 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= in kvm_init_msr_list()
5859 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= in kvm_init_msr_list()
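The checks above use the classic unsigned range test: subtracting the base in unsigned arithmetic lets a single comparison reject indices both below the base (they wrap around to huge values) and past the end. A standalone demonstration with an illustrative MSR block:

#include <stdint.h>
#include <stdio.h>

static int in_range(uint32_t x, uint32_t base, uint32_t count)
{
	return x - base < count;	/* one compare covers both bounds */
}

int main(void)
{
	uint32_t base = 0x580;	/* illustrative MSR block base */

	printf("%d\n", in_range(0x57f, base, 8));	/* 0: below base, wraps */
	printf("%d\n", in_range(0x583, base, 8));	/* 1: inside the block */
	printf("%d\n", in_range(0x588, base, 8));	/* 0: past the end */
	return 0;
}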
5897 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
5902 len -= n; in vcpu_mmio_write()
5917 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
5924 len -= n; in vcpu_mmio_read()
5950 /* NPT walks are always user-walks */ in translate_nested_gpa()
5952 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
5961 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
5969 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
5977 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
5984 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
5995 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
5997 unsigned offset = addr & (PAGE_SIZE-1); in kvm_read_guest_virt_helper()
5998 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_read_guest_virt_helper()
6010 bytes -= toread; in kvm_read_guest_virt_helper()
6029 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
6034 offset = addr & (PAGE_SIZE-1); in kvm_fetch_guest_virt()
6036 bytes = (unsigned)PAGE_SIZE - offset; in kvm_fetch_guest_virt()
6093 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
6096 unsigned offset = addr & (PAGE_SIZE-1); in kvm_write_guest_virt_helper()
6097 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_write_guest_virt_helper()
6108 bytes -= towrite; in kvm_write_guest_virt_helper()
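Both guest-virtual copy helpers above chunk the access at page boundaries so each piece can be translated (gva to gpa) on its own; the page offset bounds how much can move before the next translation. A standalone sketch of that loop (addresses and sizes are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned long addr = 0x100ff0;	/* hypothetical guest virtual address */
	unsigned bytes = 40;

	while (bytes) {
		unsigned offset = addr & (PAGE_SIZE - 1);
		unsigned chunk = bytes < PAGE_SIZE - offset ?
				 bytes : PAGE_SIZE - offset;

		/* each chunk would be translated and copied on its own */
		printf("copy %u bytes at %#lx (page offset %u)\n",
		       chunk, addr, offset);
		bytes -= chunk;
		addr += chunk;
	}
	return 0;
}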
6134 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
6191 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
6192 vcpu->arch.mmio_access, 0, access)) { in vcpu_mmio_gva_to_gpa()
6193 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
6194 (gva & (PAGE_SIZE - 1)); in vcpu_mmio_gva_to_gpa()
6199 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
6202 return -1; in vcpu_mmio_gva_to_gpa()
6233 if (vcpu->mmio_read_completed) { in read_prepare()
6235 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
6236 vcpu->mmio_read_completed = 0; in read_prepare()
6271 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
6273 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
6299 bool write = ops->write; in emulator_read_write_onepage()
6301 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
6310 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && in emulator_read_write_onepage()
6311 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
6312 gpa = ctxt->gpa_val; in emulator_read_write_onepage()
6320 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
6326 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
6331 bytes -= handled; in emulator_read_write_onepage()
6334 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
6335 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
6336 frag->gpa = gpa; in emulator_read_write_onepage()
6337 frag->data = val; in emulator_read_write_onepage()
6338 frag->len = bytes; in emulator_read_write_onepage()
6352 if (ops->read_write_prepare && in emulator_read_write()
6353 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
6356 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
6359 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { in emulator_read_write()
6362 now = -addr & ~PAGE_MASK; in emulator_read_write()
6369 if (ctxt->mode != X86EMUL_MODE_PROT64) in emulator_read_write()
6372 bytes -= now; in emulator_read_write()
6380 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
6383 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
6385 vcpu->mmio_needed = 1; in emulator_read_write()
6386 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
6388 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
6389 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
6390 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
6391 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
6393 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
6441 if (bytes > 8 || (bytes & (bytes - 1))) in emulator_cmpxchg_emulated()
6455 page_line_mask = ~(cache_line_size() - 1); in emulator_cmpxchg_emulated()
6459 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) in emulator_cmpxchg_emulated()
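The cmpxchg emulation path only handles operands that are a power of two up to 8 bytes and that stay within one cache line: bytes & (bytes - 1) tests the former, and masking both endpoints with the line mask tests the latter. A standalone sketch (a 64-byte line size is assumed for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long gpa = 0x1038;	/* hypothetical guest physical address */
	unsigned bytes = 8;
	unsigned long line_mask = ~(64ul - 1);	/* 64-byte lines assumed */

	if (bytes > 8 || (bytes & (bytes - 1)))
		return 1;	/* operand is not 1, 2, 4 or 8 bytes */
	if (((gpa + bytes - 1) & line_mask) != (gpa & line_mask))
		return 1;	/* first and last byte on different lines */
	printf("cmpxchg emulation may proceed\n");
	return 0;
}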
6503 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
6504 if (vcpu->arch.pio.in) in kernel_pio()
6505 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
6506 vcpu->arch.pio.size, pd); in kernel_pio()
6509 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
6513 pd += vcpu->arch.pio.size; in kernel_pio()
6522 vcpu->arch.pio.port = port; in emulator_pio_in_out()
6523 vcpu->arch.pio.in = in; in emulator_pio_in_out()
6524 vcpu->arch.pio.count = count; in emulator_pio_in_out()
6525 vcpu->arch.pio.size = size; in emulator_pio_in_out()
6527 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
6528 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
6532 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
6533 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
6534 vcpu->run->io.size = size; in emulator_pio_in_out()
6535 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
6536 vcpu->run->io.count = count; in emulator_pio_in_out()
6537 vcpu->run->io.port = port; in emulator_pio_in_out()
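When kernel_pio() cannot complete the port access, emulator_pio_in_out() fills in run->io and exits to userspace; the data itself lives in the shared kvm_run mapping at run->io.data_offset, one page in (the PIO page). A rough userspace-side sketch of that contract, with plain structs standing in for the real KVM ABI:

#include <stdint.h>
#include <stdio.h>

struct io_exit {		/* illustrative stand-in for run->io */
	uint8_t direction;	/* 0 = IN, 1 = OUT */
	uint8_t size;
	uint16_t port;
	uint32_t count;
	uint64_t data_offset;	/* where the bytes live in the mapping */
};

int main(void)
{
	static uint8_t shared[2 * 4096];	/* stand-in for the kvm_run mmap */
	struct io_exit io = {
		.direction = 0, .size = 1, .port = 0x60, .count = 1,
		.data_offset = 4096,	/* KVM_PIO_PAGE_OFFSET * PAGE_SIZE */
	};

	/* userspace satisfies the IN by writing at data_offset */
	shared[io.data_offset] = 0xfa;
	printf("port %#x <- %#x\n", io.port, shared[io.data_offset]);
	return 0;
}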
6547 if (vcpu->arch.pio.count) in emulator_pio_in()
6550 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in()
6555 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in()
6556 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in()
6557 vcpu->arch.pio.count = 0; in emulator_pio_in()
6576 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out()
6577 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out()
6606 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6607 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
6610 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6645 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; in mk_cr_64()
6658 value = vcpu->arch.cr2; in emulator_get_cr()
6687 vcpu->arch.cr2 = val; in emulator_set_cr()
6700 res = -1; in emulator_set_cr()
6756 set_desc_base(desc, (unsigned long)var.base); in emulator_get_segment()
6759 *base3 = var.base >> 32; in emulator_get_segment()
6761 desc->type = var.type; in emulator_get_segment()
6762 desc->s = var.s; in emulator_get_segment()
6763 desc->dpl = var.dpl; in emulator_get_segment()
6764 desc->p = var.present; in emulator_get_segment()
6765 desc->avl = var.avl; in emulator_get_segment()
6766 desc->l = var.l; in emulator_get_segment()
6767 desc->d = var.db; in emulator_get_segment()
6768 desc->g = var.g; in emulator_get_segment()
6781 var.base = get_desc_base(desc); in emulator_set_segment()
6783 var.base |= ((u64)base3) << 32; in emulator_set_segment()
6786 if (desc->g) in emulator_set_segment()
6788 var.type = desc->type; in emulator_set_segment()
6789 var.dpl = desc->dpl; in emulator_set_segment()
6790 var.db = desc->d; in emulator_set_segment()
6791 var.s = desc->s; in emulator_set_segment()
6792 var.l = desc->l; in emulator_set_segment()
6793 var.g = desc->g; in emulator_set_segment()
6794 var.avl = desc->avl; in emulator_set_segment()
6795 var.present = desc->p; in emulator_set_segment()
6839 return vcpu->arch.smbase; in emulator_get_smbase()
6846 vcpu->arch.smbase = smbase; in emulator_set_smbase()
6863 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
6871 &ctxt->exception); in emulator_intercept()
6913 return emul_to_vcpu(ctxt)->arch.hflags; in emulator_get_hflags()
6920 vcpu->arch.hflags = emul_flags; in emulator_set_hflags()
7008 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
7009 if (ctxt->exception.vector == PF_VECTOR) in inject_emulated_exception()
7010 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
7012 if (ctxt->exception.error_code_valid) in inject_emulated_exception()
7013 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
7014 ctxt->exception.error_code); in inject_emulated_exception()
7016 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
7030 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
7031 ctxt->ops = &emulate_ops; in alloc_emulate_ctxt()
7032 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
7039 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
7044 ctxt->gpa_available = false; in init_emulate_ctxt()
7045 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
7046 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
7048 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
7049 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
7050 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : in init_emulate_ctxt()
7058 ctxt->interruptibility = 0; in init_emulate_ctxt()
7059 ctxt->have_exception = false; in init_emulate_ctxt()
7060 ctxt->exception.vector = -1; in init_emulate_ctxt()
7061 ctxt->perm_ok = false; in init_emulate_ctxt()
7064 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
7069 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
7074 ctxt->op_bytes = 2; in kvm_inject_realmode_interrupt()
7075 ctxt->ad_bytes = 2; in kvm_inject_realmode_interrupt()
7076 ctxt->_eip = ctxt->eip + inc_eip; in kvm_inject_realmode_interrupt()
7082 ctxt->eip = ctxt->_eip; in kvm_inject_realmode_interrupt()
7083 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
7084 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
7091 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
7100 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7101 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7102 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7109 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7110 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7111 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7132 if (!vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7150 * retry instruction -> write #PF -> emulation fail -> retry in reexecute_instruction()
7151 * instruction -> ... in reexecute_instruction()
7153 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7164 /* The instructions are well-emulated on direct mmu. */ in reexecute_instruction()
7165 if (vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7168 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7169 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
7170 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7173 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7180 * and it failed try to unshadow page and re-enter the in reexecute_instruction()
7183 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7199 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
7200 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
7203 * If the emulation is caused by #PF and it is non-page_table in retry_instruction()
7204 * writing instruction, it means the VM-EXIT is caused by shadow page protection, so we can zap the shadow page and retry this instruction directly. in retry_instruction()
7208 * Note: if the guest uses a non-page-table modifying instruction in retry_instruction()
7215 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
7227 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) in retry_instruction()
7230 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
7231 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
7233 if (!vcpu->arch.mmu->direct_map) in retry_instruction()
7236 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
7246 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
7248 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
7274 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
7276 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
7277 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_do_singlestep()
7278 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
7279 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
7280 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_do_singlestep()
7312 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_breakpoint()
7313 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_breakpoint()
7314 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_breakpoint()
7317 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_breakpoint()
7318 vcpu->arch.eff_db); in kvm_vcpu_check_breakpoint()
7321 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_check_breakpoint()
7322 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_breakpoint()
7323 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_breakpoint()
7324 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_check_breakpoint()
7330 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_breakpoint()
7334 vcpu->arch.dr7, in kvm_vcpu_check_breakpoint()
7335 vcpu->arch.db); in kvm_vcpu_check_breakpoint()
7349 switch (ctxt->opcode_len) { in is_vmware_backdoor_opcode()
7351 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7368 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7385 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
7398 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; in x86_decode_emulated_instruction()
7403 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
7413 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
7420 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
7426 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
7427 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
7444 if (ctxt->have_exception) { in x86_emulate_instruction()
7446 * #UD should result in just EMULATION_FAILED, and trap-like exceptions should not be encountered during decode. in x86_emulate_instruction()
7449 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || in x86_emulate_instruction()
7450 exception_type(ctxt->exception.vector) == EXCPT_TRAP); in x86_emulate_instruction()
7467 * updating interruptibility state and injecting single-step #DBs. in x86_emulate_instruction()
7470 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
7471 if (ctxt->eflags & X86_EFLAGS_RF) in x86_emulate_instruction()
7472 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
7481 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
7482 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
7489 ctxt->exception.address = cr2_or_gpa; in x86_emulate_instruction()
7492 if (vcpu->arch.mmu->direct_map) { in x86_emulate_instruction()
7493 ctxt->gpa_available = true; in x86_emulate_instruction()
7494 ctxt->gpa_val = cr2_or_gpa; in x86_emulate_instruction()
7498 ctxt->exception.address = 0; in x86_emulate_instruction()
7514 if (ctxt->have_exception) { in x86_emulate_instruction()
7518 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
7519 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
7520 /* FIXME: return into emulator if single-stepping. */ in x86_emulate_instruction()
7521 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
7524 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
7527 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
7528 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
7530 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
7533 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
7541 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
7542 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
7543 if (!ctxt->have_exception || in x86_emulate_instruction()
7544 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { in x86_emulate_instruction()
7545 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
7546 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
7550 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
7559 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) in x86_emulate_instruction()
7562 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
7582 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
7588 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
7590 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
7610 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
7611 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
7615 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
7616 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
7626 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
7628 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
7629 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
7634 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
7637 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform the copy and tracing. in complete_fast_pio_in()
7640 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
7661 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
7662 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
7691 khz = freq->new; in tsc_khz_changed()
7712 /* TSC frequency always matches when on Hyper-V */ in kvm_hyperv_tsc_notifier()
7718 struct kvm_arch *ka = &kvm->arch; in kvm_hyperv_tsc_notifier()
7720 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7730 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7786 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
7789 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
7795 if (freq->old < freq->new && send_ipi) { in __kvmclock_cpufreq_notifier()
7818 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) in kvmclock_cpufreq_notifier()
7820 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) in kvmclock_cpufreq_notifier()
7823 for_each_cpu(cpu, freq->policy->cpus) in kvmclock_cpufreq_notifier()
7851 if (policy->cpuinfo.max_freq) in kvm_timer_init()
7852 max_tsc_khz = policy->cpuinfo.max_freq; in kvm_timer_init()
7899 (unsigned long *)&vcpu->arch.pmu.global_status); in kvm_handle_intel_pt_intr()
7955 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && in pvclock_gtod_notify()
7973 r = -EEXIST; in kvm_arch_init()
7977 if (!ops->cpu_has_kvm_support()) { in kvm_arch_init()
7979 r = -EOPNOTSUPP; in kvm_arch_init()
7982 if (ops->disabled_by_bios()) { in kvm_arch_init()
7984 r = -EOPNOTSUPP; in kvm_arch_init()
7995 r = -EOPNOTSUPP; in kvm_arch_init()
7999 r = -ENOMEM; in kvm_arch_init()
8029 if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) in kvm_arch_init()
8039 if (pi_inject_timer == -1) in kvm_arch_init()
8088 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
8090 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
8093 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
8103 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered KVM_EXIT_DEBUG here. in kvm_emulate_halt()
8120 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8123 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8132 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
8134 ret = -KVM_EFAULT; in kvm_pv_clock_pairing()
8143 * @apicid - apicid of vcpu to be kicked.
8161 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
8169 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8172 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8182 map = rcu_dereference(kvm->arch.apic_map); in kvm_sched_yield()
8184 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) in kvm_sched_yield()
8185 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
8189 if (target && READ_ONCE(target->ready)) in kvm_sched_yield()
8198 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
8219 ret = -KVM_EPERM; in kvm_emulate_hypercall()
8223 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8233 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
8234 kvm_sched_yield(vcpu->kvm, a1); in kvm_emulate_hypercall()
8246 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
8252 kvm_sched_yield(vcpu->kvm, a0); in kvm_emulate_hypercall()
8256 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8264 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
8278 &ctxt->exception); in emulator_fix_hypercall()
8283 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
8284 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
8289 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
8291 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
8292 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
8293 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
8294 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
8295 kvm_run->ready_for_interrupt_injection = in post_kvm_run_save()
8296 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
8310 if (vcpu->arch.apicv_active) in update_cr8_intercept()
8313 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
8316 max_irr = -1; in update_cr8_intercept()
8318 if (max_irr != -1) in update_cr8_intercept()
8328 if (vcpu->arch.exception.error_code && !is_protmode(vcpu)) in kvm_inject_exception()
8329 vcpu->arch.exception.error_code = false; in kvm_inject_exception()
8340 if (vcpu->arch.exception.injected) { in inject_pending_event()
8348 * Trap-like exceptions, e.g. #DB, have higher priority than in inject_pending_event()
8351 * Fault-like exceptions, e.g. #GP and #PF, are the lowest in inject_pending_event()
8353 * execution, i.e. a pending fault-like exception means the in inject_pending_event()
8358 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
8359 if (vcpu->arch.nmi_injected) { in inject_pending_event()
8362 } else if (vcpu->arch.interrupt.injected) { in inject_pending_event()
8368 WARN_ON_ONCE(vcpu->arch.exception.injected && in inject_pending_event()
8369 vcpu->arch.exception.pending); in inject_pending_event()
8373 * in order for caller to determine if it should require immediate-exit in inject_pending_event()
8378 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8384 if (vcpu->arch.exception.pending) { in inject_pending_event()
8385 trace_kvm_inj_exception(vcpu->arch.exception.nr, in inject_pending_event()
8386 vcpu->arch.exception.has_error_code, in inject_pending_event()
8387 vcpu->arch.exception.error_code); in inject_pending_event()
8389 vcpu->arch.exception.pending = false; in inject_pending_event()
8390 vcpu->arch.exception.injected = true; in inject_pending_event()
8392 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
8396 if (vcpu->arch.exception.nr == DB_VECTOR) { in inject_pending_event()
8398 if (vcpu->arch.dr7 & DR7_GD) { in inject_pending_event()
8399 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
8410 * due to architectural conditions (e.g. IF=0) a window-open exit in inject_pending_event()
8411 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending in inject_pending_event()
8417 * The kvm_x86_ops hooks communicate this by returning -EBUSY. in inject_pending_event()
8419 if (vcpu->arch.smi_pending) { in inject_pending_event()
8420 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8424 vcpu->arch.smi_pending = false; in inject_pending_event()
8425 ++vcpu->arch.smi_count; in inject_pending_event()
8432 if (vcpu->arch.nmi_pending) { in inject_pending_event()
8433 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8437 --vcpu->arch.nmi_pending; in inject_pending_event()
8438 vcpu->arch.nmi_injected = true; in inject_pending_event()
8443 if (vcpu->arch.nmi_pending) in inject_pending_event()
8448 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8461 kvm_x86_ops.nested_ops->hv_timer_pending && in inject_pending_event()
8462 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
8465 WARN_ON(vcpu->arch.exception.pending); in inject_pending_event()
8482 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8485 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
8486 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
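process_nmi() drains every NMI queued by other contexts in one atomic exchange, then clamps the result: x86 can hold at most one NMI in service plus one latched, so the limit is 2 (dropping to 1 when an NMI is already masked or injected). A standalone sketch of the drain-and-clamp:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint nmi_queued = 3;	/* e.g. three NMIs posted at once */
	unsigned nmi_pending = 0;
	unsigned limit = 2;		/* one in service plus one latched */

	nmi_pending += atomic_exchange(&nmi_queued, 0);
	if (nmi_pending > limit)
		nmi_pending = limit;	/* the rest collapse, as on real hardware */
	printf("pending=%u queued=%u\n", nmi_pending,
	       (unsigned)atomic_load(&nmi_queued));
	return 0;
}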
8493 flags |= seg->g << 23; in enter_smm_get_segment_flags()
8494 flags |= seg->db << 22; in enter_smm_get_segment_flags()
8495 flags |= seg->l << 21; in enter_smm_get_segment_flags()
8496 flags |= seg->avl << 20; in enter_smm_get_segment_flags()
8497 flags |= seg->present << 15; in enter_smm_get_segment_flags()
8498 flags |= seg->dpl << 13; in enter_smm_get_segment_flags()
8499 flags |= seg->s << 12; in enter_smm_get_segment_flags()
8500 flags |= seg->type << 8; in enter_smm_get_segment_flags()
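enter_smm_get_segment_flags() packs the cached segment attributes back into the bit positions they occupy in a hardware segment descriptor's high word, which is the format the SMM state-save area expects. A standalone sketch of the same packing:

#include <stdint.h>
#include <stdio.h>

struct seg {	/* the attribute fields cached per segment */
	unsigned g, db, l, avl, present, dpl, s, type;
};

static uint32_t seg_flags(const struct seg *seg)
{
	uint32_t flags = 0;

	flags |= seg->g       << 23;	/* granularity */
	flags |= seg->db      << 22;	/* default operand size */
	flags |= seg->l       << 21;	/* 64-bit code */
	flags |= seg->avl     << 20;
	flags |= seg->present << 15;
	flags |= seg->dpl     << 13;
	flags |= seg->s       << 12;	/* code/data vs. system */
	flags |= seg->type    << 8;
	return flags;
}

int main(void)
{
	struct seg cs = { .g = 1, .db = 1, .present = 1, .s = 1, .type = 0xb };

	printf("%#x\n", seg_flags(&cs));	/* flat 32-bit code segment */
	return 0;
}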
8515 offset = 0x7f2c + (n - 3) * 12; in enter_smm_save_seg_32()
8517 put_smstate(u32, buf, offset + 8, seg.base); in enter_smm_save_seg_32()
8536 put_smstate(u64, buf, offset + 8, seg.base); in enter_smm_save_seg_64()
8562 put_smstate(u32, buf, 0x7f64, seg.base); in enter_smm_save_state_32()
8568 put_smstate(u32, buf, 0x7f80, seg.base); in enter_smm_save_state_32()
8587 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
8599 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in enter_smm_save_state_64()
8613 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
8618 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
8624 put_smstate(u64, buf, 0x7e98, seg.base); in enter_smm_save_state_64()
8634 put_smstate(u64, buf, 0x7e78, seg.base); in enter_smm_save_state_64()
8652 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in enter_smm()
8662 * Give pre_enter_smm() a chance to make ISA-specific changes to the vCPU state (e.g. leave guest mode) after we've saved the state into in enter_smm()
8664 * the SMM state-save area. in enter_smm()
8668 vcpu->arch.hflags |= HF_SMM_MASK; in enter_smm()
8669 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
8672 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
8679 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
8681 vcpu->arch.cr0 = cr0; in enter_smm()
8691 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
8692 cs.base = vcpu->arch.smbase; in enter_smm()
8695 ds.base = 0; in enter_smm()
8727 vcpu->arch.smi_pending = true; in process_smi()
8754 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); in kvm_vcpu_update_apicv()
8763 * In particular, kvm_request_apicv_update() expects kvm->srcu not to be locked, because it calls __x86_set_memory_region() which does
8765 * synchronize_srcu(&kvm->srcu).
8776 old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); in kvm_request_apicv_update()
8785 old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new); in kvm_request_apicv_update()
8813 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
8815 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
8816 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8818 if (vcpu->arch.apicv_active) in vcpu_scan_ioapic()
8820 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
8821 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8825 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
8834 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
8837 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
8838 vcpu_to_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
8869 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
8890 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
8900 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
8924 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
8929 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
8930 vcpu->mmio_needed = 0; in vcpu_enter_guest()
8936 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
8951 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
8952 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
8953 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
8954 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
8955 vcpu->run->eoi.vector = in vcpu_enter_guest()
8956 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
8968 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
8969 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
8974 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
8975 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
8980 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
8981 vcpu->run->hyperv = vcpu->arch.hyperv.exit; in vcpu_enter_guest()
8988 * KVM_REQ_HV_STIMER has to be processed after KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers in vcpu_enter_guest()
8989 * depend on the guest clock being up-to-date in vcpu_enter_guest()
9002 ++vcpu->stat.req_event; in vcpu_enter_guest()
9004 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
9034 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
9036 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
9039 * 1) We should set ->mode before checking ->requests. Please see the comment in kvm_vcpu_exiting_guest_mode(). in vcpu_enter_guest()
9042 * 2) For APICv, we should set ->mode before checking PID.ON. This pairs with the memory barrier implicit in pi_test_and_set_on (see vmx_deliver_posted_interrupt). in vcpu_enter_guest()
9056 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) in vcpu_enter_guest()
9060 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9064 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9080 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
9082 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
9083 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
9084 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
9085 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
9086 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
9087 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9100 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
9101 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
9105 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9118 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
9119 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
9121 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9128 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. in vcpu_enter_guest()
9135 ++vcpu->stat.exits; in vcpu_enter_guest()
9149 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; in vcpu_enter_guest()
9151 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); in vcpu_enter_guest()
9152 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; in vcpu_enter_guest()
9159 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9169 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
9172 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
9182 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
9192 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
9194 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
9204 switch(vcpu->arch.mp_state) { in vcpu_block()
9206 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
9207 vcpu->arch.mp_state = in vcpu_block()
9211 vcpu->arch.apf.halted = false; in vcpu_block()
9216 return -EINTR; in vcpu_block()
9224 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9226 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
9227 !vcpu->arch.apf.halted); in kvm_vcpu_running()
9233 struct kvm *kvm = vcpu->kvm; in vcpu_run()
9235 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9236 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
9245 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
9262 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
9263 ++vcpu->stat.request_irq_exits; in vcpu_run()
9268 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9272 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9276 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9285 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
9287 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
9293 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
9318 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
9322 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
9325 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
9326 len = min(8u, frag->len); in complete_emulated_mmio()
9327 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
9328 memcpy(frag->data, run->mmio.data, len); in complete_emulated_mmio()
9330 if (frag->len <= 8) { in complete_emulated_mmio()
9333 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
9336 frag->data += len; in complete_emulated_mmio()
9337 frag->gpa += len; in complete_emulated_mmio()
9338 frag->len -= len; in complete_emulated_mmio()
9341 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
9342 vcpu->mmio_needed = 0; in complete_emulated_mmio()
9344 /* FIXME: return into emulator if single-stepping. */ in complete_emulated_mmio()
9345 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9347 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
9351 run->exit_reason = KVM_EXIT_MMIO; in complete_emulated_mmio()
9352 run->mmio.phys_addr = frag->gpa; in complete_emulated_mmio()
9353 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9354 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_emulated_mmio()
9355 run->mmio.len = min(8u, frag->len); in complete_emulated_mmio()
9356 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
9357 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
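complete_emulated_mmio() advances through one fragment at most 8 bytes per userspace round-trip: each completion consumes min(8, frag->len), bumps gpa and data, and re-exits until the fragment is empty. A standalone sketch of that walk:

#include <stdio.h>

struct frag {
	unsigned long gpa;
	unsigned len;
};

int main(void)
{
	struct frag f = { .gpa = 0xfee00000, .len = 20 };	/* hypothetical */

	while (f.len) {
		unsigned len = f.len < 8 ? f.len : 8;

		printf("exit to userspace: %u bytes at %#lx\n", len, f.gpa);
		f.gpa += len;	/* advance past what userspace handled */
		f.len -= len;
	}
	return 0;
}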
9368 memcpy(&fpu->state, &current->thread.fpu.state, in kvm_save_current_fpu()
9379 kvm_save_current_fpu(vcpu->arch.user_fpu); in kvm_load_guest_fpu()
9382 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, in kvm_load_guest_fpu()
9396 kvm_save_current_fpu(vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
9398 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); in kvm_put_guest_fpu()
9403 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
9409 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
9416 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
9417 if (kvm_run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
9418 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9424 r = -EAGAIN; in kvm_arch_vcpu_ioctl_run()
9426 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9427 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
9428 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
9433 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) { in kvm_arch_vcpu_ioctl_run()
9434 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9438 if (kvm_run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
9444 /* re-sync apic's tpr */ in kvm_arch_vcpu_ioctl_run()
9446 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
9447 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9452 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
9453 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
9454 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
9459 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
9461 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
9462 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9468 if (kvm_run->kvm_valid_regs) in kvm_arch_vcpu_ioctl_run()
9479 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
9487 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
9488 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
9490 regs->rax = kvm_rax_read(vcpu); in __get_regs()
9491 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
9492 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
9493 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
9494 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
9495 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
9496 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
9497 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
9499 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
9500 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
9501 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
9502 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
9503 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
9504 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
9505 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
9506 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
9509 regs->rip = kvm_rip_read(vcpu); in __get_regs()
9510 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
9523 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
9524 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
9526 kvm_rax_write(vcpu, regs->rax); in __set_regs()
9527 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
9528 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
9529 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
9530 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
9531 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
9532 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
9533 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
9535 kvm_r8_write(vcpu, regs->r8); in __set_regs()
9536 kvm_r9_write(vcpu, regs->r9); in __set_regs()
9537 kvm_r10_write(vcpu, regs->r10); in __set_regs()
9538 kvm_r11_write(vcpu, regs->r11); in __set_regs()
9539 kvm_r12_write(vcpu, regs->r12); in __set_regs()
9540 kvm_r13_write(vcpu, regs->r13); in __set_regs()
9541 kvm_r14_write(vcpu, regs->r14); in __set_regs()
9542 kvm_r15_write(vcpu, regs->r15); in __set_regs()
9545 kvm_rip_write(vcpu, regs->rip); in __set_regs()
9546 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
9548 vcpu->arch.exception.pending = false; in __set_regs()
9575 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs()
9576 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs()
9577 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs()
9578 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs()
9579 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs()
9580 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs()
9582 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs()
9583 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs()
9586 sregs->idt.limit = dt.size; in __get_sregs()
9587 sregs->idt.base = dt.address; in __get_sregs()
9589 sregs->gdt.limit = dt.size; in __get_sregs()
9590 sregs->gdt.base = dt.address; in __get_sregs()
9592 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs()
9593 sregs->cr2 = vcpu->arch.cr2; in __get_sregs()
9594 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs()
9595 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs()
9596 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs()
9597 sregs->efer = vcpu->arch.efer; in __get_sregs()
9598 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs()
9600 memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); in __get_sregs()
9602 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
9603 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
9604 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
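/*
 * Editor's note: interrupt_bitmap carries at most one set bit: the
 * vector of a hard (non-soft) external interrupt that was injected
 * but not yet delivered. Userspace saves it with KVM_GET_SREGS and
 * __set_sregs() re-queues it, which keeps the interrupt from being
 * lost across migration.
 */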
9624 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
9625 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
9626 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
9628 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
9639 int ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
9644 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) in kvm_arch_vcpu_ioctl_set_mpstate()
9652 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
9653 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || in kvm_arch_vcpu_ioctl_set_mpstate()
9654 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) in kvm_arch_vcpu_ioctl_set_mpstate()
9657 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_arch_vcpu_ioctl_set_mpstate()
9658 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
9659 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
9661 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
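/*
 * Editor's note (interpretation): a userspace-supplied
 * KVM_MP_STATE_SIPI_RECEIVED is rewritten as INIT_RECEIVED plus a
 * pending KVM_APIC_SIPI event, so the regular event-delivery path
 * performs the actual startup; the check above rejects both INIT
 * states while INIT is latched (e.g. in SMM), where they would be
 * architecturally impossible.
 */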
9673 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
9681 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
9682 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
9683 vcpu->run->internal.ndata = 0; in kvm_task_switch()
9687 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
9688 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
9695 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_valid_sregs()
9698 * 64-bit mode (though maybe in a 32-bit code segment). in kvm_valid_sregs()
9701 if (!(sregs->cr4 & X86_CR4_PAE) in kvm_valid_sregs()
9702 || !(sregs->efer & EFER_LMA)) in kvm_valid_sregs()
9703 return -EINVAL; in kvm_valid_sregs()
9704 if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits) in kvm_valid_sregs()
9705 return -EINVAL; in kvm_valid_sregs()
9708 * Not in 64-bit mode: EFER.LMA is clear and the code in kvm_valid_sregs()
9709 * segment cannot be 64-bit. in kvm_valid_sregs()
9711 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_valid_sregs()
9712 return -EINVAL; in kvm_valid_sregs()
9715 return kvm_valid_cr4(vcpu, sregs->cr4); in kvm_valid_sregs()
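/*
 * Editor's sketch of the rules above, which mirror the architectural
 * constraints on long mode:
 *
 *     EFER.LME && CR0.PG  =>  CR4.PAE and EFER.LMA must be set, and
 *                             CR3 must not set reserved high bits;
 *     otherwise           =>  EFER.LMA must be clear and CS must not
 *                             be a 64-bit (L=1) code segment.
 *
 * Violations fail with -EINVAL before any vcpu state is modified.
 */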
9725 int ret = -EINVAL; in __set_sregs()
9730 apic_base_msr.data = sregs->apic_base; in __set_sregs()
9735 dt.size = sregs->idt.limit; in __set_sregs()
9736 dt.address = sregs->idt.base; in __set_sregs()
9738 dt.size = sregs->gdt.limit; in __set_sregs()
9739 dt.address = sregs->gdt.base; in __set_sregs()
9742 vcpu->arch.cr2 = sregs->cr2; in __set_sregs()
9743 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs()
9744 vcpu->arch.cr3 = sregs->cr3; in __set_sregs()
9747 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs()
9749 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs()
9750 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9752 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs()
9753 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9754 vcpu->arch.cr0 = sregs->cr0; in __set_sregs()
9756 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs()
9757 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & in __set_sregs()
9759 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9763 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs()
9765 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs()
9768 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs()
9775 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
9781 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs()
9782 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs()
9783 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs()
9784 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs()
9785 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs()
9786 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs()
9788 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs()
9789 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs()
9795 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs()
9797 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs()
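/*
 * Editor's note: the final special case recognizes the architectural
 * reset vector (CS selector 0xf000 with base 0xffff0000 in real mode)
 * and forces the vcpu runnable, because older userspace resets a vcpu
 * without explicitly unhalting it.
 */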
9825 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9826 r = -EBUSY; in kvm_arch_vcpu_ioctl_set_guest_debug()
9827 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
9829 if (dbg->control & KVM_GUESTDBG_INJECT_DB) in kvm_arch_vcpu_ioctl_set_guest_debug()
9841 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
9842 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
9843 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
9845 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9847 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9848 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9851 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9855 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
9856 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
9880 unsigned long vaddr = tr->linear_address; in kvm_arch_vcpu_ioctl_translate()
9886 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
9888 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
9889 tr->physical_address = gpa; in kvm_arch_vcpu_ioctl_translate()
9890 tr->valid = gpa != UNMAPPED_GVA; in kvm_arch_vcpu_ioctl_translate()
9891 tr->writeable = 1; in kvm_arch_vcpu_ioctl_translate()
9892 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
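/*
 * Editor's sketch (not part of x86.c): this services the KVM_TRANSLATE
 * vcpu ioctl. A minimal caller, assuming a valid vcpu fd and a guest
 * virtual address gva:
 *
 *     struct kvm_translation tr = { .linear_address = gva };
 *     if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) == 0 && tr.valid)
 *             printf("gva 0x%llx -> gpa 0x%llx\n",
 *                    tr.linear_address, tr.physical_address);
 *
 * Note that writeable/usermode are hardcoded to 1/0 here instead of
 * reflecting the permissions of the walked page tables.
 */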
9904 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
9905 memcpy(fpu->fpr, fxsave->st_space, 128); in kvm_arch_vcpu_ioctl_get_fpu()
9906 fpu->fcw = fxsave->cwd; in kvm_arch_vcpu_ioctl_get_fpu()
9907 fpu->fsw = fxsave->swd; in kvm_arch_vcpu_ioctl_get_fpu()
9908 fpu->ftwx = fxsave->twd; in kvm_arch_vcpu_ioctl_get_fpu()
9909 fpu->last_opcode = fxsave->fop; in kvm_arch_vcpu_ioctl_get_fpu()
9910 fpu->last_ip = fxsave->rip; in kvm_arch_vcpu_ioctl_get_fpu()
9911 fpu->last_dp = fxsave->rdp; in kvm_arch_vcpu_ioctl_get_fpu()
9912 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_get_fpu()
9924 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
9926 memcpy(fxsave->st_space, fpu->fpr, 128); in kvm_arch_vcpu_ioctl_set_fpu()
9927 fxsave->cwd = fpu->fcw; in kvm_arch_vcpu_ioctl_set_fpu()
9928 fxsave->swd = fpu->fsw; in kvm_arch_vcpu_ioctl_set_fpu()
9929 fxsave->twd = fpu->ftwx; in kvm_arch_vcpu_ioctl_set_fpu()
9930 fxsave->fop = fpu->last_opcode; in kvm_arch_vcpu_ioctl_set_fpu()
9931 fxsave->rip = fpu->last_ip; in kvm_arch_vcpu_ioctl_set_fpu()
9932 fxsave->rdp = fpu->last_dp; in kvm_arch_vcpu_ioctl_set_fpu()
9933 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_set_fpu()
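/*
 * Editor's note: both directions copy through the FXSAVE image. The
 * 128 bytes of fpr[] are the eight x87/MMX ST registers at 16 bytes
 * apiece (10 data bytes plus 6 bytes of padding each), and ftwx is
 * FXSAVE's abridged 8-bit tag word, not the full 16-bit FSAVE tag.
 */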
9943 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
9944 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
9946 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
9947 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
9949 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
9951 vcpu, &vcpu->run->s.regs.events); in store_regs()
9956 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) in sync_regs()
9957 return -EINVAL; in sync_regs()
9959 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
9960 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
9961 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
9963 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
9964 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
9965 return -EINVAL; in sync_regs()
9966 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
9968 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
9970 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
9971 return -EINVAL; in sync_regs()
9972 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
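/*
 * Editor's sketch (not part of x86.c): store_regs()/sync_regs() back
 * the KVM_CAP_SYNC_REGS fast path, which exchanges register state via
 * the shared kvm_run page instead of separate ioctls. Assuming the
 * capability is available, usage is roughly:
 *
 *     run->kvm_valid_regs = KVM_SYNC_X86_REGS;  // fill regs on exit
 *     ioctl(vcpu_fd, KVM_RUN, 0);
 *     run->s.regs.regs.rip += insn_len;         // patch state
 *     run->kvm_dirty_regs = KVM_SYNC_X86_REGS;  // consume on entry
 *     ioctl(vcpu_fd, KVM_RUN, 0);
 *
 * As the code shows, the kernel clears each dirty bit once the
 * corresponding state has been loaded into the vcpu.
 */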
9980 fpstate_init(&vcpu->arch.guest_fpu->state); in fx_init()
9982 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = in fx_init()
9988 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
9990 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
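/*
 * Editor's note: a freshly initialized guest FPU advertises only
 * XFEATURE_MASK_FP in guest XCR0, matching the architectural reset
 * value of 1 (legacy x87 state only), and CR0.ET is forced on since
 * it is hardwired to 1 on all hardware KVM supports.
 */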
9995 if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vcpu_precreate()
10007 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
10008 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
10010 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
10018 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
10022 if (kvm_apicv_activated(vcpu->kvm)) in kvm_arch_vcpu_create()
10023 vcpu->arch.apicv_active = true; in kvm_arch_vcpu_create()
10027 r = -ENOMEM; in kvm_arch_vcpu_create()
10032 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
10034 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_create()
10036 if (!vcpu->arch.mce_banks) in kvm_arch_vcpu_create()
10038 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
10040 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
10047 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10049 if (!vcpu->arch.user_fpu) { in kvm_arch_vcpu_create()
10054 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10056 if (!vcpu->arch.guest_fpu) { in kvm_arch_vcpu_create()
10062 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
10063 vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); in kvm_arch_vcpu_create()
10065 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
10070 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
10071 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
10079 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
10080 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
10089 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
10091 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_create()
10093 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
10095 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
10097 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
10099 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
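/*
 * Editor's note: the unwind labels above release resources in strict
 * reverse order of allocation (guest_fpu, user_fpu, emulate_ctxt,
 * wbinvd mask, MCE banks, pio page), the usual goto-chain cleanup
 * idiom for multi-step constructors in kernel code.
 */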
10109 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
10113 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
10120 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
10122 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
10124 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
10125 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
10131 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; in kvm_arch_vcpu_destroy()
10134 kvm_release_pfn(cache->pfn, cache->dirty, cache); in kvm_arch_vcpu_destroy()
10140 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
10141 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
10142 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_destroy()
10143 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
10147 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
10149 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
10151 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
10152 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
10153 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
10162 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
10164 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
10165 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
10166 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
10167 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
10168 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
10172 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
10174 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
10175 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
10178 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
10181 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
10182 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
10183 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
10189 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
10200 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10204 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10214 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
10216 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
10218 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
10221 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
10222 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
10223 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
10225 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
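/*
 * Editor's note: the debug registers come up in their architectural
 * reset state: DR6 reads as 0xffff0ff0 (DR6_INIT) and DR7 as 0x400
 * (DR7_FIXED_1), i.e. no breakpoints armed. Marking all GPRs both
 * available and dirty (~0) forces the next guest entry to reload the
 * full register file from vcpu->arch.regs.
 */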
10236 cs.base = vector << 12; in kvm_vcpu_deliver_sipi_vector()
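/*
 * Editor's note on the arithmetic: a SIPI vector V starts the AP in
 * real mode at physical address V << 12, i.e. CS.base = V * 4096 with
 * IP = 0 (the CS selector is correspondingly V << 8). A vector of
 * 0x9a, for example, starts the vcpu at 0x9a000.
 */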
10260 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
10262 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
10264 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
10265 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
10295 * N.B. - this code below runs only on platforms with reliable TSC, in kvm_arch_hardware_enable()
10309 u64 delta_cyc = max_tsc - local_tsc; in kvm_arch_hardware_enable()
10311 kvm->arch.backwards_tsc_observed = true; in kvm_arch_hardware_enable()
10313 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
10314 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
10324 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
10325 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
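/*
 * Editor's note: this block compensates for a TSC that went backwards
 * across suspend/resume on hosts without a nonstop TSC: the largest
 * TSC value any vcpu observed becomes the baseline, each vcpu's
 * tsc_offset_adjustment absorbs the delta, backwards_tsc_observed is
 * latched per VM, and the last_tsc_* tracking is cleared so the next
 * guest TSC write re-synchronizes from scratch.
 */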
10348 r = ops->hardware_setup(); in kvm_arch_hardware_setup()
10352 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_arch_hardware_setup()
10393 return -EIO; in kvm_arch_check_processor_compat()
10395 return ops->check_processor_compatibility(); in kvm_arch_check_processor_compat()
10400 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
10406 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
10416 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
10417 if (pmu->version && unlikely(pmu->event_count)) { in kvm_arch_sched_in()
10418 pmu->need_cleanup = true; in kvm_arch_sched_in()
10426 kfree(kvm->arch.hyperv.hv_pa_pg); in kvm_arch_free_vm()
10436 return -EINVAL; in kvm_arch_init_vm()
10442 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
10443 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
10444 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
10445 INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); in kvm_arch_init_vm()
10446 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
10447 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
10450 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10451 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ in kvm_arch_init_vm()
10453 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10455 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
10456 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
10457 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
10459 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
10462 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
10464 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
10465 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
10500 mutex_lock(&kvm->lock); in kvm_free_vcpus()
10501 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
10502 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
10504 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
10505 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
10510 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
10511 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
10522 /* Called with kvm->slots_lock held. */ in __x86_set_memory_region()
10524 return -EINVAL; in __x86_set_memory_region()
10528 if (slot && slot->npages) in __x86_set_memory_region()
10529 return -EEXIST; in __x86_set_memory_region()
10540 if (!slot || !slot->npages) in __x86_set_memory_region()
10543 old_npages = slot->npages; in __x86_set_memory_region()
10574 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
10580 mutex_lock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10586 mutex_unlock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10590 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10594 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
10595 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10606 kvfree(slot->arch.rmap[i]); in kvm_arch_free_memslot()
10607 slot->arch.rmap[i] = NULL; in kvm_arch_free_memslot()
10612 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
10613 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
10629 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
10637 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_alloc_memslot_metadata()
10638 slot->base_gfn, level) + 1; in kvm_alloc_memslot_metadata()
10640 slot->arch.rmap[i] = in kvm_alloc_memslot_metadata()
10641 kvcalloc(lpages, sizeof(*slot->arch.rmap[i]), in kvm_alloc_memslot_metadata()
10643 if (!slot->arch.rmap[i]) in kvm_alloc_memslot_metadata()
10652 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
10654 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10656 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10657 linfo[lpages - 1].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
10658 ugfn = slot->userspace_addr >> PAGE_SHIFT; in kvm_alloc_memslot_metadata()
10663 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
10678 kvfree(slot->arch.rmap[i]); in kvm_alloc_memslot_metadata()
10679 slot->arch.rmap[i] = NULL; in kvm_alloc_memslot_metadata()
10683 kvfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
10684 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
10686 return -ENOMEM; in kvm_alloc_memslot_metadata()
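/*
 * Editor's note on the sizing math: per hugepage level, the slot needs
 * one lpage_info entry for every naturally aligned large page it
 * overlaps, which is gfn_to_index(base_gfn + npages - 1, base_gfn,
 * level) + 1. The first/last entries are pre-marked disallow_lpage
 * when either end of the slot is not large-page aligned, and when host
 * (ugfn) and guest alignment disagree modulo the large-page size no
 * huge spte can map the slot at all.
 */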
10695 * memslots->generation has been incremented. in kvm_arch_memslots_updated()
10700 /* Force re-initialization of steal_time cache */ in kvm_arch_memslots_updated()
10712 mem->memory_size >> PAGE_SHIFT); in kvm_arch_prepare_memory_region()
10725 if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) in kvm_mmu_slot_apply_flags()
10737 * which can be collapsed into a single large-page spte. Later in kvm_mmu_slot_apply_flags()
10738 * page faults will create the large-page sptes. in kvm_mmu_slot_apply_flags()
10745 if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_mmu_slot_apply_flags()
10746 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_mmu_slot_apply_flags()
10762 * When disabling dirty logging with PML enabled, the D-bit is set in kvm_mmu_slot_apply_flags()
10769 * When enabling dirty logging, large sptes are write-protected in kvm_mmu_slot_apply_flags()
10774 * initial-all-set state. Otherwise, depending on whether pml in kvm_mmu_slot_apply_flags()
10775 * is enabled the D-bit or the W-bit will be cleared. in kvm_mmu_slot_apply_flags()
10777 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_mmu_slot_apply_flags()
10786 * If we're with initial-all-set, we don't need in kvm_mmu_slot_apply_flags()
10789 * we still need to write-protect huge pages in kvm_mmu_slot_apply_flags()
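/*
 * Editor's summary of the two dirty-logging strategies discussed
 * above: with PML the hardware logs dirty GPAs and only the D-bit is
 * cleared, so sptes stay writable; without PML, sptes are
 * write-protected and the write fault does the logging. With the
 * initial-all-set optimization only huge sptes are handled eagerly
 * here; 4k sptes are dealt with lazily as userspace clears dirty bits.
 */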
10807 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
10812 * FIXME: const-ify all uses of struct kvm_memory_slot. in kvm_arch_commit_memory_region()
10841 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
10847 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
10850 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
10854 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
10859 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
10872 kvm_x86_ops.nested_ops->hv_timer_pending && in kvm_vcpu_has_events()
10873 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
10886 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
10894 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
10902 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
10935 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
10943 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
10944 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
10960 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
10961 work->wakeup_all) in kvm_arch_async_page_ready()
10968 if (!vcpu->arch.mmu->direct_map && in kvm_arch_async_page_ready()
10969 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) in kvm_arch_async_page_ready()
10972 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
10984 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); in kvm_async_pf_next_probe()
10991 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
10994 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
11003 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
11004 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
11012 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
11021 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
11025 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
11028 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
11030 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
11037 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
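/*
 * Editor's note: apf.gfns[] is a small open-addressed hash table using
 * linear probing (~0 marks an empty slot; the next-probe helper masks
 * with ASYNC_PF_PER_VCPU - 1, assuming a power-of-two size). Deletion
 * cannot simply empty a slot, or later lookups probing through it
 * would stop early, so the loop above walks the rest of the cluster
 * and shifts back any entry whose home position lies outside the gap.
 */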
11046 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
11054 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
11063 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
11072 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) in kvm_can_deliver_async_pf()
11076 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
11086 vcpu->arch.exception.pending)) in kvm_can_do_async_pf()
11089 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
11104 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
11105 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
11113 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
11136 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
11139 if (work->wakeup_all) in kvm_arch_async_page_present()
11140 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
11142 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
11143 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
11145 if ((work->wakeup_all || work->notpresent_injected) && in kvm_arch_async_page_present()
11147 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
11148 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
11152 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
11153 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
11159 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
11173 atomic_inc(&kvm->arch.assigned_device_count); in kvm_arch_start_assignment()
11179 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
11185 return arch_atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
11191 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
11197 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
11203 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
11219 irqfd->producer = prod; in kvm_arch_irq_bypass_add_producer()
11220 kvm_arch_start_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11221 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
11222 prod->irq, irqfd->gsi, 1); in kvm_arch_irq_bypass_add_producer()
11225 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11237 WARN_ON(irqfd->producer != prod); in kvm_arch_irq_bypass_del_producer()
11238 irqfd->producer = NULL; in kvm_arch_irq_bypass_del_producer()
11242 * remapped mode, so we can re-use the current implementation in kvm_arch_irq_bypass_del_producer()
11246 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
11249 " fails: %d\n", irqfd->consumer.token, ret); in kvm_arch_irq_bypass_del_producer()
11251 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_del_producer()
11267 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
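/*
 * Editor's note: bit 0 of MSR_KVM_POLL_CONTROL is the guest's "host
 * may poll" hint, enabled by default at vcpu creation (see
 * kvm_arch_vcpu_postcreate() above). When the guest clears it,
 * kvm_arch_no_poll() returns true and host-side halt-polling is
 * skipped for that vcpu.
 */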
11305 vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { in kvm_fixup_and_inject_pf_error()
11307 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
11317 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
11338 * doesn't seem to be a real use-case behind such requests, just return in kvm_handle_memory_failure()
11341 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_handle_memory_failure()
11342 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_handle_memory_failure()
11343 vcpu->run->internal.ndata = 0; in kvm_handle_memory_failure()
11394 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd) in kvm_handle_invpcid()
11398 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); in kvm_handle_invpcid()
11410 * page tables, so a non-global flush just degenerates to a in kvm_handle_invpcid()