Lines Matching +full:pic +full:- +full:base +full:- +full:vec
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
16 * Ben-Ami Yassour <benami@il.ibm.com>
43 #include <linux/intel-iommu.h>
45 #include <linux/user-return-notifier.h>
59 #include <linux/entry-kvm.h>
88 ((struct kvm_vcpu *)(ctxt)->vcpu)
91 * - enable syscall by default because it's emulated by KVM
92 * - enable LME and LMA by default on 64-bit KVM
140 /* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
145 * lapic timer advance (tscdeadline mode only) in nanoseconds. '-1' enables
147 * advancement entirely. Any other value is used as-is and disables adaptive
150 static int __read_mostly lapic_timer_advance_ns = -1;
163 int __read_mostly pi_inject_timer = -1;
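The three matches above are module-level tunables of kvm.ko. A sketch of the module_param() pairing one would expect next to the definitions (the 0644 permissions and the bint type for pi_inject_timer are assumptions; the matched lines show only the variables):

    /* Sketch only: exposing the variables above as kvm.ko parameters. */
    module_param(tsc_tolerance_ppm, uint, 0644);
    module_param(lapic_timer_advance_ns, int, 0644);
    module_param(pi_inject_timer, bint, 0644);

Once exposed this way, they can be set at load time (modprobe kvm lapic_timer_advance_ns=1000) or inspected under /sys/module/kvm/parameters/.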
288 size - useroffset, NULL); in kvm_alloc_emulator_cache()
297 vcpu->arch.apf.gfns[i] = ~0; in kvm_async_pf_hash_reset()
313 if (msrs->registered) { in kvm_on_user_return()
314 msrs->registered = false; in kvm_on_user_return()
319 values = &msrs->values[slot]; in kvm_on_user_return()
320 if (values->host != values->curr) { in kvm_on_user_return()
321 wrmsrl(user_return_msrs_global.msrs[slot], values->host); in kvm_on_user_return()
322 values->curr = values->host; in kvm_on_user_return()
361 msrs->values[i].host = value; in kvm_user_return_msr_cpu_online()
362 msrs->values[i].curr = value; in kvm_user_return_msr_cpu_online()
372 value = (value & mask) | (msrs->values[slot].host & ~mask); in kvm_set_user_return_msr()
373 if (value == msrs->values[slot].curr) in kvm_set_user_return_msr()
379 msrs->values[slot].curr = value; in kvm_set_user_return_msr()
380 if (!msrs->registered) { in kvm_set_user_return_msr()
381 msrs->urn.on_user_return = kvm_on_user_return; in kvm_set_user_return_msr()
382 user_return_notifier_register(&msrs->urn); in kvm_set_user_return_msr()
383 msrs->registered = true; in kvm_set_user_return_msr()
394 if (msrs->registered) in drop_user_return_notifiers()
395 kvm_on_user_return(&msrs->urn); in drop_user_return_notifiers()
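The matches from kvm_on_user_return() through drop_user_return_notifiers() outline the user-return MSR machinery: each slot caches the host value of an MSR, guest values are written lazily and only when they differ, and a user-return notifier restores the host value on the way back to userspace. A standalone model of the pattern (types and names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the host/curr slot pattern shown above. */
    struct slot { uint64_t host, curr; };

    static struct slot slots[4];
    static bool registered;

    static void fake_wrmsr(int i, uint64_t v) { slots[i].curr = v; }

    static void on_user_return(void)
    {
        registered = false;                   /* cf. msrs->registered = false */
        for (int i = 0; i < 4; i++)
            if (slots[i].curr != slots[i].host)
                fake_wrmsr(i, slots[i].host); /* restore the host value */
    }

    static void set_user_return_msr(int i, uint64_t value)
    {
        if (value == slots[i].curr)
            return;                           /* skip the redundant WRMSR */
        fake_wrmsr(i, value);
        if (!registered)
            registered = true;                /* kernel: user_return_notifier_register() */
    }

    int main(void)
    {
        slots[0].host = slots[0].curr = 0xd0; /* snapshot at CPU-online time */
        set_user_return_msr(0, 0x42);         /* guest value; notifier armed */
        on_user_return();                     /* return to userspace; host restored */
        printf("%#llx\n", (unsigned long long)slots[0].curr);
        return 0;
    }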
400 return vcpu->arch.apic_base; in kvm_get_apic_base()
413 enum lapic_mode new_mode = kvm_apic_mode(msr_info->data); in kvm_set_apic_base()
417 if ((msr_info->data & reserved_bits) != 0 || new_mode == LAPIC_MODE_INVALID) in kvm_set_apic_base()
419 if (!msr_info->host_initiated) { in kvm_set_apic_base()
426 kvm_lapic_set_base(vcpu, msr_info->data); in kvm_set_apic_base()
427 kvm_recalculate_apic_map(vcpu->kvm); in kvm_set_apic_base()
476 * #DBs can be trap-like or fault-like, the caller must check other CPU in exception_type()
494 unsigned nr = vcpu->arch.exception.nr; in kvm_deliver_exception_payload()
495 bool has_payload = vcpu->arch.exception.has_payload; in kvm_deliver_exception_payload()
496 unsigned long payload = vcpu->arch.exception.payload; in kvm_deliver_exception_payload()
504 * "Certain debug exceptions may clear bit 0-3. The in kvm_deliver_exception_payload()
508 vcpu->arch.dr6 &= ~DR_TRAP_BITS; in kvm_deliver_exception_payload()
512 vcpu->arch.dr6 |= DR6_RTM; in kvm_deliver_exception_payload()
513 vcpu->arch.dr6 |= payload; in kvm_deliver_exception_payload()
522 vcpu->arch.dr6 ^= payload & DR6_RTM; in kvm_deliver_exception_payload()
530 vcpu->arch.dr6 &= ~BIT(12); in kvm_deliver_exception_payload()
533 vcpu->arch.cr2 = payload; in kvm_deliver_exception_payload()
537 vcpu->arch.exception.has_payload = false; in kvm_deliver_exception_payload()
538 vcpu->arch.exception.payload = 0; in kvm_deliver_exception_payload()
551 if (!vcpu->arch.exception.pending && !vcpu->arch.exception.injected) { in kvm_multiple_exception()
555 * On vmentry, vcpu->arch.exception.pending is only in kvm_multiple_exception()
562 WARN_ON_ONCE(vcpu->arch.exception.pending); in kvm_multiple_exception()
563 vcpu->arch.exception.injected = true; in kvm_multiple_exception()
573 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
574 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
576 vcpu->arch.exception.has_error_code = has_error; in kvm_multiple_exception()
577 vcpu->arch.exception.nr = nr; in kvm_multiple_exception()
578 vcpu->arch.exception.error_code = error_code; in kvm_multiple_exception()
579 vcpu->arch.exception.has_payload = has_payload; in kvm_multiple_exception()
580 vcpu->arch.exception.payload = payload; in kvm_multiple_exception()
587 prev_nr = vcpu->arch.exception.nr; in kvm_multiple_exception()
589 /* triple fault -> shutdown */ in kvm_multiple_exception()
598 * Generate double fault per SDM Table 5-5. Set in kvm_multiple_exception()
602 vcpu->arch.exception.pending = true; in kvm_multiple_exception()
603 vcpu->arch.exception.injected = false; in kvm_multiple_exception()
604 vcpu->arch.exception.has_error_code = true; in kvm_multiple_exception()
605 vcpu->arch.exception.nr = DF_VECTOR; in kvm_multiple_exception()
606 vcpu->arch.exception.error_code = 0; in kvm_multiple_exception()
607 vcpu->arch.exception.has_payload = false; in kvm_multiple_exception()
608 vcpu->arch.exception.payload = 0; in kvm_multiple_exception()
611 that instruction re-execution will regenerate lost in kvm_multiple_exception()
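kvm_multiple_exception() escalates per the SDM: a second exception while one is queued either stays serial, promotes to #DF (SDM Table 5-5), or, when a fault hits while delivering #DF, becomes a triple fault and a shutdown request. A hedged reconstruction of the classification step (the vector classes follow the SDM; the kernel's own helper is not among the matched lines):

    /* Sketch of the SDM Table 5-5 merge; vector numbers per the x86 SDM. */
    enum { DE = 0, DF = 8, TS = 10, NP = 11, SS = 12, GP = 13, PF = 14 };

    static int is_contributory(int nr)
    {
        /* #DE, #TS, #NP, #SS and #GP are "contributory" exceptions. */
        return nr == DE || nr == TS || nr == NP || nr == SS || nr == GP;
    }

    static int merged_vector(int prev, int next)
    {
        if (prev == DF)
            return -1;                        /* triple fault -> shutdown */
        if ((is_contributory(prev) && is_contributory(next)) ||
            (prev == PF && (is_contributory(next) || next == PF)))
            return DF;                        /* promote to double fault */
        return next;                          /* otherwise handled serially */
    }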
655 ++vcpu->stat.pf_guest; in kvm_inject_page_fault()
656 vcpu->arch.exception.nested_apf = in kvm_inject_page_fault()
657 is_guest_mode(vcpu) && fault->async_page_fault; in kvm_inject_page_fault()
658 if (vcpu->arch.exception.nested_apf) { in kvm_inject_page_fault()
659 vcpu->arch.apf.nested_apf_token = fault->address; in kvm_inject_page_fault()
660 kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code); in kvm_inject_page_fault()
662 kvm_queue_exception_e_p(vcpu, PF_VECTOR, fault->error_code, in kvm_inject_page_fault()
663 fault->address); in kvm_inject_page_fault()
672 WARN_ON_ONCE(fault->vector != PF_VECTOR); in kvm_inject_emulated_page_fault()
674 fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu : in kvm_inject_emulated_page_fault()
675 vcpu->arch.walk_mmu; in kvm_inject_emulated_page_fault()
681 if ((fault->error_code & PFERR_PRESENT_MASK) && in kvm_inject_emulated_page_fault()
682 !(fault->error_code & PFERR_RSVD_MASK)) in kvm_inject_emulated_page_fault()
683 kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address, in kvm_inject_emulated_page_fault()
684 fault_mmu->root_hpa); in kvm_inject_emulated_page_fault()
686 fault_mmu->inject_page_fault(vcpu, fault); in kvm_inject_emulated_page_fault()
687 return fault->nested_page_fault; in kvm_inject_emulated_page_fault()
693 atomic_inc(&vcpu->arch.nmi_queued); in kvm_inject_nmi()
747 real_gfn = mmu->translate_gpa(vcpu, ngpa, access, &exception); in kvm_read_guest_page_mmu()
749 return -EFAULT; in kvm_read_guest_page_mmu()
760 return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn, in kvm_read_nested_guest_page()
776 unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2; in load_pdptrs()
779 u64 pdpte[ARRAY_SIZE(mmu->pdptrs)]; in load_pdptrs()
797 memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)); in load_pdptrs()
808 u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)]; in pdptrs_changed()
820 offset = (kvm_read_cr3(vcpu) & 0xffffffe0ul) & (PAGE_SIZE - 1); in pdptrs_changed()
826 return memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0; in pdptrs_changed()
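In PAE paging the four PDPTEs sit in one 32-byte, 32-byte-aligned block addressed by CR3, so load_pdptrs() turns bits 11:5 of CR3 into a block index and scales it to a u64-entry index (four 8-byte entries per block); the 0xffffffe0 mask in pdptrs_changed() selects the same bits. A worked example of the arithmetic:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
        unsigned long cr3 = 0x12345b40;      /* example CR3; 32-byte aligned */
        /* bits 11:5 = block index within the page; << 2 converts it to a
         * u64 index, since each block holds four 8-byte PDPTEs. */
        unsigned offset = ((cr3 & (PAGE_SIZE - 1)) >> 5) << 2;
        /* 0xb40 >> 5 = 0x5a, so u64 index 0x168 and byte offset 0xb40. */
        printf("u64 index %#x, byte offset %#x\n", offset, offset * 8);
        return 0;
    }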
852 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && in kvm_set_cr0()
863 if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) && in kvm_set_cr0()
865 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu))) in kvm_set_cr0()
882 kvm_arch_has_noncoherent_dma(vcpu->kvm) && in kvm_set_cr0()
883 !kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in kvm_set_cr0()
884 kvm_zap_gfn_range(vcpu->kvm, 0, ~0ULL); in kvm_set_cr0()
900 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_guest_xsave_state()
901 xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); in kvm_load_guest_xsave_state()
903 if (vcpu->arch.xsaves_enabled && in kvm_load_guest_xsave_state()
904 vcpu->arch.ia32_xss != host_xss) in kvm_load_guest_xsave_state()
905 wrmsrl(MSR_IA32_XSS, vcpu->arch.ia32_xss); in kvm_load_guest_xsave_state()
910 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU)) && in kvm_load_guest_xsave_state()
911 vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_guest_xsave_state()
912 __write_pkru(vcpu->arch.pkru); in kvm_load_guest_xsave_state()
920 (vcpu->arch.xcr0 & XFEATURE_MASK_PKRU))) { in kvm_load_host_xsave_state()
921 vcpu->arch.pkru = rdpkru(); in kvm_load_host_xsave_state()
922 if (vcpu->arch.pkru != vcpu->arch.host_pkru) in kvm_load_host_xsave_state()
923 __write_pkru(vcpu->arch.host_pkru); in kvm_load_host_xsave_state()
928 if (vcpu->arch.xcr0 != host_xcr0) in kvm_load_host_xsave_state()
931 if (vcpu->arch.xsaves_enabled && in kvm_load_host_xsave_state()
932 vcpu->arch.ia32_xss != host_xss) in kvm_load_host_xsave_state()
942 u64 old_xcr0 = vcpu->arch.xcr0; in __kvm_set_xcr()
958 valid_bits = vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FP; in __kvm_set_xcr()
972 vcpu->arch.xcr0 = xcr0; in __kvm_set_xcr()
993 return -EINVAL; in kvm_valid_cr4()
995 if (cr4 & vcpu->arch.cr4_guest_rsvd_bits) in kvm_valid_cr4()
996 return -EINVAL; in kvm_valid_cr4()
999 return -EINVAL; in kvm_valid_cr4()
1022 && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, in kvm_set_cr4()
1069 (cr3 & vcpu->arch.cr3_lm_rsvd_bits)) in kvm_set_cr3()
1072 !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)) in kvm_set_cr3()
1076 vcpu->arch.cr3 = cr3; in kvm_set_cr3()
1090 vcpu->arch.cr8 = cr8; in kvm_set_cr8()
1100 return vcpu->arch.cr8; in kvm_get_cr8()
1108 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { in kvm_update_dr0123()
1110 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_update_dr0123()
1111 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD; in kvm_update_dr0123()
1119 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) in kvm_update_dr7()
1120 dr7 = vcpu->arch.guest_debug_dr7; in kvm_update_dr7()
1122 dr7 = vcpu->arch.dr7; in kvm_update_dr7()
1124 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1126 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; in kvm_update_dr7()
1141 size_t size = ARRAY_SIZE(vcpu->arch.db); in __kvm_set_dr()
1145 vcpu->arch.db[array_index_nospec(dr, size)] = val; in __kvm_set_dr()
1146 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) in __kvm_set_dr()
1147 vcpu->arch.eff_db[dr] = val; in __kvm_set_dr()
1152 return -1; /* #GP */ in __kvm_set_dr()
1153 vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); in __kvm_set_dr()
1158 return -1; /* #GP */ in __kvm_set_dr()
1159 vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; in __kvm_set_dr()
1179 size_t size = ARRAY_SIZE(vcpu->arch.db); in kvm_get_dr()
1183 *val = vcpu->arch.db[array_index_nospec(dr, size)]; in kvm_get_dr()
1187 *val = vcpu->arch.dr6; in kvm_get_dr()
1191 *val = vcpu->arch.dr7; in kvm_get_dr()
1221 * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
1344 * List of msr numbers which are used to expose MSR-based features that
1379 * 10 - MISC_PACKAGE_CTRLS
1380 * 11 - ENERGY_FILTERING_CTL
1381 * 12 - DOITM
1382 * 18 - FB_CLEAR_CTRL
1383 * 21 - XAPIC_DISABLE_STATUS
1384 * 23 - OVERCLOCKING_STATUS
1454 switch (msr->index) { in kvm_get_msr_feature()
1456 msr->data = kvm_get_arch_capabilities(); in kvm_get_msr_feature()
1459 rdmsrl_safe(msr->index, &msr->data); in kvm_get_msr_feature()
1519 u64 old_efer = vcpu->arch.efer; in set_efer()
1520 u64 efer = msr_info->data; in set_efer()
1526 if (!msr_info->host_initiated) { in set_efer()
1531 (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) in set_efer()
1536 efer |= vcpu->arch.efer & EFER_LMA; in set_efer()
1561 struct kvm *kvm = vcpu->kvm; in kvm_msr_allowed()
1570 idx = srcu_read_lock(&kvm->srcu); in kvm_msr_allowed()
1572 msr_filter = srcu_dereference(kvm->arch.msr_filter, &kvm->srcu); in kvm_msr_allowed()
1578 allowed = msr_filter->default_allow; in kvm_msr_allowed()
1579 ranges = msr_filter->ranges; in kvm_msr_allowed()
1581 for (i = 0; i < msr_filter->count; i++) { in kvm_msr_allowed()
1582 u32 start = ranges[i].base; in kvm_msr_allowed()
1588 allowed = !!test_bit(index - start, bitmap); in kvm_msr_allowed()
1594 srcu_read_unlock(&kvm->srcu, idx); in kvm_msr_allowed()
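kvm_msr_allowed() starts from the filter's default_allow verdict and scans the installed ranges; when the MSR index lands inside a range, one bit of that range's bitmap overrides the default. A standalone model of the scan (struct layout is illustrative, not the kernel's msr-filter structs):

    #include <stdbool.h>
    #include <stdint.h>

    struct msr_range {
        uint32_t base;                        /* first MSR index covered */
        uint32_t nmsrs;                       /* number of MSRs covered */
        const unsigned long *bitmap;          /* one bit per MSR; 1 = allowed */
    };

    static bool msr_allowed(uint32_t index, bool default_allow,
                            const struct msr_range *ranges, int count)
    {
        bool allowed = default_allow;

        for (int i = 0; i < count; i++) {
            uint32_t start = ranges[i].base;

            if (index >= start && index < start + ranges[i].nmsrs) {
                uint32_t bit = index - start;

                allowed = !!((ranges[i].bitmap[bit / (8 * sizeof(long))]
                              >> (bit % (8 * sizeof(long)))) & 1);
                break;
            }
        }
        return allowed;
    }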
1603 * Returns 0 on success, non-0 otherwise.
1627 * non-canonical address is written on Intel but not on in __kvm_set_msr()
1628 * AMD (which ignores the top 32-bits, because it does in __kvm_set_msr()
1629 * not implement 64-bit SYSENTER). in __kvm_set_msr()
1631 * 64-bit code should hence be able to write a non-canonical in __kvm_set_msr()
1633 * vmentry does not fail on Intel after writing a non-canonical in __kvm_set_msr()
1635 * invokes 64-bit SYSENTER. in __kvm_set_msr()
1662 * Returns 0 on success, non-0 otherwise.
1712 if (vcpu->run->msr.error) { in complete_emulated_msr()
1716 kvm_rax_write(vcpu, (u32)vcpu->run->msr.data); in complete_emulated_msr()
1717 kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32); in complete_emulated_msr()
1753 if (!(vcpu->kvm->arch.user_space_msr_mask & msr_reason)) in kvm_msr_user_space()
1756 vcpu->run->exit_reason = exit_reason; in kvm_msr_user_space()
1757 vcpu->run->msr.error = 0; in kvm_msr_user_space()
1758 memset(vcpu->run->msr.pad, 0, sizeof(vcpu->run->msr.pad)); in kvm_msr_user_space()
1759 vcpu->run->msr.reason = msr_reason; in kvm_msr_user_space()
1760 vcpu->run->msr.index = index; in kvm_msr_user_space()
1761 vcpu->run->msr.data = data; in kvm_msr_user_space()
1762 vcpu->arch.complete_userspace_io = completion; in kvm_msr_user_space()
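kvm_msr_user_space() is the kernel half of KVM_CAP_X86_USER_SPACE_MSR: it fills vcpu->run->msr, records a completion callback, and bounces the access out to the VMM. The userspace half handles the exit roughly as below (a sketch: it assumes the capability was enabled with a matching mask, and the emulated MSR index is just an example):

    #include <linux/kvm.h>

    /* 'run' is the vCPU's mmap'ed struct kvm_run, after KVM_RUN returned. */
    static void handle_msr_exit(struct kvm_run *run)
    {
        switch (run->exit_reason) {
        case KVM_EXIT_X86_RDMSR:
            if (run->msr.index == 0x4b564d00) {   /* example MSR we emulate */
                run->msr.data = 0;                /* value the guest will read */
                run->msr.error = 0;
            } else {
                run->msr.error = 1;               /* kernel injects #GP on re-entry */
            }
            break;
        case KVM_EXIT_X86_WRMSR:
            /* run->msr.index / run->msr.data carry the guest's write. */
            run->msr.error = 0;                   /* accept the write */
            break;
        }
        /* The next KVM_RUN finishes the instruction through
         * vcpu->arch.complete_userspace_io (complete_emulated_msr above). */
    }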
1802 kvm_rax_write(vcpu, data & -1u); in kvm_emulate_rdmsr()
1803 kvm_rdx_write(vcpu, (data >> 32) & -1u); in kvm_emulate_rdmsr()
1839 return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) || in kvm_vcpu_exit_request()
1846 * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
1853 if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(vcpu->arch.apic)) in handle_fastpath_set_x2apic_icr_irqoff()
1862 kvm_apic_send_ipi(vcpu->arch.apic, (u32)data, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1863 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR2, (u32)(data >> 32)); in handle_fastpath_set_x2apic_icr_irqoff()
1864 kvm_lapic_set_reg(vcpu->arch.apic, APIC_ICR, (u32)data); in handle_fastpath_set_x2apic_icr_irqoff()
1953 write_seqcount_begin(&vdata->seq); in update_pvclock_gtod()
1956 vdata->clock.vclock_mode = tk->tkr_mono.clock->vdso_clock_mode; in update_pvclock_gtod()
1957 vdata->clock.cycle_last = tk->tkr_mono.cycle_last; in update_pvclock_gtod()
1958 vdata->clock.mask = tk->tkr_mono.mask; in update_pvclock_gtod()
1959 vdata->clock.mult = tk->tkr_mono.mult; in update_pvclock_gtod()
1960 vdata->clock.shift = tk->tkr_mono.shift; in update_pvclock_gtod()
1961 vdata->clock.base_cycles = tk->tkr_mono.xtime_nsec; in update_pvclock_gtod()
1962 vdata->clock.offset = tk->tkr_mono.base; in update_pvclock_gtod()
1964 vdata->raw_clock.vclock_mode = tk->tkr_raw.clock->vdso_clock_mode; in update_pvclock_gtod()
1965 vdata->raw_clock.cycle_last = tk->tkr_raw.cycle_last; in update_pvclock_gtod()
1966 vdata->raw_clock.mask = tk->tkr_raw.mask; in update_pvclock_gtod()
1967 vdata->raw_clock.mult = tk->tkr_raw.mult; in update_pvclock_gtod()
1968 vdata->raw_clock.shift = tk->tkr_raw.shift; in update_pvclock_gtod()
1969 vdata->raw_clock.base_cycles = tk->tkr_raw.xtime_nsec; in update_pvclock_gtod()
1970 vdata->raw_clock.offset = tk->tkr_raw.base; in update_pvclock_gtod()
1972 vdata->wall_time_sec = tk->xtime_sec; in update_pvclock_gtod()
1974 vdata->offs_boot = tk->offs_boot; in update_pvclock_gtod()
1976 write_seqcount_end(&vdata->seq); in update_pvclock_gtod()
1999 kvm->arch.wall_clock = wall_clock; in kvm_write_wall_clock()
2021 wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm); in kvm_write_wall_clock()
2036 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_write_system_time()
2038 if (vcpu->vcpu_id == 0 && !host_initiated) { in kvm_write_system_time()
2039 if (ka->boot_vcpu_runs_old_kvmclock != old_msr) in kvm_write_system_time()
2042 ka->boot_vcpu_runs_old_kvmclock = old_msr; in kvm_write_system_time()
2045 vcpu->arch.time = system_time; in kvm_write_system_time()
2049 vcpu->arch.pv_time_enabled = false; in kvm_write_system_time()
2053 if (!kvm_gfn_to_hva_cache_init(vcpu->kvm, in kvm_write_system_time()
2054 &vcpu->arch.pv_time, system_time & ~1ULL, in kvm_write_system_time()
2056 vcpu->arch.pv_time_enabled = true; in kvm_write_system_time()
2079 shift--; in kvm_get_time_scale()
2115 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in set_tsc_khz()
2122 vcpu->arch.tsc_catchup = 1; in set_tsc_khz()
2123 vcpu->arch.tsc_always_catchup = 1; in set_tsc_khz()
2127 return -1; in set_tsc_khz()
2131 /* TSC scaling required - calculate ratio */ in set_tsc_khz()
2136 pr_warn_ratelimited("Invalid TSC scaling ratio - virtual-tsc-khz=%u\n", in set_tsc_khz()
2138 return -1; in set_tsc_khz()
2141 vcpu->arch.tsc_scaling_ratio = ratio; in set_tsc_khz()
2153 vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio; in kvm_set_tsc_khz()
2154 return -1; in kvm_set_tsc_khz()
2159 &vcpu->arch.virtual_tsc_shift, in kvm_set_tsc_khz()
2160 &vcpu->arch.virtual_tsc_mult); in kvm_set_tsc_khz()
2161 vcpu->arch.virtual_tsc_khz = user_tsc_khz; in kvm_set_tsc_khz()
2169 thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm); in kvm_set_tsc_khz()
2180 u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec, in compute_guest_tsc()
2181 vcpu->arch.virtual_tsc_mult, in compute_guest_tsc()
2182 vcpu->arch.virtual_tsc_shift); in compute_guest_tsc()
2183 tsc += vcpu->arch.this_tsc_write; in compute_guest_tsc()
2196 struct kvm_arch *ka = &vcpu->kvm->arch; in kvm_track_tsc_matching()
2199 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in kvm_track_tsc_matching()
2200 atomic_read(&vcpu->kvm->online_vcpus)); in kvm_track_tsc_matching()
2210 if (ka->use_master_clock || in kvm_track_tsc_matching()
2211 (gtod_is_based_on_tsc(gtod->clock.vclock_mode) && vcpus_matched)) in kvm_track_tsc_matching()
2214 trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, in kvm_track_tsc_matching()
2215 atomic_read(&vcpu->kvm->online_vcpus), in kvm_track_tsc_matching()
2216 ka->use_master_clock, gtod->clock.vclock_mode); in kvm_track_tsc_matching()
2223 * The most significant 64-N bits (mult) of ratio represent the
2226 * point number (mult + frac * 2^(-N)).
2238 u64 ratio = vcpu->arch.tsc_scaling_ratio; in kvm_scale_tsc()
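kvm_scale_tsc() applies the fixed-point ratio described in the comment above: ratio = (guest_khz << N) / host_khz, and the scaled value is (tsc * ratio) >> N computed through a 128-bit intermediate. A worked userspace example with N = 48 (the VMX fraction width; SVM uses 32 — both values are assumptions, not shown in these matches):

    #include <inttypes.h>
    #include <stdio.h>

    #define FRAC_BITS 48   /* assumed kvm_tsc_scaling_ratio_frac_bits (VMX) */

    /* (a * mult) >> shift via a 128-bit product, like the kernel's
     * mul_u64_u64_shr(); requires GCC/Clang __int128. */
    static uint64_t mul_u64_u64_shr(uint64_t a, uint64_t mult, unsigned shift)
    {
        return (uint64_t)(((unsigned __int128)a * mult) >> shift);
    }

    int main(void)
    {
        uint64_t guest_khz = 2000000, host_khz = 3000000; /* 2 GHz on 3 GHz */
        uint64_t ratio = (uint64_t)(((unsigned __int128)guest_khz << FRAC_BITS)
                                    / host_khz);
        uint64_t host_tsc = 3000000000ull;                /* one host second */

        /* Prints ~2000000000: the guest sees 2e9 cycles per host second. */
        printf("%" PRIu64 "\n", mul_u64_u64_shr(host_tsc, ratio, FRAC_BITS));
        return 0;
    }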
2253 return target_tsc - tsc; in kvm_compute_tsc_offset()
2258 return vcpu->arch.l1_tsc_offset + kvm_scale_tsc(vcpu, host_tsc); in kvm_read_l1_tsc()
2264 vcpu->arch.l1_tsc_offset = offset; in kvm_vcpu_write_tsc_offset()
2265 vcpu->arch.tsc_offset = kvm_x86_ops.write_l1_tsc_offset(vcpu, offset); in kvm_vcpu_write_tsc_offset()
2272 * TSC is marked unstable when we're running on Hyper-V, in kvm_check_tsc_unstable()
2283 struct kvm *kvm = vcpu->kvm; in kvm_synchronize_tsc()
2290 raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2293 elapsed = ns - kvm->arch.last_tsc_nsec; in kvm_synchronize_tsc()
2295 if (vcpu->arch.virtual_tsc_khz) { in kvm_synchronize_tsc()
2298 * detection of vcpu initialization -- need to sync in kvm_synchronize_tsc()
2304 u64 tsc_exp = kvm->arch.last_tsc_write + in kvm_synchronize_tsc()
2306 u64 tsc_hz = vcpu->arch.virtual_tsc_khz * 1000LL; in kvm_synchronize_tsc()
2324 vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { in kvm_synchronize_tsc()
2326 offset = kvm->arch.cur_tsc_offset; in kvm_synchronize_tsc()
2333 already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); in kvm_synchronize_tsc()
2342 * These values are tracked in kvm->arch.cur_xxx variables. in kvm_synchronize_tsc()
2344 kvm->arch.cur_tsc_generation++; in kvm_synchronize_tsc()
2345 kvm->arch.cur_tsc_nsec = ns; in kvm_synchronize_tsc()
2346 kvm->arch.cur_tsc_write = data; in kvm_synchronize_tsc()
2347 kvm->arch.cur_tsc_offset = offset; in kvm_synchronize_tsc()
2355 kvm->arch.last_tsc_nsec = ns; in kvm_synchronize_tsc()
2356 kvm->arch.last_tsc_write = data; in kvm_synchronize_tsc()
2357 kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; in kvm_synchronize_tsc()
2359 vcpu->arch.last_guest_tsc = data; in kvm_synchronize_tsc()
2362 vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; in kvm_synchronize_tsc()
2363 vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; in kvm_synchronize_tsc()
2364 vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; in kvm_synchronize_tsc()
2367 raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); in kvm_synchronize_tsc()
2369 spin_lock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2371 kvm->arch.nr_vcpus_matched_tsc = 0; in kvm_synchronize_tsc()
2373 kvm->arch.nr_vcpus_matched_tsc++; in kvm_synchronize_tsc()
2377 spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); in kvm_synchronize_tsc()
2383 u64 tsc_offset = vcpu->arch.l1_tsc_offset; in adjust_tsc_offset_guest()
2389 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio) in adjust_tsc_offset_host()
2423 switch (clock->vclock_mode) { in vgettsc()
2430 v = (tsc_pg_val - clock->cycle_last) & in vgettsc()
2431 clock->mask; in vgettsc()
2440 v = (*tsc_timestamp - clock->cycle_last) & in vgettsc()
2441 clock->mask; in vgettsc()
2450 return v * clock->mult; in vgettsc()
2461 seq = read_seqcount_begin(&gtod->seq); in do_monotonic_raw()
2462 ns = gtod->raw_clock.base_cycles; in do_monotonic_raw()
2463 ns += vgettsc(&gtod->raw_clock, tsc_timestamp, &mode); in do_monotonic_raw()
2464 ns >>= gtod->raw_clock.shift; in do_monotonic_raw()
2465 ns += ktime_to_ns(ktime_add(gtod->raw_clock.offset, gtod->offs_boot)); in do_monotonic_raw()
2466 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_monotonic_raw()
2480 seq = read_seqcount_begin(&gtod->seq); in do_realtime()
2481 ts->tv_sec = gtod->wall_time_sec; in do_realtime()
2482 ns = gtod->clock.base_cycles; in do_realtime()
2483 ns += vgettsc(&gtod->clock, tsc_timestamp, &mode); in do_realtime()
2484 ns >>= gtod->clock.shift; in do_realtime()
2485 } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); in do_realtime()
2487 ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns); in do_realtime()
2488 ts->tv_nsec = ns; in do_realtime()
2532 * 4. ret0 = timespec0 + (rdtsc - tsc0) |
2533 * 5. | ret1 = timespec1 + (rdtsc - tsc1)
2534 * | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
2538 * - ret0 < ret1
2539 * - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
2541 * - 0 < N - M => M < N
2560 struct kvm_arch *ka = &kvm->arch; in pvclock_update_vm_gtod_copy()
2564 vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == in pvclock_update_vm_gtod_copy()
2565 atomic_read(&kvm->online_vcpus)); in pvclock_update_vm_gtod_copy()
2572 &ka->master_kernel_ns, in pvclock_update_vm_gtod_copy()
2573 &ka->master_cycle_now); in pvclock_update_vm_gtod_copy()
2575 ka->use_master_clock = host_tsc_clocksource && vcpus_matched in pvclock_update_vm_gtod_copy()
2576 && !ka->backwards_tsc_observed in pvclock_update_vm_gtod_copy()
2577 && !ka->boot_vcpu_runs_old_kvmclock; in pvclock_update_vm_gtod_copy()
2579 if (ka->use_master_clock) in pvclock_update_vm_gtod_copy()
2583 trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, in pvclock_update_vm_gtod_copy()
2598 struct kvm_arch *ka = &kvm->arch; in kvm_gen_update_masterclock()
2600 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2612 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_gen_update_masterclock()
2618 struct kvm_arch *ka = &kvm->arch; in get_kvmclock_ns()
2622 spin_lock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2623 if (!ka->use_master_clock) { in get_kvmclock_ns()
2624 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2625 return get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2628 hv_clock.tsc_timestamp = ka->master_cycle_now; in get_kvmclock_ns()
2629 hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; in get_kvmclock_ns()
2630 spin_unlock(&ka->pvclock_gtod_sync_lock); in get_kvmclock_ns()
2641 ret = get_kvmclock_base_ns() + ka->kvmclock_offset; in get_kvmclock_ns()
2650 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_setup_pvclock_page()
2653 if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2668 * and third write. The vcpu->pv_time cache is still valid, because the in kvm_setup_pvclock_page()
2676 vcpu->hv_clock.version = guest_hv_clock.version + 1; in kvm_setup_pvclock_page()
2677 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2678 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2679 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
2684 vcpu->hv_clock.flags |= (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED); in kvm_setup_pvclock_page()
2686 if (vcpu->pvclock_set_guest_stopped_request) { in kvm_setup_pvclock_page()
2687 vcpu->hv_clock.flags |= PVCLOCK_GUEST_STOPPED; in kvm_setup_pvclock_page()
2688 vcpu->pvclock_set_guest_stopped_request = false; in kvm_setup_pvclock_page()
2691 trace_kvm_pvclock_update(v->vcpu_id, &vcpu->hv_clock); in kvm_setup_pvclock_page()
2693 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2694 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2695 sizeof(vcpu->hv_clock)); in kvm_setup_pvclock_page()
2699 vcpu->hv_clock.version++; in kvm_setup_pvclock_page()
2700 kvm_write_guest_cached(v->kvm, &vcpu->pv_time, in kvm_setup_pvclock_page()
2701 &vcpu->hv_clock, in kvm_setup_pvclock_page()
2702 sizeof(vcpu->hv_clock.version)); in kvm_setup_pvclock_page()
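kvm_setup_pvclock_page() is the writer half of the pvclock versioning protocol: bump version to an odd value, publish the payload, then bump it to even, with barriers in between (the fragments above show the three writes). The guest reader retries until it observes a stable even version; a sketch of that loop, with the struct laid out per the pvclock ABI:

    #include <stdint.h>

    /* Layout per struct pvclock_vcpu_time_info in the pvclock ABI. */
    struct pvclock_vcpu_time_info {
        uint32_t version;
        uint32_t pad0;
        uint64_t tsc_timestamp;
        uint64_t system_time;
        uint32_t tsc_to_system_mul;
        int8_t   tsc_shift;
        uint8_t  flags;
        uint8_t  pad[2];
    };

    static uint64_t read_system_time(const volatile struct pvclock_vcpu_time_info *ti)
    {
        uint32_t version;
        uint64_t system_time;

        do {
            version = ti->version;       /* odd => writer mid-update, retry */
            __sync_synchronize();        /* read barrier */
            system_time = ti->system_time;
            __sync_synchronize();
        } while ((version & 1) || version != ti->version);

        return system_time;
    }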
2708 struct kvm_vcpu_arch *vcpu = &v->arch; in kvm_guest_time_update()
2709 struct kvm_arch *ka = &v->kvm->arch; in kvm_guest_time_update()
2722 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2723 use_master_clock = ka->use_master_clock; in kvm_guest_time_update()
2725 host_tsc = ka->master_cycle_now; in kvm_guest_time_update()
2726 kernel_ns = ka->master_kernel_ns; in kvm_guest_time_update()
2728 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_guest_time_update()
2749 * 2) Broken TSC compensation resets the base at each VCPU in kvm_guest_time_update()
2755 if (vcpu->tsc_catchup) { in kvm_guest_time_update()
2758 adjust_tsc_offset_guest(v, tsc - tsc_timestamp); in kvm_guest_time_update()
2770 if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) { in kvm_guest_time_update()
2772 &vcpu->hv_clock.tsc_shift, in kvm_guest_time_update()
2773 &vcpu->hv_clock.tsc_to_system_mul); in kvm_guest_time_update()
2774 vcpu->hw_tsc_khz = tgt_tsc_khz; in kvm_guest_time_update()
2777 vcpu->hv_clock.tsc_timestamp = tsc_timestamp; in kvm_guest_time_update()
2778 vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset; in kvm_guest_time_update()
2779 vcpu->last_guest_tsc = tsc_timestamp; in kvm_guest_time_update()
2786 vcpu->hv_clock.flags = pvclock_flags; in kvm_guest_time_update()
2788 if (vcpu->pv_time_enabled) in kvm_guest_time_update()
2790 if (v == kvm_get_vcpu(v->kvm, 0)) in kvm_guest_time_update()
2791 kvm_hv_setup_tsc_page(v->kvm, &vcpu->hv_clock); in kvm_guest_time_update()
2797 * vcpu->cpu migration, should not allow system_timestamp from
2803 * We need to rate-limit these requests though, as they can
2806 * by the delay we use to rate-limit the updates.
2828 struct kvm *kvm = v->kvm; in kvm_gen_kvmclock_update()
2831 schedule_delayed_work(&kvm->arch.kvmclock_update_work, in kvm_gen_kvmclock_update()
2847 schedule_delayed_work(&kvm->arch.kvmclock_update_work, 0); in kvmclock_sync_fn()
2848 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvmclock_sync_fn()
2859 return !!(vcpu->arch.msr_hwcr & BIT_ULL(18)); in can_set_mci_status()
2866 u64 mcg_cap = vcpu->arch.mcg_cap; in set_msr_mce()
2868 u32 msr = msr_info->index; in set_msr_mce()
2869 u64 data = msr_info->data; in set_msr_mce()
2873 vcpu->arch.mcg_status = data; in set_msr_mce()
2877 (data || !msr_info->host_initiated)) in set_msr_mce()
2881 vcpu->arch.mcg_ctl = data; in set_msr_mce()
2887 msr - MSR_IA32_MC0_CTL, in set_msr_mce()
2888 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in set_msr_mce()
2896 * correctable, single-bit ECC data errors. in set_msr_mce()
2903 if (!msr_info->host_initiated && in set_msr_mce()
2909 vcpu->arch.mce_banks[offset] = data; in set_msr_mce()
2919 struct kvm *kvm = vcpu->kvm; in xen_hvm_config()
2921 u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64 in xen_hvm_config()
2922 : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32; in xen_hvm_config()
2923 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in xen_hvm_config()
2924 : kvm->arch.xen_hvm_config.blob_size_32; in xen_hvm_config()
2947 return (vcpu->arch.apf.msr_en_val & mask) == mask; in kvm_pv_async_pf_enabled()
2969 vcpu->arch.apf.msr_en_val = data; in kvm_pv_enable_async_pf()
2977 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, in kvm_pv_enable_async_pf()
2981 vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); in kvm_pv_enable_async_pf()
2982 vcpu->arch.apf.delivery_as_pf_vmexit = data & KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; in kvm_pv_enable_async_pf()
2991 /* Bits 8-63 are reserved */ in kvm_pv_enable_async_pf_int()
2998 vcpu->arch.apf.msr_int_val = data; in kvm_pv_enable_async_pf_int()
3000 vcpu->arch.apf.vec = data & KVM_ASYNC_PF_VEC_MASK; in kvm_pv_enable_async_pf_int()
3007 vcpu->arch.pv_time_enabled = false; in kvmclock_reset()
3008 vcpu->arch.time = 0; in kvmclock_reset()
3013 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_all()
3019 ++vcpu->stat.tlb_flush; in kvm_vcpu_flush_tlb_guest()
3028 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in record_steal_time()
3031 /* -EAGAIN is returned in atomic context so we can just return. */ in record_steal_time()
3032 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, in record_steal_time()
3033 &map, &vcpu->arch.st.cache, false)) in record_steal_time()
3037 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in record_steal_time()
3044 trace_kvm_pv_tlb_flush(vcpu->vcpu_id, in record_steal_time()
3045 st->preempted & KVM_VCPU_FLUSH_TLB); in record_steal_time()
3046 if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB) in record_steal_time()
3049 st->preempted = 0; in record_steal_time()
3052 vcpu->arch.st.preempted = 0; in record_steal_time()
3054 if (st->version & 1) in record_steal_time()
3055 st->version += 1; /* first time write, random junk */ in record_steal_time()
3057 st->version += 1; in record_steal_time()
3061 st->steal += current->sched_info.run_delay - in record_steal_time()
3062 vcpu->arch.st.last_steal; in record_steal_time()
3063 vcpu->arch.st.last_steal = current->sched_info.run_delay; in record_steal_time()
3067 st->version += 1; in record_steal_time()
3069 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false); in record_steal_time()
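record_steal_time() also completes the PV TLB-flush handshake visible above: while a vCPU is scheduled out the host sets a preempted flag in the shared steal-time page, the guest may set a flush-request bit there instead of sending an IPI, and the host consumes both bits with xchg() before running the vCPU again. A sketch of both halves over the shared byte (bit assignments per the KVM PV ABI, taken here as assumptions):

    #include <stdint.h>

    #define KVM_VCPU_PREEMPTED  (1 << 0)
    #define KVM_VCPU_FLUSH_TLB  (1 << 1)

    /* Guest side: ask a preempted vCPU for a deferred flush, no IPI. */
    static void guest_request_flush(volatile uint8_t *preempted)
    {
        if (__atomic_load_n(preempted, __ATOMIC_ACQUIRE) & KVM_VCPU_PREEMPTED)
            __atomic_fetch_or(preempted, KVM_VCPU_FLUSH_TLB, __ATOMIC_RELEASE);
    }

    /* Host side, before re-entry (cf. the xchg() in record_steal_time()). */
    static int host_must_flush(volatile uint8_t *preempted)
    {
        return __atomic_exchange_n(preempted, 0, __ATOMIC_ACQ_REL)
               & KVM_VCPU_FLUSH_TLB;
    }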
3075 u32 msr = msr_info->index; in kvm_set_msr_common()
3076 u64 data = msr_info->data; in kvm_set_msr_common()
3089 if (msr_info->host_initiated) in kvm_set_msr_common()
3090 vcpu->arch.microcode_version = data; in kvm_set_msr_common()
3093 if (!msr_info->host_initiated) in kvm_set_msr_common()
3095 vcpu->arch.arch_capabilities = data; in kvm_set_msr_common()
3100 if (!msr_info->host_initiated) in kvm_set_msr_common()
3107 vcpu->arch.perf_capabilities = data; in kvm_set_msr_common()
3120 vcpu->arch.msr_hwcr = data; in kvm_set_msr_common()
3136 /* We support the non-activated case already */ in kvm_set_msr_common()
3139 /* Values other than LBR and BTF are vendor-specific, in kvm_set_msr_common()
3157 if (!msr_info->host_initiated) { in kvm_set_msr_common()
3158 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; in kvm_set_msr_common()
3165 vcpu->arch.ia32_tsc_adjust_msr = data; in kvm_set_msr_common()
3169 if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT) && in kvm_set_msr_common()
3170 ((vcpu->arch.ia32_misc_enable_msr ^ data) & MSR_IA32_MISC_ENABLE_MWAIT)) { in kvm_set_msr_common()
3173 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3176 vcpu->arch.ia32_misc_enable_msr = data; in kvm_set_msr_common()
3180 if (!msr_info->host_initiated) in kvm_set_msr_common()
3182 vcpu->arch.smbase = data; in kvm_set_msr_common()
3185 vcpu->arch.msr_ia32_power_ctl = data; in kvm_set_msr_common()
3188 if (msr_info->host_initiated) { in kvm_set_msr_common()
3191 u64 adj = kvm_compute_tsc_offset(vcpu, data) - vcpu->arch.l1_tsc_offset; in kvm_set_msr_common()
3193 vcpu->arch.ia32_tsc_adjust_msr += adj; in kvm_set_msr_common()
3197 if (!msr_info->host_initiated && in kvm_set_msr_common()
3207 vcpu->arch.ia32_xss = data; in kvm_set_msr_common()
3211 if (!msr_info->host_initiated) in kvm_set_msr_common()
3213 vcpu->arch.smi_count = data; in kvm_set_msr_common()
3219 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3225 kvm_write_wall_clock(vcpu->kvm, data); in kvm_set_msr_common()
3231 kvm_write_system_time(vcpu, data, false, msr_info->host_initiated); in kvm_set_msr_common()
3237 kvm_write_system_time(vcpu, data, true, msr_info->host_initiated); in kvm_set_msr_common()
3257 vcpu->arch.apf.pageready_pending = false; in kvm_set_msr_common()
3271 vcpu->arch.st.msr_val = data; in kvm_set_msr_common()
3292 if (data & (-1ULL << 1)) in kvm_set_msr_common()
3295 vcpu->arch.msr_kvm_poll_control = data; in kvm_set_msr_common()
3300 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_set_msr_common()
3320 * all pre-dating SVM, but a recommended workaround from in kvm_set_msr_common()
3336 msr_info->host_initiated); in kvm_set_msr_common()
3338 /* Drop writes to this legacy MSR -- see rdmsr in kvm_set_msr_common()
3348 vcpu->arch.osvw.length = data; in kvm_set_msr_common()
3353 vcpu->arch.osvw.status = data; in kvm_set_msr_common()
3356 if (!msr_info->host_initiated || in kvm_set_msr_common()
3360 vcpu->arch.msr_platform_info = data; in kvm_set_msr_common()
3367 vcpu->arch.msr_misc_features_enables = data; in kvm_set_msr_common()
3370 if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr)) in kvm_set_msr_common()
3383 u64 mcg_cap = vcpu->arch.mcg_cap; in get_msr_mce()
3392 data = vcpu->arch.mcg_cap; in get_msr_mce()
3397 data = vcpu->arch.mcg_ctl; in get_msr_mce()
3400 data = vcpu->arch.mcg_status; in get_msr_mce()
3406 msr - MSR_IA32_MC0_CTL, in get_msr_mce()
3407 MSR_IA32_MCx_CTL(bank_num) - MSR_IA32_MC0_CTL); in get_msr_mce()
3409 data = vcpu->arch.mce_banks[offset]; in get_msr_mce()
3420 switch (msr_info->index) { in kvm_get_msr_common()
3443 * so for existing CPU-specific MSRs. in kvm_get_msr_common()
3450 msr_info->data = 0; in kvm_get_msr_common()
3457 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3459 msr_info->data = 0; in kvm_get_msr_common()
3462 msr_info->data = vcpu->arch.microcode_version; in kvm_get_msr_common()
3465 if (!msr_info->host_initiated && in kvm_get_msr_common()
3468 msr_info->data = vcpu->arch.arch_capabilities; in kvm_get_msr_common()
3471 if (!msr_info->host_initiated && in kvm_get_msr_common()
3474 msr_info->data = vcpu->arch.perf_capabilities; in kvm_get_msr_common()
3477 msr_info->data = vcpu->arch.msr_ia32_power_ctl; in kvm_get_msr_common()
3486 * return L1's TSC value to ensure backwards-compatible in kvm_get_msr_common()
3489 u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : in kvm_get_msr_common()
3490 vcpu->arch.tsc_offset; in kvm_get_msr_common()
3492 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; in kvm_get_msr_common()
3497 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3499 msr_info->data = 3; in kvm_get_msr_common()
3513 msr_info->data = 1 << 24; in kvm_get_msr_common()
3516 msr_info->data = kvm_get_apic_base(vcpu); in kvm_get_msr_common()
3519 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data); in kvm_get_msr_common()
3521 msr_info->data = kvm_get_lapic_tscdeadline_msr(vcpu); in kvm_get_msr_common()
3524 msr_info->data = (u64)vcpu->arch.ia32_tsc_adjust_msr; in kvm_get_msr_common()
3527 msr_info->data = vcpu->arch.ia32_misc_enable_msr; in kvm_get_msr_common()
3530 if (!msr_info->host_initiated) in kvm_get_msr_common()
3532 msr_info->data = vcpu->arch.smbase; in kvm_get_msr_common()
3535 msr_info->data = vcpu->arch.smi_count; in kvm_get_msr_common()
3539 msr_info->data = 1000ULL; in kvm_get_msr_common()
3541 msr_info->data |= (((uint64_t)4ULL) << 40); in kvm_get_msr_common()
3544 msr_info->data = vcpu->arch.efer; in kvm_get_msr_common()
3550 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3556 msr_info->data = vcpu->kvm->arch.wall_clock; in kvm_get_msr_common()
3562 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3568 msr_info->data = vcpu->arch.time; in kvm_get_msr_common()
3574 msr_info->data = vcpu->arch.apf.msr_en_val; in kvm_get_msr_common()
3580 msr_info->data = vcpu->arch.apf.msr_int_val; in kvm_get_msr_common()
3586 msr_info->data = 0; in kvm_get_msr_common()
3592 msr_info->data = vcpu->arch.st.msr_val; in kvm_get_msr_common()
3598 msr_info->data = vcpu->arch.pv_eoi.msr_val; in kvm_get_msr_common()
3604 msr_info->data = vcpu->arch.msr_kvm_poll_control; in kvm_get_msr_common()
3611 case MSR_IA32_MC0_CTL ... MSR_IA32_MCx_CTL(KVM_MAX_MCE_BANKS) - 1: in kvm_get_msr_common()
3612 return get_msr_mce(vcpu, msr_info->index, &msr_info->data, in kvm_get_msr_common()
3613 msr_info->host_initiated); in kvm_get_msr_common()
3615 if (!msr_info->host_initiated && in kvm_get_msr_common()
3618 msr_info->data = vcpu->arch.ia32_xss; in kvm_get_msr_common()
3622 * Provide expected ramp-up count for K7. All other in kvm_get_msr_common()
3630 msr_info->data = 0x20000000; in kvm_get_msr_common()
3642 msr_info->index, &msr_info->data, in kvm_get_msr_common()
3643 msr_info->host_initiated); in kvm_get_msr_common()
3655 msr_info->data = 0xbe702111; in kvm_get_msr_common()
3660 msr_info->data = vcpu->arch.osvw.length; in kvm_get_msr_common()
3665 msr_info->data = vcpu->arch.osvw.status; in kvm_get_msr_common()
3668 if (!msr_info->host_initiated && in kvm_get_msr_common()
3669 !vcpu->kvm->arch.guest_can_read_msr_platform_info) in kvm_get_msr_common()
3671 msr_info->data = vcpu->arch.msr_platform_info; in kvm_get_msr_common()
3674 msr_info->data = vcpu->arch.msr_misc_features_enables; in kvm_get_msr_common()
3677 msr_info->data = vcpu->arch.msr_hwcr; in kvm_get_msr_common()
3680 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index)) in kvm_get_msr_common()
3700 for (i = 0; i < msrs->nmsrs; ++i) in __msr_io()
3722 r = -EFAULT; in msr_io()
3726 r = -E2BIG; in msr_io()
3731 entries = memdup_user(user_msrs->entries, size); in msr_io()
3741 r = -EFAULT; in msr_io()
3742 if (writeback && copy_to_user(user_msrs->entries, entries, size)) in msr_io()
3874 r = kvm_x86_ops.nested_ops->get_state ? in kvm_vm_ioctl_check_extension()
3875 kvm_x86_ops.nested_ops->get_state(NULL, NULL, 0) : 0; in kvm_vm_ioctl_check_extension()
3881 r = kvm_x86_ops.nested_ops->enable_evmcs != NULL; in kvm_vm_ioctl_check_extension()
3908 r = -EFAULT; in kvm_arch_dev_ioctl()
3915 r = -E2BIG; in kvm_arch_dev_ioctl()
3918 r = -EFAULT; in kvm_arch_dev_ioctl()
3919 if (copy_to_user(user_msr_list->indices, &msrs_to_save, in kvm_arch_dev_ioctl()
3922 if (copy_to_user(user_msr_list->indices + num_msrs_to_save, in kvm_arch_dev_ioctl()
3934 r = -EFAULT; in kvm_arch_dev_ioctl()
3938 r = kvm_dev_ioctl_get_cpuid(&cpuid, cpuid_arg->entries, in kvm_arch_dev_ioctl()
3943 r = -EFAULT; in kvm_arch_dev_ioctl()
3950 r = -EFAULT; in kvm_arch_dev_ioctl()
3961 r = -EFAULT; in kvm_arch_dev_ioctl()
3968 r = -E2BIG; in kvm_arch_dev_ioctl()
3971 r = -EFAULT; in kvm_arch_dev_ioctl()
3972 if (copy_to_user(user_msr_list->indices, &msr_based_features, in kvm_arch_dev_ioctl()
3982 r = -EINVAL; in kvm_arch_dev_ioctl()
3996 return kvm_arch_has_noncoherent_dma(vcpu->kvm); in need_emulate_wbinvd()
4004 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_load()
4005 else if (vcpu->cpu != -1 && vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4006 smp_call_function_single(vcpu->cpu, in kvm_arch_vcpu_load()
4013 vcpu->arch.host_pkru = read_pkru(); in kvm_arch_vcpu_load()
4016 if (unlikely(vcpu->arch.tsc_offset_adjustment)) { in kvm_arch_vcpu_load()
4017 adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment); in kvm_arch_vcpu_load()
4018 vcpu->arch.tsc_offset_adjustment = 0; in kvm_arch_vcpu_load()
4022 if (unlikely(vcpu->cpu != cpu) || kvm_check_tsc_unstable()) { in kvm_arch_vcpu_load()
4023 s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 : in kvm_arch_vcpu_load()
4024 rdtsc() - vcpu->arch.last_host_tsc; in kvm_arch_vcpu_load()
4030 vcpu->arch.last_guest_tsc); in kvm_arch_vcpu_load()
4032 vcpu->arch.tsc_catchup = 1; in kvm_arch_vcpu_load()
4040 * kvmclock on vcpu->cpu migration in kvm_arch_vcpu_load()
4042 if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1) in kvm_arch_vcpu_load()
4044 if (vcpu->cpu != cpu) in kvm_arch_vcpu_load()
4046 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
4058 * The vCPU can be marked preempted if and only if the VM-Exit was on in kvm_steal_time_set_preempted()
4062 * preempted if and only if the VM-Exit was due to a host interrupt. in kvm_steal_time_set_preempted()
4064 if (!vcpu->arch.at_instruction_boundary) { in kvm_steal_time_set_preempted()
4065 vcpu->stat.preemption_other++; in kvm_steal_time_set_preempted()
4069 vcpu->stat.preemption_reported++; in kvm_steal_time_set_preempted()
4070 if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED)) in kvm_steal_time_set_preempted()
4073 if (vcpu->arch.st.preempted) in kvm_steal_time_set_preempted()
4076 if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map, in kvm_steal_time_set_preempted()
4077 &vcpu->arch.st.cache, true)) in kvm_steal_time_set_preempted()
4081 offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS); in kvm_steal_time_set_preempted()
4083 st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED; in kvm_steal_time_set_preempted()
4085 kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true); in kvm_steal_time_set_preempted()
4092 if (vcpu->preempted) in kvm_arch_vcpu_put()
4093 vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu); in kvm_arch_vcpu_put()
4108 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_put()
4110 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_put()
4113 vcpu->arch.last_host_tsc = rdtsc(); in kvm_arch_vcpu_put()
4125 if (vcpu->arch.apicv_active) in kvm_vcpu_ioctl_get_lapic()
4167 * instruction boundary and with no events half-injected. in kvm_vcpu_ready_for_interrupt_injection()
4172 !vcpu->arch.exception.pending); in kvm_vcpu_ready_for_interrupt_injection()
4178 if (irq->irq >= KVM_NR_INTERRUPTS) in kvm_vcpu_ioctl_interrupt()
4179 return -EINVAL; in kvm_vcpu_ioctl_interrupt()
4181 if (!irqchip_in_kernel(vcpu->kvm)) { in kvm_vcpu_ioctl_interrupt()
4182 kvm_queue_interrupt(vcpu, irq->irq, false); in kvm_vcpu_ioctl_interrupt()
4188 * With in-kernel LAPIC, we only use this to inject EXTINT, so in kvm_vcpu_ioctl_interrupt()
4189 * fail for in-kernel 8259. in kvm_vcpu_ioctl_interrupt()
4191 if (pic_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_interrupt()
4192 return -ENXIO; in kvm_vcpu_ioctl_interrupt()
4194 if (vcpu->arch.pending_external_vector != -1) in kvm_vcpu_ioctl_interrupt()
4195 return -EEXIST; in kvm_vcpu_ioctl_interrupt()
4197 vcpu->arch.pending_external_vector = irq->irq; in kvm_vcpu_ioctl_interrupt()
4219 if (tac->flags) in vcpu_ioctl_tpr_access_reporting()
4220 return -EINVAL; in vcpu_ioctl_tpr_access_reporting()
4221 vcpu->arch.tpr_access_reporting = !!tac->enabled; in vcpu_ioctl_tpr_access_reporting()
4231 r = -EINVAL; in kvm_vcpu_ioctl_x86_setup_mce()
4237 vcpu->arch.mcg_cap = mcg_cap; in kvm_vcpu_ioctl_x86_setup_mce()
4240 vcpu->arch.mcg_ctl = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4243 vcpu->arch.mce_banks[bank*4] = ~(u64)0; in kvm_vcpu_ioctl_x86_setup_mce()
4253 u64 mcg_cap = vcpu->arch.mcg_cap; in kvm_vcpu_ioctl_x86_set_mce()
4255 u64 *banks = vcpu->arch.mce_banks; in kvm_vcpu_ioctl_x86_set_mce()
4257 if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL)) in kvm_vcpu_ioctl_x86_set_mce()
4258 return -EINVAL; in kvm_vcpu_ioctl_x86_set_mce()
4263 if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) && in kvm_vcpu_ioctl_x86_set_mce()
4264 vcpu->arch.mcg_ctl != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4266 banks += 4 * mce->bank; in kvm_vcpu_ioctl_x86_set_mce()
4271 if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0) in kvm_vcpu_ioctl_x86_set_mce()
4273 if (mce->status & MCI_STATUS_UC) { in kvm_vcpu_ioctl_x86_set_mce()
4274 if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) || in kvm_vcpu_ioctl_x86_set_mce()
4280 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4281 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4282 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4283 vcpu->arch.mcg_status = mce->mcg_status; in kvm_vcpu_ioctl_x86_set_mce()
4284 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4289 mce->status |= MCI_STATUS_OVER; in kvm_vcpu_ioctl_x86_set_mce()
4290 banks[2] = mce->addr; in kvm_vcpu_ioctl_x86_set_mce()
4291 banks[3] = mce->misc; in kvm_vcpu_ioctl_x86_set_mce()
4292 banks[1] = mce->status; in kvm_vcpu_ioctl_x86_set_mce()
4310 * modified under nVMX). Unless the per-VM capability, in kvm_vcpu_ioctl_x86_get_vcpu_events()
4317 if (!vcpu->kvm->arch.exception_payload_enabled && in kvm_vcpu_ioctl_x86_get_vcpu_events()
4318 vcpu->arch.exception.pending && vcpu->arch.exception.has_payload) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4327 if (kvm_exception_is_soft(vcpu->arch.exception.nr)) { in kvm_vcpu_ioctl_x86_get_vcpu_events()
4328 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4329 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4331 events->exception.injected = vcpu->arch.exception.injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4332 events->exception.pending = vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4338 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4339 events->exception.injected |= in kvm_vcpu_ioctl_x86_get_vcpu_events()
4340 vcpu->arch.exception.pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4342 events->exception.nr = vcpu->arch.exception.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4343 events->exception.has_error_code = vcpu->arch.exception.has_error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4344 events->exception.error_code = vcpu->arch.exception.error_code; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4345 events->exception_has_payload = vcpu->arch.exception.has_payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4346 events->exception_payload = vcpu->arch.exception.payload; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4348 events->interrupt.injected = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4349 vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4350 events->interrupt.nr = vcpu->arch.interrupt.nr; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4351 events->interrupt.soft = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4352 events->interrupt.shadow = kvm_x86_ops.get_interrupt_shadow(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4354 events->nmi.injected = vcpu->arch.nmi_injected; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4355 events->nmi.pending = vcpu->arch.nmi_pending != 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4356 events->nmi.masked = kvm_x86_ops.get_nmi_mask(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4357 events->nmi.pad = 0; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4359 events->sipi_vector = 0; /* never valid when reporting to user space */ in kvm_vcpu_ioctl_x86_get_vcpu_events()
4361 events->smi.smm = is_smm(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4362 events->smi.pending = vcpu->arch.smi_pending; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4363 events->smi.smm_inside_nmi = in kvm_vcpu_ioctl_x86_get_vcpu_events()
4364 !!(vcpu->arch.hflags & HF_SMM_INSIDE_NMI_MASK); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4365 events->smi.latched_init = kvm_lapic_latched_init(vcpu); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4367 events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_get_vcpu_events()
4370 if (vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_get_vcpu_events()
4371 events->flags |= KVM_VCPUEVENT_VALID_PAYLOAD; in kvm_vcpu_ioctl_x86_get_vcpu_events()
4373 memset(&events->reserved, 0, sizeof(events->reserved)); in kvm_vcpu_ioctl_x86_get_vcpu_events()
4381 if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING in kvm_vcpu_ioctl_x86_set_vcpu_events()
4386 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4388 if (events->flags & KVM_VCPUEVENT_VALID_PAYLOAD) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4389 if (!vcpu->kvm->arch.exception_payload_enabled) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4390 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4391 if (events->exception.pending) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4392 events->exception.injected = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4394 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4396 events->exception.pending = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4397 events->exception_has_payload = 0; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4400 if ((events->exception.injected || events->exception.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4401 (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR)) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4402 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4405 if (events->flags & KVM_VCPUEVENT_VALID_SMM && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4406 (events->smi.smm || events->smi.pending) && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4407 vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4408 return -EINVAL; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4411 vcpu->arch.exception.injected = events->exception.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4412 vcpu->arch.exception.pending = events->exception.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4413 vcpu->arch.exception.nr = events->exception.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4414 vcpu->arch.exception.has_error_code = events->exception.has_error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4415 vcpu->arch.exception.error_code = events->exception.error_code; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4416 vcpu->arch.exception.has_payload = events->exception_has_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4417 vcpu->arch.exception.payload = events->exception_payload; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4419 vcpu->arch.interrupt.injected = events->interrupt.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4420 vcpu->arch.interrupt.nr = events->interrupt.nr; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4421 vcpu->arch.interrupt.soft = events->interrupt.soft; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4422 if (events->flags & KVM_VCPUEVENT_VALID_SHADOW) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4424 events->interrupt.shadow); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4426 vcpu->arch.nmi_injected = events->nmi.injected; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4427 if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4428 vcpu->arch.nmi_pending = events->nmi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4429 kvm_x86_ops.set_nmi_mask(vcpu, events->nmi.masked); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4431 if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR && in kvm_vcpu_ioctl_x86_set_vcpu_events()
4433 vcpu->arch.apic->sipi_vector = events->sipi_vector; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4435 if (events->flags & KVM_VCPUEVENT_VALID_SMM) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4436 if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4437 if (events->smi.smm) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4438 vcpu->arch.hflags |= HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4440 vcpu->arch.hflags &= ~HF_SMM_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4442 kvm_x86_ops.nested_ops->leave_nested(vcpu); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4446 vcpu->arch.smi_pending = events->smi.pending; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4448 if (events->smi.smm) { in kvm_vcpu_ioctl_x86_set_vcpu_events()
4449 if (events->smi.smm_inside_nmi) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4450 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4452 vcpu->arch.hflags &= ~HF_SMM_INSIDE_NMI_MASK; in kvm_vcpu_ioctl_x86_set_vcpu_events()
4456 if (events->smi.latched_init) in kvm_vcpu_ioctl_x86_set_vcpu_events()
4457 set_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4459 clear_bit(KVM_APIC_INIT, &vcpu->arch.apic->pending_events); in kvm_vcpu_ioctl_x86_set_vcpu_events()
4474 memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_get_debugregs()
4476 dbgregs->dr6 = val; in kvm_vcpu_ioctl_x86_get_debugregs()
4477 dbgregs->dr7 = vcpu->arch.dr7; in kvm_vcpu_ioctl_x86_get_debugregs()
4483 if (dbgregs->flags) in kvm_vcpu_ioctl_x86_set_debugregs()
4484 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4486 if (dbgregs->dr6 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4487 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4488 if (dbgregs->dr7 & ~0xffffffffull) in kvm_vcpu_ioctl_x86_set_debugregs()
4489 return -EINVAL; in kvm_vcpu_ioctl_x86_set_debugregs()
4491 memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db)); in kvm_vcpu_ioctl_x86_set_debugregs()
4493 vcpu->arch.dr6 = dbgregs->dr6; in kvm_vcpu_ioctl_x86_set_debugregs()
4494 vcpu->arch.dr7 = dbgregs->dr7; in kvm_vcpu_ioctl_x86_set_debugregs()
4504 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in fill_xsave()
4505 u64 xstate_bv = xsave->header.xfeatures; in fill_xsave()
4515 xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE; in fill_xsave()
4520 * non-compacted offset. in fill_xsave()
4524 u64 xfeature_mask = valid & -valid; in fill_xsave()
4525 int xfeature_nr = fls64(xfeature_mask) - 1; in fill_xsave()
4533 memcpy(dest + offset, &vcpu->arch.pkru, in fill_xsave()
4534 sizeof(vcpu->arch.pkru)); in fill_xsave()
4540 valid -= xfeature_mask; in fill_xsave()
4546 struct xregs_state *xsave = &vcpu->arch.guest_fpu->state.xsave; in load_xsave()
4557 xsave->header.xfeatures = xstate_bv; in load_xsave()
4559 xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED; in load_xsave()
4562 * Copy each region from the non-compacted offset to the in load_xsave()
4567 u64 xfeature_mask = valid & -valid; in load_xsave()
4568 int xfeature_nr = fls64(xfeature_mask) - 1; in load_xsave()
4576 memcpy(&vcpu->arch.pkru, src + offset, in load_xsave()
4577 sizeof(vcpu->arch.pkru)); in load_xsave()
4582 valid -= xfeature_mask; in load_xsave()
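Both fill_xsave() and load_xsave() walk the enabled xfeature set with the valid & -valid idiom, which isolates the lowest set bit so each state component can be copied between its compacted and standard (CPUID-enumerated) offsets. The bit-walk in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t valid = 0x2c0;   /* example xfeature mask: bits 6, 7, 9 */

        while (valid) {
            uint64_t xfeature_mask = valid & -valid;  /* lowest set bit */
            int xfeature_nr = 63 - __builtin_clzll(xfeature_mask); /* fls64() - 1 */

            printf("copy xfeature %d at its own offset\n", xfeature_nr);
            valid -= xfeature_mask;                   /* clear it, continue */
        }
        return 0;
    }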
4591 fill_xsave((u8 *) guest_xsave->region, vcpu); in kvm_vcpu_ioctl_x86_get_xsave()
4593 memcpy(guest_xsave->region, in kvm_vcpu_ioctl_x86_get_xsave()
4594 &vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_get_xsave()
4596 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] = in kvm_vcpu_ioctl_x86_get_xsave()
4607 *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4608 u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)]; in kvm_vcpu_ioctl_x86_set_xsave()
4617 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4618 load_xsave(vcpu, (u8 *)guest_xsave->region); in kvm_vcpu_ioctl_x86_set_xsave()
4622 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xsave()
4623 memcpy(&vcpu->arch.guest_fpu->state.fxsave, in kvm_vcpu_ioctl_x86_set_xsave()
4624 guest_xsave->region, sizeof(struct fxregs_state)); in kvm_vcpu_ioctl_x86_set_xsave()
4633 guest_xcrs->nr_xcrs = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4637 guest_xcrs->nr_xcrs = 1; in kvm_vcpu_ioctl_x86_get_xcrs()
4638 guest_xcrs->flags = 0; in kvm_vcpu_ioctl_x86_get_xcrs()
4639 guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK; in kvm_vcpu_ioctl_x86_get_xcrs()
4640 guest_xcrs->xcrs[0].value = vcpu->arch.xcr0; in kvm_vcpu_ioctl_x86_get_xcrs()
4649 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4651 if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) in kvm_vcpu_ioctl_x86_set_xcrs()
4652 return -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4654 for (i = 0; i < guest_xcrs->nr_xcrs; i++) in kvm_vcpu_ioctl_x86_set_xcrs()
4656 if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { in kvm_vcpu_ioctl_x86_set_xcrs()
4658 guest_xcrs->xcrs[i].value); in kvm_vcpu_ioctl_x86_set_xcrs()
4662 r = -EINVAL; in kvm_vcpu_ioctl_x86_set_xcrs()
4674 if (!vcpu->arch.pv_time_enabled) in kvm_set_guest_paused()
4675 return -EINVAL; in kvm_set_guest_paused()
4676 vcpu->arch.pvclock_set_guest_stopped_request = true; in kvm_set_guest_paused()
4688 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
4689 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4691 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
4693 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
4694 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4698 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
4699 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
4700 return kvm_hv_activate_synic(vcpu, cap->cap == in kvm_vcpu_ioctl_enable_cap()
4703 if (!kvm_x86_ops.nested_ops->enable_evmcs) in kvm_vcpu_ioctl_enable_cap()
4704 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4705 r = kvm_x86_ops.nested_ops->enable_evmcs(vcpu, &vmcs_version); in kvm_vcpu_ioctl_enable_cap()
4707 user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4710 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
4715 return -ENOTTY; in kvm_vcpu_ioctl_enable_cap()
4720 vcpu->arch.pv_cpuid.enforce = cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
4721 if (vcpu->arch.pv_cpuid.enforce) in kvm_vcpu_ioctl_enable_cap()
4727 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
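/*
 * Editorial sketch (userspace, hedged): enabling a per-vCPU capability
 * via KVM_ENABLE_CAP, the ioctl served by kvm_vcpu_ioctl_enable_cap()
 * above.  vcpu_fd is assumed to come from KVM_CREATE_VCPU, and the cap
 * shown corresponds to the pv_cpuid.enforce case in the switch above.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enforce_pv_cpuid(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_ENFORCE_PV_FEATURE_CPUID;
	cap.args[0] = 1;	/* nonzero: enforce the PV CPUID bits */
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success */
}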
4734 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
4749 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4755 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4761 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4768 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4783 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4801 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4804 r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4811 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4815 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4822 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4826 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
4829 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4836 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4838 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4842 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4844 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4850 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4856 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4866 r = -EINVAL; in kvm_arch_vcpu_ioctl()
4869 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4872 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
4874 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
4880 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4889 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4900 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4909 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4921 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4931 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4941 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4947 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4965 r = -ENOMEM; in kvm_arch_vcpu_ioctl()
4971 r = -EFAULT; in kvm_arch_vcpu_ioctl()
4991 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5007 r = vcpu->arch.virtual_tsc_khz; in kvm_arch_vcpu_ioctl()
5017 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5027 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5028 if (!kvm_x86_ops.nested_ops->get_state) in kvm_arch_vcpu_ioctl()
5031 BUILD_BUG_ON(sizeof(user_data_size) != sizeof(user_kvm_nested_state->size)); in kvm_arch_vcpu_ioctl()
5032 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5033 if (get_user(user_data_size, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5036 r = kvm_x86_ops.nested_ops->get_state(vcpu, user_kvm_nested_state, in kvm_arch_vcpu_ioctl()
5042 if (put_user(r, &user_kvm_nested_state->size)) in kvm_arch_vcpu_ioctl()
5043 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5045 r = -E2BIG; in kvm_arch_vcpu_ioctl()
5057 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5058 if (!kvm_x86_ops.nested_ops->set_state) in kvm_arch_vcpu_ioctl()
5061 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5065 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5080 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl()
5081 r = kvm_x86_ops.nested_ops->set_state(vcpu, user_kvm_nested_state, &kvm_state); in kvm_arch_vcpu_ioctl()
5082 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl()
5089 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5094 cpuid_arg->entries); in kvm_arch_vcpu_ioctl()
5098 r = -EFAULT; in kvm_arch_vcpu_ioctl()
5105 r = -EINVAL; in kvm_arch_vcpu_ioctl()
5123 if (addr > (unsigned int)(-3 * PAGE_SIZE)) in kvm_vm_ioctl_set_tss_addr()
5124 return -EINVAL; in kvm_vm_ioctl_set_tss_addr()
5139 return -EINVAL; in kvm_vm_ioctl_set_nr_mmu_pages()
5141 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5144 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; in kvm_vm_ioctl_set_nr_mmu_pages()
5146 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_set_nr_mmu_pages()
5152 return kvm->arch.n_max_mmu_pages; in kvm_vm_ioctl_get_nr_mmu_pages()
5157 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_get_irqchip() local
5161 switch (chip->chip_id) { in kvm_vm_ioctl_get_irqchip()
5163 memcpy(&chip->chip.pic, &pic->pics[0], in kvm_vm_ioctl_get_irqchip()
5167 memcpy(&chip->chip.pic, &pic->pics[1], in kvm_vm_ioctl_get_irqchip()
5171 kvm_get_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_get_irqchip()
5174 r = -EINVAL; in kvm_vm_ioctl_get_irqchip()
5182 struct kvm_pic *pic = kvm->arch.vpic; in kvm_vm_ioctl_set_irqchip() local
5186 switch (chip->chip_id) { in kvm_vm_ioctl_set_irqchip()
5188 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5189 memcpy(&pic->pics[0], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5191 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5194 spin_lock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5195 memcpy(&pic->pics[1], &chip->chip.pic, in kvm_vm_ioctl_set_irqchip()
5197 spin_unlock(&pic->lock); in kvm_vm_ioctl_set_irqchip()
5200 kvm_set_ioapic(kvm, &chip->chip.ioapic); in kvm_vm_ioctl_set_irqchip()
5203 r = -EINVAL; in kvm_vm_ioctl_set_irqchip()
5206 kvm_pic_update_irq(pic); in kvm_vm_ioctl_set_irqchip()
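/*
 * Editorial sketch (userspace, hedged): reading the in-kernel PIC state
 * that kvm_vm_ioctl_get_irqchip() above copies out.  vm_fd is assumed to
 * come from KVM_CREATE_VM after KVM_CREATE_IRQCHIP; error handling is
 * trimmed to keep the shape visible.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int dump_master_pic(int vm_fd, struct kvm_pic_state *out)
{
	struct kvm_irqchip chip;

	memset(&chip, 0, sizeof(chip));
	chip.chip_id = KVM_IRQCHIP_PIC_MASTER; /* 0: master, 1: slave, 2: IOAPIC */
	if (ioctl(vm_fd, KVM_GET_IRQCHIP, &chip) < 0)
		return -1;
	*out = chip.chip.pic;
	return 0;
}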
5212 struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state; in kvm_vm_ioctl_get_pit()
5214 BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels)); in kvm_vm_ioctl_get_pit()
5216 mutex_lock(&kps->lock); in kvm_vm_ioctl_get_pit()
5217 memcpy(ps, &kps->channels, sizeof(*ps)); in kvm_vm_ioctl_get_pit()
5218 mutex_unlock(&kps->lock); in kvm_vm_ioctl_get_pit()
5225 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit()
5227 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5228 memcpy(&pit->pit_state.channels, ps, sizeof(*ps)); in kvm_vm_ioctl_set_pit()
5230 kvm_pit_load_count(pit, i, ps->channels[i].count, 0); in kvm_vm_ioctl_set_pit()
5231 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit()
5237 mutex_lock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5238 memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels, in kvm_vm_ioctl_get_pit2()
5239 sizeof(ps->channels)); in kvm_vm_ioctl_get_pit2()
5240 ps->flags = kvm->arch.vpit->pit_state.flags; in kvm_vm_ioctl_get_pit2()
5241 mutex_unlock(&kvm->arch.vpit->pit_state.lock); in kvm_vm_ioctl_get_pit2()
5242 memset(&ps->reserved, 0, sizeof(ps->reserved)); in kvm_vm_ioctl_get_pit2()
5251 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_set_pit2()
5253 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5254 prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5255 cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY; in kvm_vm_ioctl_set_pit2()
5258 memcpy(&pit->pit_state.channels, &ps->channels, in kvm_vm_ioctl_set_pit2()
5259 sizeof(pit->pit_state.channels)); in kvm_vm_ioctl_set_pit2()
5260 pit->pit_state.flags = ps->flags; in kvm_vm_ioctl_set_pit2()
5262 kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count, in kvm_vm_ioctl_set_pit2()
5264 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_set_pit2()
5271 struct kvm_pit *pit = kvm->arch.vpit; in kvm_vm_ioctl_reinject()
5273 /* pit->pit_state.lock was overloaded to prevent userspace from getting in kvm_vm_ioctl_reinject()
5277 mutex_lock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5278 kvm_pit_set_reinject(pit, control->pit_reinject); in kvm_vm_ioctl_reinject()
5279 mutex_unlock(&pit->pit_state.lock); in kvm_vm_ioctl_reinject()
5287 * Flush potentially hardware-cached dirty pages to dirty_bitmap. in kvm_arch_sync_dirty_log()
5297 return -ENXIO; in kvm_vm_ioctl_irq_line()
5299 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
5300 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
5310 if (cap->flags) in kvm_vm_ioctl_enable_cap()
5311 return -EINVAL; in kvm_vm_ioctl_enable_cap()
5313 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
5315 kvm->arch.disabled_quirks = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5319 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5320 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5321 if (cap->args[0] > MAX_NR_RESERVED_IOAPIC_PINS) in kvm_vm_ioctl_enable_cap()
5323 r = -EEXIST; in kvm_vm_ioctl_enable_cap()
5326 if (kvm->created_vcpus) in kvm_vm_ioctl_enable_cap()
5333 kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT; in kvm_vm_ioctl_enable_cap()
5334 kvm->arch.nr_reserved_ioapic_pins = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5337 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
5341 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5342 if (cap->args[0] & ~KVM_X2APIC_API_VALID_FLAGS) in kvm_vm_ioctl_enable_cap()
5345 if (cap->args[0] & KVM_X2APIC_API_USE_32BIT_IDS) in kvm_vm_ioctl_enable_cap()
5346 kvm->arch.x2apic_format = true; in kvm_vm_ioctl_enable_cap()
5347 if (cap->args[0] & KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK) in kvm_vm_ioctl_enable_cap()
5348 kvm->arch.x2apic_broadcast_quirk_disabled = true; in kvm_vm_ioctl_enable_cap()
5353 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5354 if (cap->args[0] & ~KVM_X86_DISABLE_VALID_EXITS) in kvm_vm_ioctl_enable_cap()
5357 if ((cap->args[0] & KVM_X86_DISABLE_EXITS_MWAIT) && in kvm_vm_ioctl_enable_cap()
5359 kvm->arch.mwait_in_guest = true; in kvm_vm_ioctl_enable_cap()
5360 if (cap->args[0] & KVM_X86_DISABLE_EXITS_HLT) in kvm_vm_ioctl_enable_cap()
5361 kvm->arch.hlt_in_guest = true; in kvm_vm_ioctl_enable_cap()
5362 if (cap->args[0] & KVM_X86_DISABLE_EXITS_PAUSE) in kvm_vm_ioctl_enable_cap()
5363 kvm->arch.pause_in_guest = true; in kvm_vm_ioctl_enable_cap()
5364 if (cap->args[0] & KVM_X86_DISABLE_EXITS_CSTATE) in kvm_vm_ioctl_enable_cap()
5365 kvm->arch.cstate_in_guest = true; in kvm_vm_ioctl_enable_cap()
5369 kvm->arch.guest_can_read_msr_platform_info = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5373 kvm->arch.exception_payload_enabled = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5377 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
5378 if (cap->args[0] & ~(KVM_MSR_EXIT_REASON_INVAL | in kvm_vm_ioctl_enable_cap()
5382 kvm->arch.user_space_msr_mask = cap->args[0]; in kvm_vm_ioctl_enable_cap()
5386 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
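/*
 * Editorial sketch (userspace, hedged): the KVM_CAP_X86_DISABLE_EXITS
 * case above is driven from the VMM like this.  It must happen before
 * any vCPU exists, and only bits advertised by KVM_CHECK_EXTENSION
 * should be passed.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int disable_hlt_pause_exits(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X86_DISABLE_EXITS;
	cap.args[0] = KVM_X86_DISABLE_EXITS_HLT | KVM_X86_DISABLE_EXITS_PAUSE;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}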
5400 msr_filter->default_allow = default_allow; in kvm_alloc_msr_filter()
5411 for (i = 0; i < msr_filter->count; i++) in kvm_free_msr_filter()
5412 kfree(msr_filter->ranges[i].bitmap); in kvm_free_msr_filter()
5425 if (!user_range->nmsrs) in kvm_add_msr_filter()
5428 bitmap_size = BITS_TO_LONGS(user_range->nmsrs) * sizeof(long); in kvm_add_msr_filter()
5430 return -EINVAL; in kvm_add_msr_filter()
5432 bitmap = memdup_user((__user u8*)user_range->bitmap, bitmap_size); in kvm_add_msr_filter()
5437 .flags = user_range->flags, in kvm_add_msr_filter()
5438 .base = user_range->base, in kvm_add_msr_filter()
5439 .nmsrs = user_range->nmsrs, in kvm_add_msr_filter()
5444 r = -EINVAL; in kvm_add_msr_filter()
5449 r = -EINVAL; in kvm_add_msr_filter()
5454 msr_filter->ranges[msr_filter->count] = range; in kvm_add_msr_filter()
5455 msr_filter->count++; in kvm_add_msr_filter()
5472 if (filter->flags & ~KVM_MSR_FILTER_DEFAULT_DENY) in kvm_vm_ioctl_set_msr_filter()
5473 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
5475 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) in kvm_vm_ioctl_set_msr_filter()
5476 empty &= !filter->ranges[i].nmsrs; in kvm_vm_ioctl_set_msr_filter()
5478 default_allow = !(filter->flags & KVM_MSR_FILTER_DEFAULT_DENY); in kvm_vm_ioctl_set_msr_filter()
5480 return -EINVAL; in kvm_vm_ioctl_set_msr_filter()
5484 return -ENOMEM; in kvm_vm_ioctl_set_msr_filter()
5486 for (i = 0; i < ARRAY_SIZE(filter->ranges); i++) { in kvm_vm_ioctl_set_msr_filter()
5487 r = kvm_add_msr_filter(new_filter, &filter->ranges[i]); in kvm_vm_ioctl_set_msr_filter()
5494 mutex_lock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
5496 /* The per-VM filter is protected by kvm->lock... */ in kvm_vm_ioctl_set_msr_filter()
5497 old_filter = srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1); in kvm_vm_ioctl_set_msr_filter()
5499 rcu_assign_pointer(kvm->arch.msr_filter, new_filter); in kvm_vm_ioctl_set_msr_filter()
5500 synchronize_srcu(&kvm->srcu); in kvm_vm_ioctl_set_msr_filter()
5505 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_set_msr_filter()
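/*
 * Editorial sketch (userspace, hedged): building one allow-range for
 * KVM_X86_SET_MSR_FILTER, the ioctl served by
 * kvm_vm_ioctl_set_msr_filter() above.  Each range covers nmsrs MSRs
 * starting at base; with KVM_MSR_FILTER_DEFAULT_DENY, a set bitmap bit
 * allows the access.  The MSR index passed in is illustrative.
 */
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int allow_one_msr(int vm_fd, __u32 msr)
{
	__u8 bitmap[1] = { 0x01 };	/* bit 0 set: "base + 0" is allowed */
	struct kvm_msr_filter filter;

	memset(&filter, 0, sizeof(filter));
	filter.flags = KVM_MSR_FILTER_DEFAULT_DENY;
	filter.ranges[0].flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE;
	filter.ranges[0].base = msr;
	filter.ranges[0].nmsrs = 1;
	filter.ranges[0].bitmap = bitmap; /* kernel memdup()s it during the ioctl */
	return ioctl(vm_fd, KVM_X86_SET_MSR_FILTER, &filter);
}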
5515 __u32 base; member
5530 struct kvm *kvm = filp->private_data; in kvm_arch_vm_compat_ioctl()
5531 long r = -ENOTTY; in kvm_arch_vm_compat_ioctl()
5542 return -EFAULT; in kvm_arch_vm_compat_ioctl()
5550 .flags = cr->flags, in kvm_arch_vm_compat_ioctl()
5551 .nmsrs = cr->nmsrs, in kvm_arch_vm_compat_ioctl()
5552 .base = cr->base, in kvm_arch_vm_compat_ioctl()
5553 .bitmap = (__u8 *)(ulong)cr->bitmap, in kvm_arch_vm_compat_ioctl()
5569 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
5571 int r = -ENOTTY; in kvm_arch_vm_ioctl()
5573 * This union makes it completely explicit to gcc-3.x in kvm_arch_vm_ioctl()
5590 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5591 r = -EINVAL; in kvm_arch_vm_ioctl()
5592 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5594 r = -EFAULT; in kvm_arch_vm_ioctl()
5599 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5609 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5611 r = -EEXIST; in kvm_arch_vm_ioctl()
5615 r = -EINVAL; in kvm_arch_vm_ioctl()
5616 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5635 /* Write kvm->irq_routing before enabling irqchip_in_kernel. */ in kvm_arch_vm_ioctl()
5637 kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL; in kvm_arch_vm_ioctl()
5639 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5646 r = -EFAULT; in kvm_arch_vm_ioctl()
5651 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5652 r = -EEXIST; in kvm_arch_vm_ioctl()
5653 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5655 r = -ENOMEM; in kvm_arch_vm_ioctl()
5656 kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags); in kvm_arch_vm_ioctl()
5657 if (kvm->arch.vpit) in kvm_arch_vm_ioctl()
5660 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5663 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
5672 r = -ENXIO; in kvm_arch_vm_ioctl()
5678 r = -EFAULT; in kvm_arch_vm_ioctl()
5687 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */ in kvm_arch_vm_ioctl()
5696 r = -ENXIO; in kvm_arch_vm_ioctl()
5705 r = -EFAULT; in kvm_arch_vm_ioctl()
5708 r = -ENXIO; in kvm_arch_vm_ioctl()
5709 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5714 r = -EFAULT; in kvm_arch_vm_ioctl()
5721 r = -EFAULT; in kvm_arch_vm_ioctl()
5724 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5725 r = -ENXIO; in kvm_arch_vm_ioctl()
5726 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5730 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5734 r = -ENXIO; in kvm_arch_vm_ioctl()
5735 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5740 r = -EFAULT; in kvm_arch_vm_ioctl()
5747 r = -EFAULT; in kvm_arch_vm_ioctl()
5750 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5751 r = -ENXIO; in kvm_arch_vm_ioctl()
5752 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5756 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5761 r = -EFAULT; in kvm_arch_vm_ioctl()
5764 r = -ENXIO; in kvm_arch_vm_ioctl()
5765 if (!kvm->arch.vpit) in kvm_arch_vm_ioctl()
5772 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
5773 if (kvm->created_vcpus) in kvm_arch_vm_ioctl()
5774 r = -EBUSY; in kvm_arch_vm_ioctl()
5776 kvm->arch.bsp_vcpu_id = arg; in kvm_arch_vm_ioctl()
5777 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
5781 r = -EFAULT; in kvm_arch_vm_ioctl()
5784 r = -EINVAL; in kvm_arch_vm_ioctl()
5787 memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); in kvm_arch_vm_ioctl()
5795 r = -EFAULT; in kvm_arch_vm_ioctl()
5799 r = -EINVAL; in kvm_arch_vm_ioctl()
5811 kvm->arch.kvmclock_offset += user_ns.clock - now_ns; in kvm_arch_vm_ioctl()
5821 user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; in kvm_arch_vm_ioctl()
5824 r = -EFAULT; in kvm_arch_vm_ioctl()
5831 r = -ENOTTY; in kvm_arch_vm_ioctl()
5839 r = -EFAULT; in kvm_arch_vm_ioctl()
5843 r = -ENOTTY; in kvm_arch_vm_ioctl()
5851 r = -EFAULT; in kvm_arch_vm_ioctl()
5855 r = -ENOTTY; in kvm_arch_vm_ioctl()
5863 r = -EFAULT; in kvm_arch_vm_ioctl()
5877 return -EFAULT; in kvm_arch_vm_ioctl()
5883 r = -ENOTTY; in kvm_arch_vm_ioctl()
5944 msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >= in kvm_init_msr_list()
5949 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >= in kvm_init_msr_list()
5954 if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >= in kvm_init_msr_list()
5992 !kvm_iodevice_write(vcpu, &vcpu->arch.apic->dev, addr, n, v)) in vcpu_mmio_write()
5997 len -= n; in vcpu_mmio_write()
6012 !kvm_iodevice_read(vcpu, &vcpu->arch.apic->dev, in vcpu_mmio_read()
6019 len -= n; in vcpu_mmio_read()
6045 /* NPT walks are always user-walks */ in translate_nested_gpa()
6047 t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception); in translate_nested_gpa()
6056 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_read()
6064 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_fetch()
6072 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in kvm_mmu_gva_to_gpa_write()
6079 return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception); in kvm_mmu_gva_to_gpa_system()
6090 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access, in kvm_read_guest_virt_helper()
6092 unsigned offset = addr & (PAGE_SIZE-1); in kvm_read_guest_virt_helper()
6093 unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_read_guest_virt_helper()
6105 bytes -= toread; in kvm_read_guest_virt_helper()
6124 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, in kvm_fetch_guest_virt()
6129 offset = addr & (PAGE_SIZE-1); in kvm_fetch_guest_virt()
6131 bytes = (unsigned)PAGE_SIZE - offset; in kvm_fetch_guest_virt()
6188 gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, in kvm_write_guest_virt_helper()
6191 unsigned offset = addr & (PAGE_SIZE-1); in kvm_write_guest_virt_helper()
6192 unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset); in kvm_write_guest_virt_helper()
6203 bytes -= towrite; in kvm_write_guest_virt_helper()
6229 vcpu->arch.l1tf_flush_l1d = true; in kvm_write_guest_virt_system()
6286 && !permission_fault(vcpu, vcpu->arch.walk_mmu, in vcpu_mmio_gva_to_gpa()
6287 vcpu->arch.mmio_access, 0, access)) { in vcpu_mmio_gva_to_gpa()
6288 *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | in vcpu_mmio_gva_to_gpa()
6289 (gva & (PAGE_SIZE - 1)); in vcpu_mmio_gva_to_gpa()
6294 *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); in vcpu_mmio_gva_to_gpa()
6297 return -1; in vcpu_mmio_gva_to_gpa()
6328 if (vcpu->mmio_read_completed) { in read_prepare()
6330 vcpu->mmio_fragments[0].gpa, val); in read_prepare()
6331 vcpu->mmio_read_completed = 0; in read_prepare()
6366 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; in write_exit_mmio()
6368 memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); in write_exit_mmio()
6394 bool write = ops->write; in emulator_read_write_onepage()
6396 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in emulator_read_write_onepage()
6405 if (ctxt->gpa_available && emulator_can_use_gpa(ctxt) && in emulator_read_write_onepage()
6406 (addr & ~PAGE_MASK) == (ctxt->gpa_val & ~PAGE_MASK)) { in emulator_read_write_onepage()
6407 gpa = ctxt->gpa_val; in emulator_read_write_onepage()
6415 if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes)) in emulator_read_write_onepage()
6421 handled = ops->read_write_mmio(vcpu, gpa, bytes, val); in emulator_read_write_onepage()
6426 bytes -= handled; in emulator_read_write_onepage()
6429 WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS); in emulator_read_write_onepage()
6430 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++]; in emulator_read_write_onepage()
6431 frag->gpa = gpa; in emulator_read_write_onepage()
6432 frag->data = val; in emulator_read_write_onepage()
6433 frag->len = bytes; in emulator_read_write_onepage()
6447 if (ops->read_write_prepare && in emulator_read_write()
6448 ops->read_write_prepare(vcpu, val, bytes)) in emulator_read_write()
6451 vcpu->mmio_nr_fragments = 0; in emulator_read_write()
6454 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { in emulator_read_write()
6457 now = -addr & ~PAGE_MASK; in emulator_read_write()
6464 if (ctxt->mode != X86EMUL_MODE_PROT64) in emulator_read_write()
6467 bytes -= now; in emulator_read_write()
6475 if (!vcpu->mmio_nr_fragments) in emulator_read_write()
6478 gpa = vcpu->mmio_fragments[0].gpa; in emulator_read_write()
6480 vcpu->mmio_needed = 1; in emulator_read_write()
6481 vcpu->mmio_cur_fragment = 0; in emulator_read_write()
6483 vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len); in emulator_read_write()
6484 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write; in emulator_read_write()
6485 vcpu->run->exit_reason = KVM_EXIT_MMIO; in emulator_read_write()
6486 vcpu->run->mmio.phys_addr = gpa; in emulator_read_write()
6488 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); in emulator_read_write()
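/*
 * Editorial sketch (standalone C): emulator_read_write() above splits an
 * access that straddles a page boundary into two per-page chunks.
 * "-addr & ~PAGE_MASK" is the byte count left before the next 4 KiB
 * page, as this demo shows.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t addr = 0x1ffd;		/* 3 bytes before a page end */
	unsigned int bytes = 8;

	if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {	/* crosses a page? */
		unsigned int now = -addr & ~PAGE_MASK;	/* bytes to page end: 3 */

		printf("first chunk: %u bytes, second: %u bytes\n",
		       now, bytes - now);
	}
	return 0;
}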
6536 if (bytes > 8 || (bytes & (bytes - 1))) in emulator_cmpxchg_emulated()
6550 page_line_mask = ~(cache_line_size() - 1); in emulator_cmpxchg_emulated()
6554 if (((gpa + bytes - 1) & page_line_mask) != (gpa & page_line_mask)) in emulator_cmpxchg_emulated()
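/*
 * Editorial sketch (standalone C): emulator_cmpxchg_emulated() above
 * bails out when the operand straddles a cache line, since that could
 * not be performed atomically with a single host cmpxchg.  The 64-byte
 * line size here is an assumption; KVM asks cache_line_size().
 */
#include <stdint.h>
#include <stdio.h>

static int crosses_cache_line(uint64_t gpa, unsigned int bytes)
{
	uint64_t line_mask = ~(64ULL - 1);	/* assume 64-byte lines */

	return ((gpa + bytes - 1) & line_mask) != (gpa & line_mask);
}

int main(void)
{
	printf("%d %d\n", crosses_cache_line(0x3c, 8),	/* 1: 0x3c..0x43 */
	       crosses_cache_line(0x40, 8));		/* 0: 0x40..0x47 */
	return 0;
}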
6598 for (i = 0; i < vcpu->arch.pio.count; i++) { in kernel_pio()
6599 if (vcpu->arch.pio.in) in kernel_pio()
6600 r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port, in kernel_pio()
6601 vcpu->arch.pio.size, pd); in kernel_pio()
6604 vcpu->arch.pio.port, vcpu->arch.pio.size, in kernel_pio()
6608 pd += vcpu->arch.pio.size; in kernel_pio()
6617 vcpu->arch.pio.port = port; in emulator_pio_in_out()
6618 vcpu->arch.pio.in = in; in emulator_pio_in_out()
6619 vcpu->arch.pio.count = count; in emulator_pio_in_out()
6620 vcpu->arch.pio.size = size; in emulator_pio_in_out()
6622 if (!kernel_pio(vcpu, vcpu->arch.pio_data)) { in emulator_pio_in_out()
6623 vcpu->arch.pio.count = 0; in emulator_pio_in_out()
6627 vcpu->run->exit_reason = KVM_EXIT_IO; in emulator_pio_in_out()
6628 vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT; in emulator_pio_in_out()
6629 vcpu->run->io.size = size; in emulator_pio_in_out()
6630 vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE; in emulator_pio_in_out()
6631 vcpu->run->io.count = count; in emulator_pio_in_out()
6632 vcpu->run->io.port = port; in emulator_pio_in_out()
6642 if (vcpu->arch.pio.count) in emulator_pio_in()
6645 memset(vcpu->arch.pio_data, 0, size * count); in emulator_pio_in()
6650 memcpy(val, vcpu->arch.pio_data, size * count); in emulator_pio_in()
6651 trace_kvm_pio(KVM_PIO_IN, port, size, count, vcpu->arch.pio_data); in emulator_pio_in()
6652 vcpu->arch.pio.count = 0; in emulator_pio_in()
6671 memcpy(vcpu->arch.pio_data, val, size * count); in emulator_pio_out()
6672 trace_kvm_pio(KVM_PIO_OUT, port, size, count, vcpu->arch.pio_data); in emulator_pio_out()
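/*
 * Editorial sketch (userspace, hedged): when emulator_pio_in_out() above
 * cannot complete port I/O in the kernel, KVM_RUN returns KVM_EXIT_IO
 * and the VMM services it from the shared kvm_run page.  "run" is the
 * mmap()ed vCPU region; handle_out() is a hypothetical device backend.
 */
#include <linux/kvm.h>
#include <stdint.h>

void handle_out(uint16_t port, const void *data, unsigned int size); /* hypothetical */

static void handle_io_exit(struct kvm_run *run)
{
	uint8_t *data = (uint8_t *)run + run->io.data_offset;
	uint32_t i;

	if (run->io.direction == KVM_EXIT_IO_OUT)
		for (i = 0; i < run->io.count; i++)	/* one slot per rep */
			handle_out(run->io.port,
				   data + i * run->io.size, run->io.size);
	/* KVM_EXIT_IO_IN: fill the same slots before the next KVM_RUN */
}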
6701 cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6702 smp_call_function_many(vcpu->arch.wbinvd_dirty_mask, in kvm_emulate_wbinvd_noskip()
6705 cpumask_clear(vcpu->arch.wbinvd_dirty_mask); in kvm_emulate_wbinvd_noskip()
6740 return (curr_cr & ~((1ULL << 32) - 1)) | new_val; in mk_cr_64()
6753 value = vcpu->arch.cr2; in emulator_get_cr()
6782 vcpu->arch.cr2 = val; in emulator_set_cr()
6795 res = -1; in emulator_set_cr()
6851 set_desc_base(desc, (unsigned long)var.base); in emulator_get_segment()
6854 *base3 = var.base >> 32; in emulator_get_segment()
6856 desc->type = var.type; in emulator_get_segment()
6857 desc->s = var.s; in emulator_get_segment()
6858 desc->dpl = var.dpl; in emulator_get_segment()
6859 desc->p = var.present; in emulator_get_segment()
6860 desc->avl = var.avl; in emulator_get_segment()
6861 desc->l = var.l; in emulator_get_segment()
6862 desc->d = var.db; in emulator_get_segment()
6863 desc->g = var.g; in emulator_get_segment()
6876 var.base = get_desc_base(desc); in emulator_set_segment()
6878 var.base |= ((u64)base3) << 32; in emulator_set_segment()
6881 if (desc->g) in emulator_set_segment()
6883 var.type = desc->type; in emulator_set_segment()
6884 var.dpl = desc->dpl; in emulator_set_segment()
6885 var.db = desc->d; in emulator_set_segment()
6886 var.s = desc->s; in emulator_set_segment()
6887 var.l = desc->l; in emulator_set_segment()
6888 var.g = desc->g; in emulator_set_segment()
6889 var.avl = desc->avl; in emulator_set_segment()
6890 var.present = desc->p; in emulator_set_segment()
6934 return vcpu->arch.smbase; in emulator_get_smbase()
6941 vcpu->arch.smbase = smbase; in emulator_set_smbase()
6958 emul_to_vcpu(ctxt)->arch.halt_request = 1; in emulator_halt()
6966 &ctxt->exception); in emulator_intercept()
7013 return emul_to_vcpu(ctxt)->arch.hflags; in emulator_get_hflags()
7020 vcpu->arch.hflags = emul_flags; in emulator_set_hflags()
7109 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in inject_emulated_exception()
7110 if (ctxt->exception.vector == PF_VECTOR) in inject_emulated_exception()
7111 return kvm_inject_emulated_page_fault(vcpu, &ctxt->exception); in inject_emulated_exception()
7113 if (ctxt->exception.error_code_valid) in inject_emulated_exception()
7114 kvm_queue_exception_e(vcpu, ctxt->exception.vector, in inject_emulated_exception()
7115 ctxt->exception.error_code); in inject_emulated_exception()
7117 kvm_queue_exception(vcpu, ctxt->exception.vector); in inject_emulated_exception()
7131 ctxt->vcpu = vcpu; in alloc_emulate_ctxt()
7132 ctxt->ops = &emulate_ops; in alloc_emulate_ctxt()
7133 vcpu->arch.emulate_ctxt = ctxt; in alloc_emulate_ctxt()
7140 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in init_emulate_ctxt()
7145 ctxt->gpa_available = false; in init_emulate_ctxt()
7146 ctxt->eflags = kvm_get_rflags(vcpu); in init_emulate_ctxt()
7147 ctxt->tf = (ctxt->eflags & X86_EFLAGS_TF) != 0; in init_emulate_ctxt()
7149 ctxt->eip = kvm_rip_read(vcpu); in init_emulate_ctxt()
7150 ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL : in init_emulate_ctxt()
7151 (ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 : in init_emulate_ctxt()
7159 ctxt->interruptibility = 0; in init_emulate_ctxt()
7160 ctxt->have_exception = false; in init_emulate_ctxt()
7161 ctxt->exception.vector = -1; in init_emulate_ctxt()
7162 ctxt->perm_ok = false; in init_emulate_ctxt()
7165 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in init_emulate_ctxt()
7170 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_inject_realmode_interrupt()
7175 ctxt->op_bytes = 2; in kvm_inject_realmode_interrupt()
7176 ctxt->ad_bytes = 2; in kvm_inject_realmode_interrupt()
7177 ctxt->_eip = ctxt->eip + inc_eip; in kvm_inject_realmode_interrupt()
7183 ctxt->eip = ctxt->_eip; in kvm_inject_realmode_interrupt()
7184 kvm_rip_write(vcpu, ctxt->eip); in kvm_inject_realmode_interrupt()
7185 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_inject_realmode_interrupt()
7192 ++vcpu->stat.insn_emulation_fail; in handle_emulation_failure()
7201 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7202 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7203 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7210 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_emulation_failure()
7211 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in handle_emulation_failure()
7212 vcpu->run->internal.ndata = 0; in handle_emulation_failure()
7233 if (!vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7251 * retry instruction -> write #PF -> emulation fail -> retry in reexecute_instruction()
7252 * instruction -> ... in reexecute_instruction()
7254 pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7265 /* The instructions are well-emulated on direct mmu. */ in reexecute_instruction()
7266 if (vcpu->arch.mmu->direct_map) { in reexecute_instruction()
7269 spin_lock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7270 indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; in reexecute_instruction()
7271 spin_unlock(&vcpu->kvm->mmu_lock); in reexecute_instruction()
7274 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7281 * and it failed try to unshadow page and re-enter the in reexecute_instruction()
7284 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in reexecute_instruction()
7300 last_retry_eip = vcpu->arch.last_retry_eip; in retry_instruction()
7301 last_retry_addr = vcpu->arch.last_retry_addr; in retry_instruction()
7304 * If the emulation is caused by #PF and it is non-page_table in retry_instruction()
7305 * writing instruction, it means the VM-EXIT is caused by shadow in retry_instruction()
7309 * Note: if the guest uses a non-page-table modifying instruction in retry_instruction()
7316 vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; in retry_instruction()
7328 if (ctxt->eip == last_retry_eip && last_retry_addr == cr2_or_gpa) in retry_instruction()
7331 vcpu->arch.last_retry_eip = ctxt->eip; in retry_instruction()
7332 vcpu->arch.last_retry_addr = cr2_or_gpa; in retry_instruction()
7334 if (!vcpu->arch.mmu->direct_map) in retry_instruction()
7337 kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); in retry_instruction()
7347 if (!(vcpu->arch.hflags & HF_SMM_MASK)) { in kvm_smm_changed()
7349 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, false); in kvm_smm_changed()
7375 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_do_singlestep()
7377 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { in kvm_vcpu_do_singlestep()
7378 kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_do_singlestep()
7379 kvm_run->debug.arch.pc = kvm_get_linear_rip(vcpu); in kvm_vcpu_do_singlestep()
7380 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_do_singlestep()
7381 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_do_singlestep()
7413 if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && in kvm_vcpu_check_code_breakpoint()
7414 (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { in kvm_vcpu_check_code_breakpoint()
7415 struct kvm_run *kvm_run = vcpu->run; in kvm_vcpu_check_code_breakpoint()
7418 vcpu->arch.guest_debug_dr7, in kvm_vcpu_check_code_breakpoint()
7419 vcpu->arch.eff_db); in kvm_vcpu_check_code_breakpoint()
7422 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in kvm_vcpu_check_code_breakpoint()
7423 kvm_run->debug.arch.pc = eip; in kvm_vcpu_check_code_breakpoint()
7424 kvm_run->debug.arch.exception = DB_VECTOR; in kvm_vcpu_check_code_breakpoint()
7425 kvm_run->exit_reason = KVM_EXIT_DEBUG; in kvm_vcpu_check_code_breakpoint()
7431 if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && in kvm_vcpu_check_code_breakpoint()
7435 vcpu->arch.dr7, in kvm_vcpu_check_code_breakpoint()
7436 vcpu->arch.db); in kvm_vcpu_check_code_breakpoint()
7450 switch (ctxt->opcode_len) { in is_vmware_backdoor_opcode()
7452 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7469 switch (ctxt->b) { in is_vmware_backdoor_opcode()
7482 * (and wrong) when emulating on an intercepted fault-like exception[*], as
7492 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_decode_emulated_instruction()
7497 ctxt->ud = emulation_type & EMULTYPE_TRAP_UD; in x86_decode_emulated_instruction()
7502 ++vcpu->stat.insn_emulation; in x86_decode_emulated_instruction()
7512 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in x86_emulate_instruction()
7519 vcpu->arch.l1tf_flush_l1d = true; in x86_emulate_instruction()
7525 write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable; in x86_emulate_instruction()
7526 vcpu->arch.write_fault_to_shadow_pgtable = false; in x86_emulate_instruction()
7533 * are fault-like and are higher priority than any faults on in x86_emulate_instruction()
7552 if (ctxt->have_exception) { in x86_emulate_instruction()
7554 * #UD should result in just EMULATION_FAILED, and trap-like in x86_emulate_instruction()
7557 WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR || in x86_emulate_instruction()
7558 exception_type(ctxt->exception.vector) == EXCPT_TRAP); in x86_emulate_instruction()
7575 * updating interruptibility state and injecting single-step #DBs. in x86_emulate_instruction()
7578 kvm_rip_write(vcpu, ctxt->_eip); in x86_emulate_instruction()
7579 if (ctxt->eflags & X86_EFLAGS_RF) in x86_emulate_instruction()
7580 kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); in x86_emulate_instruction()
7589 if (vcpu->arch.emulate_regs_need_sync_from_vcpu) { in x86_emulate_instruction()
7590 vcpu->arch.emulate_regs_need_sync_from_vcpu = false; in x86_emulate_instruction()
7597 ctxt->exception.address = cr2_or_gpa; in x86_emulate_instruction()
7600 if (vcpu->arch.mmu->direct_map) { in x86_emulate_instruction()
7601 ctxt->gpa_available = true; in x86_emulate_instruction()
7602 ctxt->gpa_val = cr2_or_gpa; in x86_emulate_instruction()
7606 ctxt->exception.address = 0; in x86_emulate_instruction()
7622 if (ctxt->have_exception) { in x86_emulate_instruction()
7626 } else if (vcpu->arch.pio.count) { in x86_emulate_instruction()
7627 if (!vcpu->arch.pio.in) { in x86_emulate_instruction()
7628 /* FIXME: return into emulator if single-stepping. */ in x86_emulate_instruction()
7629 vcpu->arch.pio.count = 0; in x86_emulate_instruction()
7632 vcpu->arch.complete_userspace_io = complete_emulated_pio; in x86_emulate_instruction()
7635 } else if (vcpu->mmio_needed) { in x86_emulate_instruction()
7636 ++vcpu->stat.mmio_exits; in x86_emulate_instruction()
7638 if (!vcpu->mmio_is_write) in x86_emulate_instruction()
7641 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in x86_emulate_instruction()
7649 toggle_interruptibility(vcpu, ctxt->interruptibility); in x86_emulate_instruction()
7650 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in x86_emulate_instruction()
7653 * Note, EXCPT_DB is assumed to be fault-like as the emulator in x86_emulate_instruction()
7655 * of which are fault-like. in x86_emulate_instruction()
7657 if (!ctxt->have_exception || in x86_emulate_instruction()
7658 exception_type(ctxt->exception.vector) == EXCPT_TRAP) { in x86_emulate_instruction()
7659 kvm_rip_write(vcpu, ctxt->eip); in x86_emulate_instruction()
7660 if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP))) in x86_emulate_instruction()
7664 __kvm_set_rflags(vcpu, ctxt->eflags); in x86_emulate_instruction()
7673 if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) in x86_emulate_instruction()
7676 vcpu->arch.emulate_regs_need_sync_to_vcpu = true; in x86_emulate_instruction()
7696 vcpu->arch.pio.count = 0; in complete_fast_pio_out_port_0x7e()
7702 vcpu->arch.pio.count = 0; in complete_fast_pio_out()
7704 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) in complete_fast_pio_out()
7724 kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_OUT_7E_INC_RIP)) { in kvm_fast_pio_out()
7725 vcpu->arch.complete_userspace_io = in kvm_fast_pio_out()
7729 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_out()
7730 vcpu->arch.complete_userspace_io = complete_fast_pio_out; in kvm_fast_pio_out()
7740 BUG_ON(vcpu->arch.pio.count != 1); in complete_fast_pio_in()
7742 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) { in complete_fast_pio_in()
7743 vcpu->arch.pio.count = 0; in complete_fast_pio_in()
7748 val = (vcpu->arch.pio.size < 4) ? kvm_rax_read(vcpu) : 0; in complete_fast_pio_in()
7751 * Since vcpu->arch.pio.count == 1 let emulator_pio_in perform in complete_fast_pio_in()
7754 emulator_pio_in(vcpu, vcpu->arch.pio.size, vcpu->arch.pio.port, &val, 1); in complete_fast_pio_in()
7775 vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu); in kvm_fast_pio_in()
7776 vcpu->arch.complete_userspace_io = complete_fast_pio_in; in kvm_fast_pio_in()
7805 khz = freq->new; in tsc_khz_changed()
7826 /* TSC frequency always matches when on Hyper-V */ in kvm_hyperv_tsc_notifier()
7832 struct kvm_arch *ka = &kvm->arch; in kvm_hyperv_tsc_notifier()
7834 spin_lock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7844 spin_unlock(&ka->pvclock_gtod_sync_lock); in kvm_hyperv_tsc_notifier()
7900 if (vcpu->cpu != cpu) in __kvmclock_cpufreq_notifier()
7903 if (vcpu->cpu != raw_smp_processor_id()) in __kvmclock_cpufreq_notifier()
7909 if (freq->old < freq->new && send_ipi) { in __kvmclock_cpufreq_notifier()
7932 if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) in kvmclock_cpufreq_notifier()
7934 if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) in kvmclock_cpufreq_notifier()
7937 for_each_cpu(cpu, freq->policy->cpus) in kvmclock_cpufreq_notifier()
7965 if (policy->cpuinfo.max_freq) in kvm_timer_init()
7966 max_tsc_khz = policy->cpuinfo.max_freq; in kvm_timer_init()
8013 (unsigned long *)&vcpu->arch.pmu.global_status); in kvm_handle_intel_pt_intr()
8069 if (!gtod_is_based_on_tsc(gtod->clock.vclock_mode) && in pvclock_gtod_notify()
8087 r = -EEXIST; in kvm_arch_init()
8091 if (!ops->cpu_has_kvm_support()) { in kvm_arch_init()
8093 r = -EOPNOTSUPP; in kvm_arch_init()
8096 if (ops->disabled_by_bios()) { in kvm_arch_init()
8098 r = -EOPNOTSUPP; in kvm_arch_init()
8109 r = -EOPNOTSUPP; in kvm_arch_init()
8113 r = -ENOMEM; in kvm_arch_init()
8143 if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest()) in kvm_arch_init()
8153 if (pi_inject_timer == -1) in kvm_arch_init()
8202 ++vcpu->stat.halt_exits; in kvm_vcpu_halt()
8204 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_vcpu_halt()
8207 vcpu->run->exit_reason = KVM_EXIT_HLT; in kvm_vcpu_halt()
8217 * TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered in kvm_emulate_halt()
8234 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8237 return -KVM_EOPNOTSUPP; in kvm_pv_clock_pairing()
8246 if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, in kvm_pv_clock_pairing()
8248 ret = -KVM_EFAULT; in kvm_pv_clock_pairing()
8257 * @apicid - apicid of vcpu to be kicked.
8277 return (READ_ONCE(kvm->arch.apicv_inhibit_reasons) == 0); in kvm_apicv_activated()
8285 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8288 &kvm->arch.apicv_inhibit_reasons); in kvm_apicv_init()
8298 map = rcu_dereference(kvm->arch.apic_map); in kvm_sched_yield()
8300 if (likely(map) && dest_id <= map->max_apic_id && map->phys_map[dest_id]) in kvm_sched_yield()
8301 target = map->phys_map[dest_id]->vcpu; in kvm_sched_yield()
8305 if (target && READ_ONCE(target->ready)) in kvm_sched_yield()
8314 if (kvm_hv_hypercall_enabled(vcpu->kvm)) in kvm_emulate_hypercall()
8335 ret = -KVM_EPERM; in kvm_emulate_hypercall()
8339 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8349 kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1); in kvm_emulate_hypercall()
8350 kvm_sched_yield(vcpu->kvm, a1); in kvm_emulate_hypercall()
8362 ret = kvm_pv_send_ipi(vcpu->kvm, a0, a1, a2, a3, op_64_bit); in kvm_emulate_hypercall()
8368 kvm_sched_yield(vcpu->kvm, a0); in kvm_emulate_hypercall()
8372 ret = -KVM_ENOSYS; in kvm_emulate_hypercall()
8380 ++vcpu->stat.hypercalls; in kvm_emulate_hypercall()
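/*
 * Editorial sketch (guest-side, hedged): the dispatcher above implements
 * the KVM hypercall ABI: number in RAX, arguments in RBX/RCX/RDX/RSI,
 * return value in RAX, entered with VMCALL (VMMCALL on AMD).  This
 * mirrors kvm_hypercall2() from the guest's kvm_para.h and is only
 * meaningful when actually running as a KVM guest.
 */
static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	asm volatile("vmcall"
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

/* e.g. kick the vCPU owning a contended lock (KVM_HC_KICK_CPU): */
/*	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);           */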
8394 &ctxt->exception); in emulator_fix_hypercall()
8399 return vcpu->run->request_interrupt_window && in dm_request_for_irq_injection()
8400 likely(!pic_in_kernel(vcpu->kvm)); in dm_request_for_irq_injection()
8405 struct kvm_run *kvm_run = vcpu->run; in post_kvm_run_save()
8407 kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; in post_kvm_run_save()
8408 kvm_run->flags = is_smm(vcpu) ? KVM_RUN_X86_SMM : 0; in post_kvm_run_save()
8409 kvm_run->cr8 = kvm_get_cr8(vcpu); in post_kvm_run_save()
8410 kvm_run->apic_base = kvm_get_apic_base(vcpu); in post_kvm_run_save()
8411 kvm_run->ready_for_interrupt_injection = in post_kvm_run_save()
8412 pic_in_kernel(vcpu->kvm) || in post_kvm_run_save()
8426 if (vcpu->arch.apicv_active) in update_cr8_intercept()
8429 if (!vcpu->arch.apic->vapic_addr) in update_cr8_intercept()
8432 max_irr = -1; in update_cr8_intercept()
8434 if (max_irr != -1) in update_cr8_intercept()
8444 trace_kvm_inj_exception(vcpu->arch.exception.nr, in kvm_inject_exception()
8445 vcpu->arch.exception.has_error_code, in kvm_inject_exception()
8446 vcpu->arch.exception.error_code, in kvm_inject_exception()
8447 vcpu->arch.exception.injected); in kvm_inject_exception()
 8449 if (vcpu->arch.exception.has_error_code && !is_protmode(vcpu)) in kvm_inject_exception()
 8450 vcpu->arch.exception.has_error_code = false; in kvm_inject_exception()
8461 if (vcpu->arch.exception.injected) { in inject_pending_event()
8469 * Trap-like exceptions, e.g. #DB, have higher priority than in inject_pending_event()
8472 * Fault-like exceptions, e.g. #GP and #PF, are the lowest in inject_pending_event()
8474 * execution, i.e. a pending fault-like exception means the in inject_pending_event()
8479 else if (!vcpu->arch.exception.pending) { in inject_pending_event()
8480 if (vcpu->arch.nmi_injected) { in inject_pending_event()
8483 } else if (vcpu->arch.interrupt.injected) { in inject_pending_event()
8489 WARN_ON_ONCE(vcpu->arch.exception.injected && in inject_pending_event()
8490 vcpu->arch.exception.pending); in inject_pending_event()
8494 * in order for caller to determine if it should require immediate-exit in inject_pending_event()
8499 r = kvm_x86_ops.nested_ops->check_events(vcpu); in inject_pending_event()
8505 if (vcpu->arch.exception.pending) { in inject_pending_event()
8507 * Fault-class exceptions, except #DBs, set RF=1 in the RFLAGS in inject_pending_event()
8508 * value pushed on the stack. Trap-like exception and all #DBs in inject_pending_event()
8509 * leave RF as-is (KVM follows Intel's behavior in this regard; in inject_pending_event()
8514 * fault-like. They do _not_ set RF, a la code breakpoints. in inject_pending_event()
8516 if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) in inject_pending_event()
8520 if (vcpu->arch.exception.nr == DB_VECTOR) { in inject_pending_event()
8522 if (vcpu->arch.dr7 & DR7_GD) { in inject_pending_event()
8523 vcpu->arch.dr7 &= ~DR7_GD; in inject_pending_event()
8530 vcpu->arch.exception.pending = false; in inject_pending_event()
8531 vcpu->arch.exception.injected = true; in inject_pending_event()
8538 * due to architectural conditions (e.g. IF=0) a window-open exit in inject_pending_event()
8539 * will re-request KVM_REQ_EVENT. Sometimes however an event is pending in inject_pending_event()
8545 * The kvm_x86_ops hooks communicate this by returning -EBUSY. in inject_pending_event()
8547 if (vcpu->arch.smi_pending) { in inject_pending_event()
8548 r = can_inject ? kvm_x86_ops.smi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8552 vcpu->arch.smi_pending = false; in inject_pending_event()
8553 ++vcpu->arch.smi_count; in inject_pending_event()
8560 if (vcpu->arch.nmi_pending) { in inject_pending_event()
8561 r = can_inject ? kvm_x86_ops.nmi_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8565 --vcpu->arch.nmi_pending; in inject_pending_event()
8566 vcpu->arch.nmi_injected = true; in inject_pending_event()
8571 if (vcpu->arch.nmi_pending) in inject_pending_event()
8576 r = can_inject ? kvm_x86_ops.interrupt_allowed(vcpu, true) : -EBUSY; in inject_pending_event()
8589 kvm_x86_ops.nested_ops->hv_timer_pending && in inject_pending_event()
8590 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in inject_pending_event()
8593 WARN_ON(vcpu->arch.exception.pending); in inject_pending_event()
8610 if (kvm_x86_ops.get_nmi_mask(vcpu) || vcpu->arch.nmi_injected) in process_nmi()
8613 vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0); in process_nmi()
8614 vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit); in process_nmi()
8621 flags |= seg->g << 23; in enter_smm_get_segment_flags()
8622 flags |= seg->db << 22; in enter_smm_get_segment_flags()
8623 flags |= seg->l << 21; in enter_smm_get_segment_flags()
8624 flags |= seg->avl << 20; in enter_smm_get_segment_flags()
8625 flags |= seg->present << 15; in enter_smm_get_segment_flags()
8626 flags |= seg->dpl << 13; in enter_smm_get_segment_flags()
8627 flags |= seg->s << 12; in enter_smm_get_segment_flags()
8628 flags |= seg->type << 8; in enter_smm_get_segment_flags()
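/*
 * Editorial sketch (standalone C): enter_smm_get_segment_flags() above
 * packs a kvm_segment into the attribute layout of an x86 descriptor's
 * high dword: type[11:8], S[12], DPL[14:13], P[15], AVL[20], L[21],
 * D/B[22], G[23].  Unpacking reverses the same shifts.
 */
#include <stdint.h>

struct seg {			/* trimmed stand-in for struct kvm_segment */
	uint8_t type, s, dpl, present, avl, l, db, g;
};

static uint32_t pack_seg_flags(const struct seg *seg)
{
	return (uint32_t)seg->g << 23 | (uint32_t)seg->db << 22 |
	       (uint32_t)seg->l << 21 | (uint32_t)seg->avl << 20 |
	       (uint32_t)seg->present << 15 | (uint32_t)seg->dpl << 13 |
	       (uint32_t)seg->s << 12 | (uint32_t)seg->type << 8;
}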
8643 offset = 0x7f2c + (n - 3) * 12; in enter_smm_save_seg_32()
8645 put_smstate(u32, buf, offset + 8, seg.base); in enter_smm_save_seg_32()
8664 put_smstate(u64, buf, offset + 8, seg.base); in enter_smm_save_seg_64()
8690 put_smstate(u32, buf, 0x7f64, seg.base); in enter_smm_save_state_32()
8696 put_smstate(u32, buf, 0x7f80, seg.base); in enter_smm_save_state_32()
8715 put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase); in enter_smm_save_state_32()
8727 put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read(vcpu, i)); in enter_smm_save_state_64()
8741 put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase); in enter_smm_save_state_64()
8746 put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer); in enter_smm_save_state_64()
8752 put_smstate(u64, buf, 0x7e98, seg.base); in enter_smm_save_state_64()
8762 put_smstate(u64, buf, 0x7e78, seg.base); in enter_smm_save_state_64()
8780 trace_kvm_enter_smm(vcpu->vcpu_id, vcpu->arch.smbase, true); in enter_smm()
8790 * Give pre_enter_smm() a chance to make ISA-specific changes to the in enter_smm()
8792 * the SMM state-save area. in enter_smm()
8796 vcpu->arch.hflags |= HF_SMM_MASK; in enter_smm()
8797 kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)); in enter_smm()
8800 vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK; in enter_smm()
8807 cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG); in enter_smm()
8809 vcpu->arch.cr0 = cr0; in enter_smm()
8819 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; in enter_smm()
8820 cs.base = vcpu->arch.smbase; in enter_smm()
8823 ds.base = 0; in enter_smm()
8855 vcpu->arch.smi_pending = true; in process_smi()
8882 vcpu->arch.apicv_active = kvm_apicv_activated(vcpu->kvm); in kvm_vcpu_update_apicv()
8891 * In particular, kvm_request_apicv_update() expects kvm->srcu not to be
8893 * synchronize_srcu(&kvm->srcu).
8904 old = READ_ONCE(kvm->arch.apicv_inhibit_reasons); in kvm_request_apicv_update()
8913 old = cmpxchg(&kvm->arch.apicv_inhibit_reasons, expected, new); in kvm_request_apicv_update()
8941 bitmap_zero(vcpu->arch.ioapic_handled_vectors, 256); in vcpu_scan_ioapic()
8943 if (irqchip_split(vcpu->kvm)) in vcpu_scan_ioapic()
8944 kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8946 if (vcpu->arch.apicv_active) in vcpu_scan_ioapic()
8948 if (ioapic_in_kernel(vcpu->kvm)) in vcpu_scan_ioapic()
8949 kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); in vcpu_scan_ioapic()
8953 vcpu->arch.load_eoi_exitmap_pending = true; in vcpu_scan_ioapic()
8962 if (!kvm_apic_hw_enabled(vcpu->arch.apic)) in vcpu_load_eoi_exitmap()
8965 bitmap_or((ulong *)eoi_exit_bitmap, vcpu->arch.ioapic_handled_vectors, in vcpu_load_eoi_exitmap()
8966 vcpu_to_synic(vcpu)->vec_bitmap, 256); in vcpu_load_eoi_exitmap()
9003 smp_send_reschedule(vcpu->cpu); in __kvm_request_immediate_exit()
9024 if (unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) { in vcpu_enter_guest()
9034 kvm_gen_update_masterclock(vcpu->kvm); in vcpu_enter_guest()
9058 vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS; in vcpu_enter_guest()
9063 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in vcpu_enter_guest()
9064 vcpu->mmio_needed = 0; in vcpu_enter_guest()
9070 vcpu->arch.apf.halted = true; in vcpu_enter_guest()
9085 BUG_ON(vcpu->arch.pending_ioapic_eoi > 255); in vcpu_enter_guest()
9086 if (test_bit(vcpu->arch.pending_ioapic_eoi, in vcpu_enter_guest()
9087 vcpu->arch.ioapic_handled_vectors)) { in vcpu_enter_guest()
9088 vcpu->run->exit_reason = KVM_EXIT_IOAPIC_EOI; in vcpu_enter_guest()
9089 vcpu->run->eoi.vector = in vcpu_enter_guest()
9090 vcpu->arch.pending_ioapic_eoi; in vcpu_enter_guest()
9102 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9103 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_CRASH; in vcpu_enter_guest()
9108 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in vcpu_enter_guest()
9109 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; in vcpu_enter_guest()
9114 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in vcpu_enter_guest()
9115 vcpu->run->hyperv = vcpu->arch.hyperv.exit; in vcpu_enter_guest()
9122 * KVM_REQ_CLOCK_UPDATE, because Hyper-V SynIC timers in vcpu_enter_guest()
9123 * depend on the guest clock being up-to-date in vcpu_enter_guest()
9136 ++vcpu->stat.req_event; in vcpu_enter_guest()
9138 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { in vcpu_enter_guest()
9168 vcpu->mode = IN_GUEST_MODE; in vcpu_enter_guest()
9170 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in vcpu_enter_guest()
9173 * 1) We should set ->mode before checking ->requests. Please see in vcpu_enter_guest()
9176 * 2) For APICv, we should set ->mode before checking PID.ON. This in vcpu_enter_guest()
9190 if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active) in vcpu_enter_guest()
9194 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9198 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9214 if (unlikely(vcpu->arch.switch_db_regs)) { in vcpu_enter_guest()
9216 set_debugreg(vcpu->arch.eff_db[0], 0); in vcpu_enter_guest()
9217 set_debugreg(vcpu->arch.eff_db[1], 1); in vcpu_enter_guest()
9218 set_debugreg(vcpu->arch.eff_db[2], 2); in vcpu_enter_guest()
9219 set_debugreg(vcpu->arch.eff_db[3], 3); in vcpu_enter_guest()
9220 set_debugreg(vcpu->arch.dr6, 6); in vcpu_enter_guest()
9221 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9234 if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { in vcpu_enter_guest()
9235 WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); in vcpu_enter_guest()
9239 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD; in vcpu_enter_guest()
9252 vcpu->arch.last_vmentry_cpu = vcpu->cpu; in vcpu_enter_guest()
9253 vcpu->arch.last_guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc()); in vcpu_enter_guest()
9255 vcpu->mode = OUTSIDE_GUEST_MODE; in vcpu_enter_guest()
9262 * VM-Exit on SVM and any ticks that occur between VM-Exit and now. in vcpu_enter_guest()
9269 ++vcpu->stat.exits; in vcpu_enter_guest()
9283 s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; in vcpu_enter_guest()
9285 trace_kvm_wait_lapic_expire(vcpu->vcpu_id, delta); in vcpu_enter_guest()
9286 vcpu->arch.apic->lapic_timer.advance_expire_delta = S64_MIN; in vcpu_enter_guest()
9293 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in vcpu_enter_guest()
9303 if (unlikely(vcpu->arch.tsc_always_catchup)) in vcpu_enter_guest()
9306 if (vcpu->arch.apic_attention) in vcpu_enter_guest()
9316 if (unlikely(vcpu->arch.apic_attention)) in vcpu_enter_guest()
9326 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_block()
9328 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_block()
9338 switch(vcpu->arch.mp_state) { in vcpu_block()
9340 vcpu->arch.pv.pv_unhalted = false; in vcpu_block()
9341 vcpu->arch.mp_state = in vcpu_block()
9345 vcpu->arch.apf.halted = false; in vcpu_block()
9350 return -EINTR; in vcpu_block()
9358 kvm_x86_ops.nested_ops->check_events(vcpu); in kvm_vcpu_running()
9360 return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE && in kvm_vcpu_running()
9361 !vcpu->arch.apf.halted); in kvm_vcpu_running()
9367 struct kvm *kvm = vcpu->kvm; in vcpu_run()
9369 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9370 vcpu->arch.l1tf_flush_l1d = true; in vcpu_run()
9379 vcpu->arch.at_instruction_boundary = false; in vcpu_run()
9396 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; in vcpu_run()
9397 ++vcpu->stat.request_irq_exits; in vcpu_run()
9402 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9406 vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); in vcpu_run()
9410 srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); in vcpu_run()
9419 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in complete_emulated_io()
9421 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); in complete_emulated_io()
9427 BUG_ON(!vcpu->arch.pio.count); in complete_emulated_pio()
9452 struct kvm_run *run = vcpu->run; in complete_emulated_mmio()
9456 BUG_ON(!vcpu->mmio_needed); in complete_emulated_mmio()
9459 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment]; in complete_emulated_mmio()
9460 len = min(8u, frag->len); in complete_emulated_mmio()
9461 if (!vcpu->mmio_is_write) in complete_emulated_mmio()
9462 memcpy(frag->data, run->mmio.data, len); in complete_emulated_mmio()
9464 if (frag->len <= 8) { in complete_emulated_mmio()
9467 vcpu->mmio_cur_fragment++; in complete_emulated_mmio()
9470 frag->data += len; in complete_emulated_mmio()
9471 frag->gpa += len; in complete_emulated_mmio()
9472 frag->len -= len; in complete_emulated_mmio()
9475 if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { in complete_emulated_mmio()
9476 vcpu->mmio_needed = 0; in complete_emulated_mmio()
9478 /* FIXME: return into emulator if single-stepping. */ in complete_emulated_mmio()
9479 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9481 vcpu->mmio_read_completed = 1; in complete_emulated_mmio()
9485 run->exit_reason = KVM_EXIT_MMIO; in complete_emulated_mmio()
9486 run->mmio.phys_addr = frag->gpa; in complete_emulated_mmio()
9487 if (vcpu->mmio_is_write) in complete_emulated_mmio()
9488 memcpy(run->mmio.data, frag->data, min(8u, frag->len)); in complete_emulated_mmio()
9489 run->mmio.len = min(8u, frag->len); in complete_emulated_mmio()
9490 run->mmio.is_write = vcpu->mmio_is_write; in complete_emulated_mmio()
9491 vcpu->arch.complete_userspace_io = complete_emulated_mmio; in complete_emulated_mmio()
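/*
 * Editorial sketch (userspace, hedged): the fragment loop above feeds
 * userspace one chunk of at most 8 bytes per KVM_RUN round trip.  The
 * VMM side is symmetrical; mmio_read()/mmio_write() are hypothetical
 * device backends and "run" is the mmap()ed vCPU region.
 */
#include <linux/kvm.h>
#include <stdint.h>

void mmio_write(uint64_t gpa, const void *data, uint32_t len); /* hypothetical */
void mmio_read(uint64_t gpa, void *data, uint32_t len);        /* hypothetical */

static void handle_mmio_exit(struct kvm_run *run)
{
	if (run->mmio.is_write)
		mmio_write(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
	else	/* fill data before re-entering; KVM completes the fragment */
		mmio_read(run->mmio.phys_addr, run->mmio.data, run->mmio.len);
}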
 9502 memcpy(&fpu->state, &current->thread.fpu.state, in kvm_save_current_fpu()
9513 kvm_save_current_fpu(vcpu->arch.user_fpu); in kvm_load_guest_fpu()
9516 __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu->state, in kvm_load_guest_fpu()
9530 kvm_save_current_fpu(vcpu->arch.guest_fpu); in kvm_put_guest_fpu()
9532 copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state); in kvm_put_guest_fpu()
9537 ++vcpu->stat.fpu_reload; in kvm_put_guest_fpu()
9543 struct kvm_run *kvm_run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
9550 if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) { in kvm_arch_vcpu_ioctl_run()
9551 if (kvm_run->immediate_exit) { in kvm_arch_vcpu_ioctl_run()
9552 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9558 r = -EAGAIN; in kvm_arch_vcpu_ioctl_run()
9560 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9561 kvm_run->exit_reason = KVM_EXIT_INTR; in kvm_arch_vcpu_ioctl_run()
9562 ++vcpu->stat.signal_exits; in kvm_arch_vcpu_ioctl_run()
9567 if (kvm_run->kvm_valid_regs & ~KVM_SYNC_X86_VALID_FIELDS) { in kvm_arch_vcpu_ioctl_run()
9568 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9572 if (kvm_run->kvm_dirty_regs) { in kvm_arch_vcpu_ioctl_run()
9578 /* re-sync apic's tpr */ in kvm_arch_vcpu_ioctl_run()
9580 if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) { in kvm_arch_vcpu_ioctl_run()
9581 r = -EINVAL; in kvm_arch_vcpu_ioctl_run()
9586 if (unlikely(vcpu->arch.complete_userspace_io)) { in kvm_arch_vcpu_ioctl_run()
9587 int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io; in kvm_arch_vcpu_ioctl_run()
9588 vcpu->arch.complete_userspace_io = NULL; in kvm_arch_vcpu_ioctl_run()
9593 WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed); in kvm_arch_vcpu_ioctl_run()
9595 if (kvm_run->immediate_exit) in kvm_arch_vcpu_ioctl_run()
9596 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
9602 if (kvm_run->kvm_valid_regs) in kvm_arch_vcpu_ioctl_run()
9613 if (vcpu->arch.emulate_regs_need_sync_to_vcpu) { in __get_regs()
9621 emulator_writeback_register_cache(vcpu->arch.emulate_ctxt); in __get_regs()
9622 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __get_regs()
9624 regs->rax = kvm_rax_read(vcpu); in __get_regs()
9625 regs->rbx = kvm_rbx_read(vcpu); in __get_regs()
9626 regs->rcx = kvm_rcx_read(vcpu); in __get_regs()
9627 regs->rdx = kvm_rdx_read(vcpu); in __get_regs()
9628 regs->rsi = kvm_rsi_read(vcpu); in __get_regs()
9629 regs->rdi = kvm_rdi_read(vcpu); in __get_regs()
9630 regs->rsp = kvm_rsp_read(vcpu); in __get_regs()
9631 regs->rbp = kvm_rbp_read(vcpu); in __get_regs()
9633 regs->r8 = kvm_r8_read(vcpu); in __get_regs()
9634 regs->r9 = kvm_r9_read(vcpu); in __get_regs()
9635 regs->r10 = kvm_r10_read(vcpu); in __get_regs()
9636 regs->r11 = kvm_r11_read(vcpu); in __get_regs()
9637 regs->r12 = kvm_r12_read(vcpu); in __get_regs()
9638 regs->r13 = kvm_r13_read(vcpu); in __get_regs()
9639 regs->r14 = kvm_r14_read(vcpu); in __get_regs()
9640 regs->r15 = kvm_r15_read(vcpu); in __get_regs()
9643 regs->rip = kvm_rip_read(vcpu); in __get_regs()
9644 regs->rflags = kvm_get_rflags(vcpu); in __get_regs()
9657 vcpu->arch.emulate_regs_need_sync_from_vcpu = true; in __set_regs()
9658 vcpu->arch.emulate_regs_need_sync_to_vcpu = false; in __set_regs()
9660 kvm_rax_write(vcpu, regs->rax); in __set_regs()
9661 kvm_rbx_write(vcpu, regs->rbx); in __set_regs()
9662 kvm_rcx_write(vcpu, regs->rcx); in __set_regs()
9663 kvm_rdx_write(vcpu, regs->rdx); in __set_regs()
9664 kvm_rsi_write(vcpu, regs->rsi); in __set_regs()
9665 kvm_rdi_write(vcpu, regs->rdi); in __set_regs()
9666 kvm_rsp_write(vcpu, regs->rsp); in __set_regs()
9667 kvm_rbp_write(vcpu, regs->rbp); in __set_regs()
9669 kvm_r8_write(vcpu, regs->r8); in __set_regs()
9670 kvm_r9_write(vcpu, regs->r9); in __set_regs()
9671 kvm_r10_write(vcpu, regs->r10); in __set_regs()
9672 kvm_r11_write(vcpu, regs->r11); in __set_regs()
9673 kvm_r12_write(vcpu, regs->r12); in __set_regs()
9674 kvm_r13_write(vcpu, regs->r13); in __set_regs()
9675 kvm_r14_write(vcpu, regs->r14); in __set_regs()
9676 kvm_r15_write(vcpu, regs->r15); in __set_regs()
9679 kvm_rip_write(vcpu, regs->rip); in __set_regs()
9680 kvm_set_rflags(vcpu, regs->rflags | X86_EFLAGS_FIXED); in __set_regs()
9682 vcpu->arch.exception.pending = false; in __set_regs()
9709 kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __get_sregs()
9710 kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __get_sregs()
9711 kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __get_sregs()
9712 kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __get_sregs()
9713 kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __get_sregs()
9714 kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __get_sregs()
9716 kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __get_sregs()
9717 kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __get_sregs()
9720 sregs->idt.limit = dt.size; in __get_sregs()
9721 sregs->idt.base = dt.address; in __get_sregs()
9723 sregs->gdt.limit = dt.size; in __get_sregs()
9724 sregs->gdt.base = dt.address; in __get_sregs()
9726 sregs->cr0 = kvm_read_cr0(vcpu); in __get_sregs()
9727 sregs->cr2 = vcpu->arch.cr2; in __get_sregs()
9728 sregs->cr3 = kvm_read_cr3(vcpu); in __get_sregs()
9729 sregs->cr4 = kvm_read_cr4(vcpu); in __get_sregs()
9730 sregs->cr8 = kvm_get_cr8(vcpu); in __get_sregs()
9731 sregs->efer = vcpu->arch.efer; in __get_sregs()
9732 sregs->apic_base = kvm_get_apic_base(vcpu); in __get_sregs()
9734 memset(sregs->interrupt_bitmap, 0, sizeof(sregs->interrupt_bitmap)); in __get_sregs()
9736 if (vcpu->arch.interrupt.injected && !vcpu->arch.interrupt.soft) in __get_sregs()
9737 set_bit(vcpu->arch.interrupt.nr, in __get_sregs()
9738 (unsigned long *)sregs->interrupt_bitmap); in __get_sregs()
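/*
 * Editor's sketch (hedged; mirrors the decode side partially visible in
 * __set_sregs() below): at most one bit is set above, and when userspace
 * hands the bitmap back the vector is recovered roughly as:
 *
 *	pending_vec = find_first_bit(
 *		(const unsigned long *)sregs->interrupt_bitmap, max_bits);
 *	if (pending_vec < max_bits)
 *		kvm_queue_interrupt(vcpu, pending_vec, false);
 */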
9758 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED && in kvm_arch_vcpu_ioctl_get_mpstate()
9759 vcpu->arch.pv.pv_unhalted) in kvm_arch_vcpu_ioctl_get_mpstate()
9760 mp_state->mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_ioctl_get_mpstate()
9762 mp_state->mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
9773 int ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
9778 mp_state->mp_state != KVM_MP_STATE_RUNNABLE) in kvm_arch_vcpu_ioctl_set_mpstate()
9786 if ((kvm_vcpu_latch_init(vcpu) || vcpu->arch.smi_pending) && in kvm_arch_vcpu_ioctl_set_mpstate()
9787 (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED || in kvm_arch_vcpu_ioctl_set_mpstate()
9788 mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED)) in kvm_arch_vcpu_ioctl_set_mpstate()
9791 if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { in kvm_arch_vcpu_ioctl_set_mpstate()
9792 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; in kvm_arch_vcpu_ioctl_set_mpstate()
9793 set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events); in kvm_arch_vcpu_ioctl_set_mpstate()
9795 vcpu->arch.mp_state = mp_state->mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
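/*
 * Editorial note: KVM_MP_STATE_SIPI_RECEIVED is never stored as-is; it
 * is folded into INIT_RECEIVED plus a pending KVM_APIC_SIPI event so the
 * regular event-delivery path completes the AP wakeup.
 */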
9807 struct x86_emulate_ctxt *ctxt = vcpu->arch.emulate_ctxt; in kvm_task_switch()
9815 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_task_switch()
9816 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_task_switch()
9817 vcpu->run->internal.ndata = 0; in kvm_task_switch()
9821 kvm_rip_write(vcpu, ctxt->eip); in kvm_task_switch()
9822 kvm_set_rflags(vcpu, ctxt->eflags); in kvm_task_switch()
9829 if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) { in kvm_valid_sregs()
9832 * 64-bit mode (though maybe in a 32-bit code segment). in kvm_valid_sregs()
9835 if (!(sregs->cr4 & X86_CR4_PAE) in kvm_valid_sregs()
9836 || !(sregs->efer & EFER_LMA)) in kvm_valid_sregs()
9837 return -EINVAL; in kvm_valid_sregs()
9838 if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits) in kvm_valid_sregs()
9839 return -EINVAL; in kvm_valid_sregs()
9842 * Not in 64-bit mode: EFER.LMA is clear and the code in kvm_valid_sregs()
9843 * segment cannot be 64-bit. in kvm_valid_sregs()
9845 if (sregs->efer & EFER_LMA || sregs->cs.l) in kvm_valid_sregs()
9846 return -EINVAL; in kvm_valid_sregs()
9849 return kvm_valid_cr4(vcpu, sregs->cr4); in kvm_valid_sregs()
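/*
 * Editorial restatement of the invariants enforced above: entering
 * IA-32e mode (EFER.LME=1 with CR0.PG=1) architecturally requires
 * CR4.PAE=1, and KVM additionally insists that userspace has already set
 * EFER.LMA and that CR3 carries no reserved bits; outside IA-32e mode,
 * EFER.LMA and CS.L must both be clear.
 */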
9859 int ret = -EINVAL; in __set_sregs()
9864 apic_base_msr.data = sregs->apic_base; in __set_sregs()
9869 dt.size = sregs->idt.limit; in __set_sregs()
9870 dt.address = sregs->idt.base; in __set_sregs()
9872 dt.size = sregs->gdt.limit; in __set_sregs()
9873 dt.address = sregs->gdt.base; in __set_sregs()
9876 vcpu->arch.cr2 = sregs->cr2; in __set_sregs()
9877 mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; in __set_sregs()
9878 vcpu->arch.cr3 = sregs->cr3; in __set_sregs()
9881 kvm_set_cr8(vcpu, sregs->cr8); in __set_sregs()
9883 mmu_reset_needed |= vcpu->arch.efer != sregs->efer; in __set_sregs()
9884 kvm_x86_ops.set_efer(vcpu, sregs->efer); in __set_sregs()
9886 mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0; in __set_sregs()
9887 kvm_x86_ops.set_cr0(vcpu, sregs->cr0); in __set_sregs()
9888 vcpu->arch.cr0 = sregs->cr0; in __set_sregs()
9890 mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; in __set_sregs()
9891 cpuid_update_needed |= ((kvm_read_cr4(vcpu) ^ sregs->cr4) & in __set_sregs()
9893 kvm_x86_ops.set_cr4(vcpu, sregs->cr4); in __set_sregs()
9897 idx = srcu_read_lock(&vcpu->kvm->srcu); in __set_sregs()
9899 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)); in __set_sregs()
9902 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __set_sregs()
9909 (const unsigned long *)sregs->interrupt_bitmap, max_bits); in __set_sregs()
9915 kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS); in __set_sregs()
9916 kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS); in __set_sregs()
9917 kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES); in __set_sregs()
9918 kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS); in __set_sregs()
9919 kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS); in __set_sregs()
9920 kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS); in __set_sregs()
9922 kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR); in __set_sregs()
9923 kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR); in __set_sregs()
9929 sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 && in __set_sregs()
9931 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in __set_sregs()
9959 if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9960 r = -EBUSY; in kvm_arch_vcpu_ioctl_set_guest_debug()
9961 if (vcpu->arch.exception.pending) in kvm_arch_vcpu_ioctl_set_guest_debug()
9963 if (dbg->control & KVM_GUESTDBG_INJECT_DB) in kvm_arch_vcpu_ioctl_set_guest_debug()
9975 vcpu->guest_debug = dbg->control; in kvm_arch_vcpu_ioctl_set_guest_debug()
9976 if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE)) in kvm_arch_vcpu_ioctl_set_guest_debug()
9977 vcpu->guest_debug = 0; in kvm_arch_vcpu_ioctl_set_guest_debug()
9979 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in kvm_arch_vcpu_ioctl_set_guest_debug()
9981 vcpu->arch.eff_db[i] = dbg->arch.debugreg[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9982 vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9985 vcpu->arch.eff_db[i] = vcpu->arch.db[i]; in kvm_arch_vcpu_ioctl_set_guest_debug()
9989 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_arch_vcpu_ioctl_set_guest_debug()
9990 vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) + in kvm_arch_vcpu_ioctl_set_guest_debug()
10014 unsigned long vaddr = tr->linear_address; in kvm_arch_vcpu_ioctl_translate()
10020 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_ioctl_translate()
10022 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_ioctl_translate()
10023 tr->physical_address = gpa; in kvm_arch_vcpu_ioctl_translate()
10024 tr->valid = gpa != UNMAPPED_GVA; in kvm_arch_vcpu_ioctl_translate()
10025 tr->writeable = 1; in kvm_arch_vcpu_ioctl_translate()
10026 tr->usermode = 0; in kvm_arch_vcpu_ioctl_translate()
10038 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_get_fpu()
10039 memcpy(fpu->fpr, fxsave->st_space, 128); in kvm_arch_vcpu_ioctl_get_fpu()
10040 fpu->fcw = fxsave->cwd; in kvm_arch_vcpu_ioctl_get_fpu()
10041 fpu->fsw = fxsave->swd; in kvm_arch_vcpu_ioctl_get_fpu()
10042 fpu->ftwx = fxsave->twd; in kvm_arch_vcpu_ioctl_get_fpu()
10043 fpu->last_opcode = fxsave->fop; in kvm_arch_vcpu_ioctl_get_fpu()
10044 fpu->last_ip = fxsave->rip; in kvm_arch_vcpu_ioctl_get_fpu()
10045 fpu->last_dp = fxsave->rdp; in kvm_arch_vcpu_ioctl_get_fpu()
10046 memcpy(fpu->xmm, fxsave->xmm_space, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_get_fpu()
10058 fxsave = &vcpu->arch.guest_fpu->state.fxsave; in kvm_arch_vcpu_ioctl_set_fpu()
10060 memcpy(fxsave->st_space, fpu->fpr, 128); in kvm_arch_vcpu_ioctl_set_fpu()
10061 fxsave->cwd = fpu->fcw; in kvm_arch_vcpu_ioctl_set_fpu()
10062 fxsave->swd = fpu->fsw; in kvm_arch_vcpu_ioctl_set_fpu()
10063 fxsave->twd = fpu->ftwx; in kvm_arch_vcpu_ioctl_set_fpu()
10064 fxsave->fop = fpu->last_opcode; in kvm_arch_vcpu_ioctl_set_fpu()
10065 fxsave->rip = fpu->last_ip; in kvm_arch_vcpu_ioctl_set_fpu()
10066 fxsave->rdp = fpu->last_dp; in kvm_arch_vcpu_ioctl_set_fpu()
10067 memcpy(fxsave->xmm_space, fpu->xmm, sizeof(fxsave->xmm_space)); in kvm_arch_vcpu_ioctl_set_fpu()
10077 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_REGS) in store_regs()
10078 __get_regs(vcpu, &vcpu->run->s.regs.regs); in store_regs()
10080 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_SREGS) in store_regs()
10081 __get_sregs(vcpu, &vcpu->run->s.regs.sregs); in store_regs()
10083 if (vcpu->run->kvm_valid_regs & KVM_SYNC_X86_EVENTS) in store_regs()
10085 vcpu, &vcpu->run->s.regs.events); in store_regs()
10090 if (vcpu->run->kvm_dirty_regs & ~KVM_SYNC_X86_VALID_FIELDS) in sync_regs()
10091 return -EINVAL; in sync_regs()
10093 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_REGS) { in sync_regs()
10094 __set_regs(vcpu, &vcpu->run->s.regs.regs); in sync_regs()
10095 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_REGS; in sync_regs()
10097 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_SREGS) { in sync_regs()
10098 if (__set_sregs(vcpu, &vcpu->run->s.regs.sregs)) in sync_regs()
10099 return -EINVAL; in sync_regs()
10100 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_SREGS; in sync_regs()
10102 if (vcpu->run->kvm_dirty_regs & KVM_SYNC_X86_EVENTS) { in sync_regs()
10104 vcpu, &vcpu->run->s.regs.events)) in sync_regs()
10105 return -EINVAL; in sync_regs()
10106 vcpu->run->kvm_dirty_regs &= ~KVM_SYNC_X86_EVENTS; in sync_regs()
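/*
 * Editor's sketch (assumes the host advertises KVM_CAP_SYNC_REGS;
 * vcpu_fd and run are hypothetical VMM names): patching RIP through the
 * shared kvm_run area instead of a KVM_GET_REGS/KVM_SET_REGS pair.
 */
#if 0	/* illustration only */
run->kvm_valid_regs = KVM_SYNC_X86_REGS;  /* store_regs() fills s.regs */
ioctl(vcpu_fd, KVM_RUN, 0);
run->s.regs.regs.rip += 2;                /* e.g. skip an instruction */
run->kvm_dirty_regs = KVM_SYNC_X86_REGS;  /* sync_regs() applies it */
ioctl(vcpu_fd, KVM_RUN, 0);
#endif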
10114 fpstate_init(&vcpu->arch.guest_fpu->state); in fx_init()
10116 vcpu->arch.guest_fpu->state.xsave.header.xcomp_bv = in fx_init()
10122 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in fx_init()
10124 vcpu->arch.cr0 |= X86_CR0_ET; in fx_init()
10129 if (kvm_check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0) in kvm_arch_vcpu_precreate()
10141 if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu)) in kvm_arch_vcpu_create()
10142 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_create()
10144 vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED; in kvm_arch_vcpu_create()
10152 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_arch_vcpu_create()
10156 if (kvm_apicv_activated(vcpu->kvm)) in kvm_arch_vcpu_create()
10157 vcpu->arch.apicv_active = true; in kvm_arch_vcpu_create()
10161 r = -ENOMEM; in kvm_arch_vcpu_create()
10166 vcpu->arch.pio_data = page_address(page); in kvm_arch_vcpu_create()
10168 vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4, in kvm_arch_vcpu_create()
10170 if (!vcpu->arch.mce_banks) in kvm_arch_vcpu_create()
10172 vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS; in kvm_arch_vcpu_create()
10174 if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, in kvm_arch_vcpu_create()
10181 vcpu->arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10183 if (!vcpu->arch.user_fpu) { in kvm_arch_vcpu_create()
10188 vcpu->arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, in kvm_arch_vcpu_create()
10190 if (!vcpu->arch.guest_fpu) { in kvm_arch_vcpu_create()
10196 vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu); in kvm_arch_vcpu_create()
10197 vcpu->arch.cr3_lm_rsvd_bits = rsvd_bits(cpuid_maxphyaddr(vcpu), 63); in kvm_arch_vcpu_create()
10199 vcpu->arch.pat = MSR_IA32_CR_PAT_DEFAULT; in kvm_arch_vcpu_create()
10204 vcpu->arch.pending_external_vector = -1; in kvm_arch_vcpu_create()
10205 vcpu->arch.preempted_in_kernel = false; in kvm_arch_vcpu_create()
10213 vcpu->arch.arch_capabilities = kvm_get_arch_capabilities(); in kvm_arch_vcpu_create()
10214 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT; in kvm_arch_vcpu_create()
10223 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_create()
10225 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_create()
10227 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_create()
10229 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_create()
10231 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_create()
10233 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_create()
10243 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_postcreate()
10247 if (mutex_lock_killable(&vcpu->mutex)) in kvm_arch_vcpu_postcreate()
10254 vcpu->arch.msr_kvm_poll_control = 1; in kvm_arch_vcpu_postcreate()
10256 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_postcreate()
10258 if (kvmclock_periodic_sync && vcpu->vcpu_idx == 0) in kvm_arch_vcpu_postcreate()
10259 schedule_delayed_work(&kvm->arch.kvmclock_sync_work, in kvm_arch_vcpu_postcreate()
10265 struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache; in kvm_arch_vcpu_destroy()
10268 kvm_release_pfn(cache->pfn, cache->dirty, cache); in kvm_arch_vcpu_destroy()
10274 kmem_cache_free(x86_emulator_cache, vcpu->arch.emulate_ctxt); in kvm_arch_vcpu_destroy()
10275 free_cpumask_var(vcpu->arch.wbinvd_dirty_mask); in kvm_arch_vcpu_destroy()
10276 kmem_cache_free(x86_fpu_cache, vcpu->arch.user_fpu); in kvm_arch_vcpu_destroy()
10277 kmem_cache_free(x86_fpu_cache, vcpu->arch.guest_fpu); in kvm_arch_vcpu_destroy()
10281 kfree(vcpu->arch.mce_banks); in kvm_arch_vcpu_destroy()
10283 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_arch_vcpu_destroy()
10285 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_arch_vcpu_destroy()
10286 free_page((unsigned long)vcpu->arch.pio_data); in kvm_arch_vcpu_destroy()
10287 kvfree(vcpu->arch.cpuid_entries); in kvm_arch_vcpu_destroy()
10296 vcpu->arch.hflags = 0; in kvm_vcpu_reset()
10298 vcpu->arch.smi_pending = 0; in kvm_vcpu_reset()
10299 vcpu->arch.smi_count = 0; in kvm_vcpu_reset()
10300 atomic_set(&vcpu->arch.nmi_queued, 0); in kvm_vcpu_reset()
10301 vcpu->arch.nmi_pending = 0; in kvm_vcpu_reset()
10302 vcpu->arch.nmi_injected = false; in kvm_vcpu_reset()
10306 memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); in kvm_vcpu_reset()
10308 vcpu->arch.dr6 = DR6_INIT; in kvm_vcpu_reset()
10309 vcpu->arch.dr7 = DR7_FIXED_1; in kvm_vcpu_reset()
10312 vcpu->arch.cr2 = 0; in kvm_vcpu_reset()
10315 vcpu->arch.apf.msr_en_val = 0; in kvm_vcpu_reset()
10316 vcpu->arch.apf.msr_int_val = 0; in kvm_vcpu_reset()
10317 vcpu->arch.st.msr_val = 0; in kvm_vcpu_reset()
10323 vcpu->arch.apf.halted = false; in kvm_vcpu_reset()
10334 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10338 mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave, in kvm_vcpu_reset()
10348 vcpu->arch.smbase = 0x30000; in kvm_vcpu_reset()
10350 vcpu->arch.msr_misc_features_enables = 0; in kvm_vcpu_reset()
10352 vcpu->arch.xcr0 = XFEATURE_MASK_FP; in kvm_vcpu_reset()
10355 memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); in kvm_vcpu_reset()
10356 vcpu->arch.regs_avail = ~0; in kvm_vcpu_reset()
10357 vcpu->arch.regs_dirty = ~0; in kvm_vcpu_reset()
10359 vcpu->arch.ia32_xss = 0; in kvm_vcpu_reset()
10370 cs.base = vector << 12; in kvm_vcpu_deliver_sipi_vector()
10394 if (!stable && vcpu->cpu == smp_processor_id()) in kvm_arch_hardware_enable()
10396 if (stable && vcpu->arch.last_host_tsc > local_tsc) { in kvm_arch_hardware_enable()
10398 if (vcpu->arch.last_host_tsc > max_tsc) in kvm_arch_hardware_enable()
10399 max_tsc = vcpu->arch.last_host_tsc; in kvm_arch_hardware_enable()
10429 * N.B. - this code below runs only on platforms with reliable TSC, in kvm_arch_hardware_enable()
10443 u64 delta_cyc = max_tsc - local_tsc; in kvm_arch_hardware_enable()
10445 kvm->arch.backwards_tsc_observed = true; in kvm_arch_hardware_enable()
10447 vcpu->arch.tsc_offset_adjustment += delta_cyc; in kvm_arch_hardware_enable()
10448 vcpu->arch.last_host_tsc = local_tsc; in kvm_arch_hardware_enable()
10458 kvm->arch.last_tsc_nsec = 0; in kvm_arch_hardware_enable()
10459 kvm->arch.last_tsc_write = 0; in kvm_arch_hardware_enable()
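/*
 * Worked example (editorial): if the host suspended and the TSC reset,
 * local_tsc on resume might read 1,000,000 while some vCPU recorded
 * last_host_tsc = 5,000,000.  max_tsc becomes 5,000,000, so
 * delta_cyc = 4,000,000 cycles is added to every vCPU's
 * tsc_offset_adjustment, keeping the guest-visible TSC monotonic.
 */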
10482 r = ops->hardware_setup(); in kvm_arch_hardware_setup()
10486 memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); in kvm_arch_hardware_setup()
10527 return -EIO; in kvm_arch_check_processor_compat()
10529 return ops->check_processor_compatibility(); in kvm_arch_check_processor_compat()
10534 return vcpu->kvm->arch.bsp_vcpu_id == vcpu->vcpu_id; in kvm_vcpu_is_reset_bsp()
10540 return (vcpu->arch.apic_base & MSR_IA32_APICBASE_BSP) != 0; in kvm_vcpu_is_bsp()
10550 vcpu->arch.l1tf_flush_l1d = true; in kvm_arch_sched_in()
10551 if (pmu->version && unlikely(pmu->event_count)) { in kvm_arch_sched_in()
10552 pmu->need_cleanup = true; in kvm_arch_sched_in()
10560 kfree(kvm->arch.hyperv.hv_pa_pg); in kvm_arch_free_vm()
10570 return -EINVAL; in kvm_arch_init_vm()
10576 INIT_HLIST_HEAD(&kvm->arch.mask_notifier_list); in kvm_arch_init_vm()
10577 INIT_LIST_HEAD(&kvm->arch.active_mmu_pages); in kvm_arch_init_vm()
10578 INIT_LIST_HEAD(&kvm->arch.zapped_obsolete_pages); in kvm_arch_init_vm()
10579 INIT_LIST_HEAD(&kvm->arch.lpage_disallowed_mmu_pages); in kvm_arch_init_vm()
10580 INIT_LIST_HEAD(&kvm->arch.assigned_dev_head); in kvm_arch_init_vm()
10581 atomic_set(&kvm->arch.noncoherent_dma_count, 0); in kvm_arch_init_vm()
10584 set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10585 /* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */ in kvm_arch_init_vm()
10587 &kvm->arch.irq_sources_bitmap); in kvm_arch_init_vm()
10589 raw_spin_lock_init(&kvm->arch.tsc_write_lock); in kvm_arch_init_vm()
10590 mutex_init(&kvm->arch.apic_map_lock); in kvm_arch_init_vm()
10591 spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock); in kvm_arch_init_vm()
10593 kvm->arch.kvmclock_offset = -get_kvmclock_base_ns(); in kvm_arch_init_vm()
10596 kvm->arch.guest_can_read_msr_platform_info = true; in kvm_arch_init_vm()
10598 INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn); in kvm_arch_init_vm()
10599 INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn); in kvm_arch_init_vm()
10634 mutex_lock(&kvm->lock); in kvm_free_vcpus()
10635 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) in kvm_free_vcpus()
10636 kvm->vcpus[i] = NULL; in kvm_free_vcpus()
10638 atomic_set(&kvm->online_vcpus, 0); in kvm_free_vcpus()
10639 mutex_unlock(&kvm->lock); in kvm_free_vcpus()
10644 cancel_delayed_work_sync(&kvm->arch.kvmclock_sync_work); in kvm_arch_sync_events()
10645 cancel_delayed_work_sync(&kvm->arch.kvmclock_update_work); in kvm_arch_sync_events()
10656 /* Called with kvm->slots_lock held. */ in __x86_set_memory_region()
10658 return -EINVAL; in __x86_set_memory_region()
10662 if (slot && slot->npages) in __x86_set_memory_region()
10663 return -EEXIST; in __x86_set_memory_region()
10674 if (!slot || !slot->npages) in __x86_set_memory_region()
10677 old_npages = slot->npages; in __x86_set_memory_region()
10708 if (current->mm == kvm->mm) { in kvm_arch_destroy_vm()
10714 mutex_lock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10720 mutex_unlock(&kvm->slots_lock); in kvm_arch_destroy_vm()
10724 kvm_free_msr_filter(srcu_dereference_check(kvm->arch.msr_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10728 kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); in kvm_arch_destroy_vm()
10729 kfree(srcu_dereference_check(kvm->arch.pmu_event_filter, &kvm->srcu, 1)); in kvm_arch_destroy_vm()
10740 kvfree(slot->arch.rmap[i]); in kvm_arch_free_memslot()
10741 slot->arch.rmap[i] = NULL; in kvm_arch_free_memslot()
10746 kvfree(slot->arch.lpage_info[i - 1]); in kvm_arch_free_memslot()
10747 slot->arch.lpage_info[i - 1] = NULL; in kvm_arch_free_memslot()
10763 memset(&slot->arch, 0, sizeof(slot->arch)); in kvm_alloc_memslot_metadata()
10771 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_alloc_memslot_metadata()
10772 slot->base_gfn, level) + 1; in kvm_alloc_memslot_metadata()
10774 slot->arch.rmap[i] = in kvm_alloc_memslot_metadata()
10775 kvcalloc(lpages, sizeof(*slot->arch.rmap[i]), in kvm_alloc_memslot_metadata()
10777 if (!slot->arch.rmap[i]) in kvm_alloc_memslot_metadata()
10786 slot->arch.lpage_info[i - 1] = linfo; in kvm_alloc_memslot_metadata()
10788 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10790 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10791 linfo[lpages - 1].disallow_lpage = 1; in kvm_alloc_memslot_metadata()
10792 ugfn = slot->userspace_addr >> PAGE_SHIFT; in kvm_alloc_memslot_metadata()
10797 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
10812 kvfree(slot->arch.rmap[i]); in kvm_alloc_memslot_metadata()
10813 slot->arch.rmap[i] = NULL; in kvm_alloc_memslot_metadata()
10817 kvfree(slot->arch.lpage_info[i - 1]); in kvm_alloc_memslot_metadata()
10818 slot->arch.lpage_info[i - 1] = NULL; in kvm_alloc_memslot_metadata()
10820 return -ENOMEM; in kvm_alloc_memslot_metadata()
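/*
 * Worked example (editorial): a slot with base_gfn = 0x1001 and
 * npages = 0x400 at the 2MiB level (512 4KiB pages per huge page) gets
 * lpages = (0x1400 >> 9) - (0x1001 >> 9) + 1 = 3 lpage_info entries;
 * since neither the start (0x1001) nor the end (0x1401) is 512-aligned,
 * both linfo[0] and linfo[2] have disallow_lpage set.
 */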
10829 * memslots->generation has been incremented. in kvm_arch_memslots_updated()
10834 /* Force re-initialization of steal_time cache */ in kvm_arch_memslots_updated()
10846 mem->memory_size >> PAGE_SHIFT); in kvm_arch_prepare_memory_region()
10859 if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY)) in kvm_mmu_slot_apply_flags()
10871 * which can be collapsed into a single large-page spte. Later in kvm_mmu_slot_apply_flags()
10872 * page faults will create the large-page sptes. in kvm_mmu_slot_apply_flags()
10879 if ((old->flags & KVM_MEM_LOG_DIRTY_PAGES) && in kvm_mmu_slot_apply_flags()
10880 !(new->flags & KVM_MEM_LOG_DIRTY_PAGES)) in kvm_mmu_slot_apply_flags()
10896 * When disabling dirty logging with PML enabled, the D-bit is set in kvm_mmu_slot_apply_flags()
10903 * When enabling dirty logging, large sptes are write-protected in kvm_mmu_slot_apply_flags()
10908 * initial-all-set state. Otherwise, depending on whether pml in kvm_mmu_slot_apply_flags()
10909 * is enabled the D-bit or the W-bit will be cleared. in kvm_mmu_slot_apply_flags()
10911 if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) { in kvm_mmu_slot_apply_flags()
10920 * If we're with initial-all-set, we don't need in kvm_mmu_slot_apply_flags()
10923 * we still need to write-protect huge pages in kvm_mmu_slot_apply_flags()
10941 if (!kvm->arch.n_requested_mmu_pages) in kvm_arch_commit_memory_region()
10946 * FIXME: const-ify all uses of struct kvm_memory_slot. in kvm_arch_commit_memory_region()
10975 if (!list_empty_careful(&vcpu->async_pf.done)) in kvm_vcpu_has_events()
10981 if (vcpu->arch.pv.pv_unhalted) in kvm_vcpu_has_events()
10984 if (vcpu->arch.exception.pending) in kvm_vcpu_has_events()
10988 (vcpu->arch.nmi_pending && in kvm_vcpu_has_events()
10993 (vcpu->arch.smi_pending && in kvm_vcpu_has_events()
11006 kvm_x86_ops.nested_ops->hv_timer_pending && in kvm_vcpu_has_events()
11007 kvm_x86_ops.nested_ops->hv_timer_pending(vcpu)) in kvm_vcpu_has_events()
11020 if (READ_ONCE(vcpu->arch.pv.pv_unhalted)) in kvm_arch_dy_runnable()
11028 if (vcpu->arch.apicv_active && kvm_x86_ops.dy_apicv_has_pending_interrupt(vcpu)) in kvm_arch_dy_runnable()
11036 return vcpu->arch.preempted_in_kernel; in kvm_arch_vcpu_in_kernel()
11069 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in kvm_get_rflags()
11077 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && in __kvm_set_rflags()
11078 kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) in __kvm_set_rflags()
11094 if ((vcpu->arch.mmu->direct_map != work->arch.direct_map) || in kvm_arch_async_page_ready()
11095 work->wakeup_all) in kvm_arch_async_page_ready()
11102 if (!vcpu->arch.mmu->direct_map && in kvm_arch_async_page_ready()
11103 work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu)) in kvm_arch_async_page_ready()
11106 kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true); in kvm_arch_async_page_ready()
11118 return (key + 1) & (ASYNC_PF_PER_VCPU - 1); in kvm_async_pf_next_probe()
11125 while (vcpu->arch.apf.gfns[key] != ~0) in kvm_add_async_pf_gfn()
11128 vcpu->arch.apf.gfns[key] = gfn; in kvm_add_async_pf_gfn()
11137 (vcpu->arch.apf.gfns[key] != gfn && in kvm_async_pf_gfn_slot()
11138 vcpu->arch.apf.gfns[key] != ~0); i++) in kvm_async_pf_gfn_slot()
11146 return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn; in kvm_find_async_pf_gfn()
11155 if (WARN_ON_ONCE(vcpu->arch.apf.gfns[i] != gfn)) in kvm_del_async_pf_gfn()
11159 vcpu->arch.apf.gfns[i] = ~0; in kvm_del_async_pf_gfn()
11162 if (vcpu->arch.apf.gfns[j] == ~0) in kvm_del_async_pf_gfn()
11164 k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]); in kvm_del_async_pf_gfn()
11171 vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j]; in kvm_del_async_pf_gfn()
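/*
 * Editorial note on the loop above: the gfn table is an open-addressed,
 * linearly probed hash (kvm_async_pf_next_probe() wraps modulo
 * ASYNC_PF_PER_VCPU).  Deletion avoids tombstones by backward shifting,
 * roughly Knuth's Algorithm R: after freeing slot i, a later entry j is
 * moved into i whenever its home slot k could not otherwise reach it
 * past the new hole, so probes never stop at a premature ~0.
 */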
11180 return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &reason, in apf_put_user_notpresent()
11188 return kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_put_user_ready()
11197 if (kvm_read_guest_offset_cached(vcpu->kvm, &vcpu->arch.apf.data, in apf_pageready_slot_free()
11206 if (!vcpu->arch.apf.delivery_as_pf_vmexit && is_guest_mode(vcpu)) in kvm_can_deliver_async_pf()
11210 (vcpu->arch.apf.send_user_only && kvm_x86_ops.get_cpl(vcpu) == 0)) in kvm_can_deliver_async_pf()
11220 vcpu->arch.exception.pending)) in kvm_can_do_async_pf()
11223 if (kvm_hlt_in_guest(vcpu->kvm) && !kvm_can_deliver_async_pf(vcpu)) in kvm_can_do_async_pf()
11238 trace_kvm_async_pf_not_present(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_not_present()
11239 kvm_add_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_not_present()
11247 fault.address = work->arch.token; in kvm_arch_async_page_not_present()
11270 .vector = vcpu->arch.apf.vec in kvm_arch_async_page_present()
11273 if (work->wakeup_all) in kvm_arch_async_page_present()
11274 work->arch.token = ~0; /* broadcast wakeup */ in kvm_arch_async_page_present()
11276 kvm_del_async_pf_gfn(vcpu, work->arch.gfn); in kvm_arch_async_page_present()
11277 trace_kvm_async_pf_ready(work->arch.token, work->cr2_or_gpa); in kvm_arch_async_page_present()
11279 if ((work->wakeup_all || work->notpresent_injected) && in kvm_arch_async_page_present()
11281 !apf_put_user_ready(vcpu, work->arch.token)) { in kvm_arch_async_page_present()
11282 vcpu->arch.apf.pageready_pending = true; in kvm_arch_async_page_present()
11286 vcpu->arch.apf.halted = false; in kvm_arch_async_page_present()
11287 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_arch_async_page_present()
11293 if (!vcpu->arch.apf.pageready_pending) in kvm_arch_async_page_present_queued()
11307 atomic_inc(&kvm->arch.assigned_device_count); in kvm_arch_start_assignment()
11313 atomic_dec(&kvm->arch.assigned_device_count); in kvm_arch_end_assignment()
11319 return arch_atomic_read(&kvm->arch.assigned_device_count); in kvm_arch_has_assigned_device()
11325 atomic_inc(&kvm->arch.noncoherent_dma_count); in kvm_arch_register_noncoherent_dma()
11331 atomic_dec(&kvm->arch.noncoherent_dma_count); in kvm_arch_unregister_noncoherent_dma()
11337 return atomic_read(&kvm->arch.noncoherent_dma_count); in kvm_arch_has_noncoherent_dma()
11353 irqfd->producer = prod; in kvm_arch_irq_bypass_add_producer()
11354 kvm_arch_start_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11355 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, in kvm_arch_irq_bypass_add_producer()
11356 prod->irq, irqfd->gsi, 1); in kvm_arch_irq_bypass_add_producer()
11359 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_add_producer()
11371 WARN_ON(irqfd->producer != prod); in kvm_arch_irq_bypass_del_producer()
11372 irqfd->producer = NULL; in kvm_arch_irq_bypass_del_producer()
11376 * remapped mode, so we can re-use the current implementation in kvm_arch_irq_bypass_del_producer()
11380 ret = kvm_x86_ops.update_pi_irte(irqfd->kvm, prod->irq, irqfd->gsi, 0); in kvm_arch_irq_bypass_del_producer()
11383 " fails: %d\n", irqfd->consumer.token, ret); in kvm_arch_irq_bypass_del_producer()
11385 kvm_arch_end_assignment(irqfd->kvm); in kvm_arch_irq_bypass_del_producer()
11401 return (vcpu->arch.msr_kvm_poll_control & 1) == 0; in kvm_arch_no_poll()
11439 vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) { in kvm_fixup_and_inject_pf_error()
11441 * If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page in kvm_fixup_and_inject_pf_error()
11451 vcpu->arch.walk_mmu->inject_page_fault(vcpu, &fault); in kvm_fixup_and_inject_pf_error()
11472 * doesn't seem to be a real use-case behind such requests, just return in kvm_handle_memory_failure()
11475 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvm_handle_memory_failure()
11476 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvm_handle_memory_failure()
11477 vcpu->run->internal.ndata = 0; in kvm_handle_memory_failure()
11528 if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd) in kvm_handle_invpcid()
11532 kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free); in kvm_handle_invpcid()
11544 * page tables, so a non-global flush just degenerates to a in kvm_handle_invpcid()