1 // SPDX-License-Identifier: GPL-2.0-only
3 * Kernel-based Virtual Machine driver for Linux
5 * This module enables machines with Intel VT-x extensions to run virtual
30 #include <linux/entry-kvm.h>
48 #include <asm/spec-ctrl.h>
125 /* Guest_tsc -> host_tsc conversion requires 64-bit division. */
175 * These 2 parameters are used to configure the controls for Pause-Loop Exiting:
191 /* Default doubles per-vcpu window every exit. */
195 /* Default resets per-vcpu window every exit to ple_window. */
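The ple_window comments above only hint at the arithmetic: by default the per-vCPU window doubles on a contended PAUSE-loop exit and resets to ple_window otherwise. Below is a minimal standalone sketch of that grow/shrink behaviour; the 4096 default, the clamp limit and the function names are illustrative assumptions, not KVM's implementation.

#include <stdio.h>

/* Illustrative defaults; the real module parameters are configurable. */
#define PLE_WINDOW_DEFAULT 4096U
#define PLE_WINDOW_MAX     (1U << 16)

static unsigned int ple_grow(unsigned int val, unsigned int grow)
{
	/* "Default doubles per-vcpu window every exit": grow == 2. */
	unsigned long long next = grow ? (unsigned long long)val * grow : val;

	return next > PLE_WINDOW_MAX ? PLE_WINDOW_MAX : (unsigned int)next;
}

static unsigned int ple_shrink(unsigned int val, unsigned int shrink)
{
	/* "Default resets per-vcpu window every exit": shrink == 0. */
	if (!shrink)
		return PLE_WINDOW_DEFAULT;
	val /= shrink;
	return val < PLE_WINDOW_DEFAULT ? PLE_WINDOW_DEFAULT : val;
}

int main(void)
{
	unsigned int w = PLE_WINDOW_DEFAULT;

	w = ple_grow(w, 2);	/* contended PAUSE-loop exit: window doubles */
	w = ple_shrink(w, 0);	/* quiet vCPU: window resets to the default  */
	printf("window=%u\n", w);
	return 0;
}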
203 /* Default is SYSTEM mode, 1 for host-guest mode */
285 return -ENOMEM; in vmx_setup_l1d_flush()
324 return -EINVAL; in vmentry_l1d_flush_parse()
380 if (!vmx->disable_fb_clear) in vmx_disable_fb_clear()
387 vmx->msr_ia32_mcu_opt_ctrl = msr; in vmx_disable_fb_clear()
392 if (!vmx->disable_fb_clear) in vmx_enable_fb_clear()
395 vmx->msr_ia32_mcu_opt_ctrl &= ~FB_CLEAR_DIS; in vmx_enable_fb_clear()
396 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, vmx->msr_ia32_mcu_opt_ctrl); in vmx_enable_fb_clear()
401 vmx->disable_fb_clear = vmx_fb_clear_ctrl_available; in vmx_update_fb_clear_dis()
408 if ((vcpu->arch.arch_capabilities & ARCH_CAP_FB_CLEAR) || in vmx_update_fb_clear_dis()
409 ((vcpu->arch.arch_capabilities & ARCH_CAP_MDS_NO) && in vmx_update_fb_clear_dis()
410 (vcpu->arch.arch_capabilities & ARCH_CAP_TAA_NO) && in vmx_update_fb_clear_dis()
411 (vcpu->arch.arch_capabilities & ARCH_CAP_PSDP_NO) && in vmx_update_fb_clear_dis()
412 (vcpu->arch.arch_capabilities & ARCH_CAP_FBSDP_NO) && in vmx_update_fb_clear_dis()
413 (vcpu->arch.arch_capabilities & ARCH_CAP_SBDR_SSDP_NO))) in vmx_update_fb_clear_dis()
414 vmx->disable_fb_clear = false; in vmx_update_fb_clear_dis()
474 * We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
511 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_clear()
517 * Though SYSCALL is only supported in 64-bit mode on Intel CPUs, kvm
519 * CPUID.0:{EBX,ECX,EDX} is "AuthenticAMD" or "AMDisbetter!" To
544 tmp_eptp = to_vmx(vcpu)->ept_pointer; in check_ept_pointer_match()
545 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { in check_ept_pointer_match()
546 to_kvm_vmx(kvm)->ept_pointers_match in check_ept_pointer_match()
552 to_kvm_vmx(kvm)->ept_pointers_match = EPT_POINTERS_MATCH; in check_ept_pointer_match()
560 return hyperv_fill_flush_guest_mapping_list(flush, range->start_gfn, in kvm_fill_hv_flush_list_func()
561 range->pages); in kvm_fill_hv_flush_list_func()
567 u64 ept_pointer = to_vmx(vcpu)->ept_pointer; in __hv_remote_flush_tlb_with_range()
587 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); in hv_remote_flush_tlb_with_range()
589 if (to_kvm_vmx(kvm)->ept_pointers_match == EPT_POINTERS_CHECK) in hv_remote_flush_tlb_with_range()
592 if (to_kvm_vmx(kvm)->ept_pointers_match != EPT_POINTERS_MATCH) { in hv_remote_flush_tlb_with_range()
595 if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) in hv_remote_flush_tlb_with_range()
604 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); in hv_remote_flush_tlb_with_range()
616 &vcpu->kvm->arch.hyperv.hv_pa_pg; in hv_enable_direct_tlbflush()
618 * Synthetic VM-Exit is not enabled in current code and so all in hv_enable_direct_tlbflush()
625 return -ENOMEM; in hv_enable_direct_tlbflush()
627 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs; in hv_enable_direct_tlbflush()
629 evmcs->partition_assist_page = in hv_enable_direct_tlbflush()
631 evmcs->hv_vm_id = (unsigned long)vcpu->kvm; in hv_enable_direct_tlbflush()
632 evmcs->hv_enlightenments_control.nested_flush_hypercall = 1; in hv_enable_direct_tlbflush()
640 * Comment's format: document - errata name - stepping - processor name.
645 /* 323344.pdf - BA86 - D0 - Xeon 7500 Series */
647 /* 323056.pdf - AAX65 - C2 - Xeon L3406 */
648 /* 322814.pdf - AAT59 - C2 - i7-600, i5-500, i5-400 and i3-300 Mobile */
649 /* 322911.pdf - AAU65 - C2 - i5-600, i3-500 Desktop and Pentium G6950 */
651 /* 322911.pdf - AAU65 - K0 - i5-600, i3-500 Desktop and Pentium G6950 */
653 /* 322373.pdf - AAO95 - B1 - Xeon 3400 Series */
654 /* 322166.pdf - AAN92 - B1 - i7-800 and i5-700 Desktop */
656 * 320767.pdf - AAP86 - B1 -
657 * i7-900 Mobile Extreme, i7-800 and i7-700 Mobile
660 /* 321333.pdf - AAM126 - C0 - Xeon 3500 */
662 /* 321333.pdf - AAM126 - C1 - Xeon 3500 */
664 /* 320836.pdf - AAJ124 - C0 - i7-900 Desktop Extreme and i7-900 Desktop */
666 /* 321333.pdf - AAM126 - D0 - Xeon 3500 */
667 /* 321324.pdf - AAK139 - D0 - Xeon 5500 */
668 /* 320836.pdf - AAJ124 - D0 - i7-900 Extreme and i7-900 Desktop */
670 /* Xeon E3-1220 V2 */
705 return -ENOENT; in possible_passthrough_msr_slot()
725 r = possible_passthrough_msr_slot(msr) != -ENOENT; in is_valid_passthrough_msr()
736 for (i = 0; i < vmx->nr_uret_msrs; ++i) in __vmx_find_uret_msr()
737 if (vmx_uret_msrs_list[vmx->guest_uret_msrs[i].slot] == msr) in __vmx_find_uret_msr()
739 return -1; in __vmx_find_uret_msr()
748 return &vmx->guest_uret_msrs[i]; in vmx_find_uret_msr()
757 u64 old_msr_data = msr->data; in vmx_set_guest_uret_msr()
758 msr->data = data; in vmx_set_guest_uret_msr()
759 if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) { in vmx_set_guest_uret_msr()
761 ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask); in vmx_set_guest_uret_msr()
764 msr->data = old_msr_data; in vmx_set_guest_uret_msr()
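A minimal sketch of the rollback pattern visible in vmx_set_guest_uret_msr() above: cache the new value, push it to hardware only when the slot is currently live, and restore the old value if the write fails. The hw_write_msr() stub and its failure rule are assumptions for illustration, not kvm_set_user_return_msr().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct uret_msr {
	uint64_t data;
	uint64_t mask;
	bool     active;	/* currently loaded into hardware? */
};

/* Stand-in for the real user-return MSR write; pretend odd values fail. */
static int hw_write_msr(uint64_t data, uint64_t mask)
{
	return (data & mask & 1) ? -1 : 0;
}

static int set_guest_uret_msr(struct uret_msr *msr, uint64_t data)
{
	uint64_t old = msr->data;

	msr->data = data;
	if (msr->active && hw_write_msr(msr->data, msr->mask)) {
		msr->data = old;	/* keep cache and hardware consistent */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct uret_msr efer = { .data = 0, .mask = ~0ULL, .active = true };

	printf("ok=%d data=%llu\n", set_guest_uret_msr(&efer, 0x500),
	       (unsigned long long)efer.data);
	return 0;
}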
777 vmcs_clear(v->vmcs); in crash_vmclear_local_loaded_vmcss()
786 if (loaded_vmcs->cpu != cpu) in __loaded_vmcs_clear()
788 if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs) in __loaded_vmcs_clear()
791 vmcs_clear(loaded_vmcs->vmcs); in __loaded_vmcs_clear()
792 if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched) in __loaded_vmcs_clear()
793 vmcs_clear(loaded_vmcs->shadow_vmcs); in __loaded_vmcs_clear()
795 list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link); in __loaded_vmcs_clear()
799 * current percpu list, complete before setting loaded_vmcs->vcpu to in __loaded_vmcs_clear()
800 * -1, otherwise a different cpu can see vcpu == -1 first and add in __loaded_vmcs_clear()
806 loaded_vmcs->cpu = -1; in __loaded_vmcs_clear()
807 loaded_vmcs->launched = 0; in __loaded_vmcs_clear()
812 int cpu = loaded_vmcs->cpu; in loaded_vmcs_clear()
814 if (cpu != -1) in loaded_vmcs_clear()
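The lines above show a loaded VMCS being unlinked from the list of the CPU it was last loaded on and its ->cpu reset to -1. The following is a toy, single-threaded model of that bookkeeping, assuming a simple singly linked list per CPU; the real code also uses an IPI to run on the owning CPU, memory barriers and VMCLEAR, all omitted here.

#include <stdio.h>

#define NR_CPUS 4

struct loaded_vmcs {
	int cpu;			/* CPU it is linked on, or -1 */
	struct loaded_vmcs *next;	/* per-CPU singly linked list  */
};

static struct loaded_vmcs *loaded_vmcss_on_cpu[NR_CPUS];

static void vmcs_load_on(struct loaded_vmcs *v, int cpu)
{
	v->cpu = cpu;
	v->next = loaded_vmcss_on_cpu[cpu];
	loaded_vmcss_on_cpu[cpu] = v;
}

static void loaded_vmcs_clear(struct loaded_vmcs *v)
{
	struct loaded_vmcs **p;

	if (v->cpu == -1)
		return;			/* never loaded, nothing to unlink */

	for (p = &loaded_vmcss_on_cpu[v->cpu]; *p; p = &(*p)->next) {
		if (*p == v) {
			*p = v->next;	/* unlink from the owning CPU's list */
			break;
		}
	}
	v->cpu = -1;			/* now safe to load on any CPU */
}

int main(void)
{
	struct loaded_vmcs a = { .cpu = -1 };

	vmcs_load_on(&a, 2);
	loaded_vmcs_clear(&a);
	printf("cpu=%d\n", a.cpu);
	return 0;
}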
825 if (!kvm_register_is_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS)) { in vmx_segment_cache_test_set()
826 kvm_register_mark_available(&vmx->vcpu, VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
827 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_test_set()
829 ret = vmx->segment_cache.bitmask & mask; in vmx_segment_cache_test_set()
830 vmx->segment_cache.bitmask |= mask; in vmx_segment_cache_test_set()
836 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
845 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
854 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
863 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
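The segment-cache helpers above use one bit per (segment, field) pair to decide whether a cached value is still valid, re-reading from the VMCS only on a miss. A small standalone sketch of that test-and-set pattern follows, with a stub in place of vmcs_read32() and invented segment/field counts.

#include <stdbool.h>
#include <stdio.h>

enum { SEG_FIELD_SEL, SEG_FIELD_BASE, SEG_FIELD_LIMIT, SEG_FIELD_AR, SEG_FIELD_NR };
#define NR_SEGS 8

static unsigned int cache_bitmask;
static unsigned int cached_limit[NR_SEGS];

static unsigned int vmcs_read_limit(int seg)	/* stand-in for vmcs_read32() */
{
	return 0xffffU + (unsigned int)seg;
}

static bool cache_test_set(int seg, int field)
{
	unsigned int mask = 1U << (seg * SEG_FIELD_NR + field);
	bool hit = cache_bitmask & mask;

	cache_bitmask |= mask;	/* whatever happens next, the field is now cached */
	return hit;
}

static unsigned int read_seg_limit(int seg)
{
	if (!cache_test_set(seg, SEG_FIELD_LIMIT))
		cached_limit[seg] = vmcs_read_limit(seg);	/* miss: refill */
	return cached_limit[seg];
}

int main(void)
{
	/* First call misses and reads the "VMCS"; second call hits the cache. */
	printf("%u %u\n", read_seg_limit(1), read_seg_limit(1));
	return 0;
}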
884 if ((vcpu->guest_debug & in update_exception_bitmap()
888 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
899 eb |= get_vmcs12(vcpu)->exception_bitmap; in update_exception_bitmap()
927 msr_bitmap = vmx->loaded_vmcs->msr_bitmap; in msr_write_intercepted()
943 if (vmx->loaded_vmcs->launched) in __vmx_vcpu_run_flags()
949 * it after vmexit and store it in vmx->spec_ctrl. in __vmx_vcpu_run_flags()
968 for (i = 0; i < m->nr; ++i) { in vmx_find_loadstore_msr_slot()
969 if (m->val[i].index == msr) in vmx_find_loadstore_msr_slot()
972 return -ENOENT; in vmx_find_loadstore_msr_slot()
978 struct msr_autoload *m = &vmx->msr_autoload; in clear_atomic_switch_msr()
998 i = vmx_find_loadstore_msr_slot(&m->guest, msr); in clear_atomic_switch_msr()
1001 --m->guest.nr; in clear_atomic_switch_msr()
1002 m->guest.val[i] = m->guest.val[m->guest.nr]; in clear_atomic_switch_msr()
1003 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); in clear_atomic_switch_msr()
1006 i = vmx_find_loadstore_msr_slot(&m->host, msr); in clear_atomic_switch_msr()
1010 --m->host.nr; in clear_atomic_switch_msr()
1011 m->host.val[i] = m->host.val[m->host.nr]; in clear_atomic_switch_msr()
1012 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); in clear_atomic_switch_msr()
1031 struct msr_autoload *m = &vmx->msr_autoload; in add_atomic_switch_msr()
1065 i = vmx_find_loadstore_msr_slot(&m->guest, msr); in add_atomic_switch_msr()
1067 j = vmx_find_loadstore_msr_slot(&m->host, msr); in add_atomic_switch_msr()
1069 if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) || in add_atomic_switch_msr()
1070 (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) { in add_atomic_switch_msr()
1076 i = m->guest.nr++; in add_atomic_switch_msr()
1077 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr); in add_atomic_switch_msr()
1079 m->guest.val[i].index = msr; in add_atomic_switch_msr()
1080 m->guest.val[i].value = guest_val; in add_atomic_switch_msr()
1086 j = m->host.nr++; in add_atomic_switch_msr()
1087 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr); in add_atomic_switch_msr()
1089 m->host.val[j].index = msr; in add_atomic_switch_msr()
1090 m->host.val[j].value = host_val; in add_atomic_switch_msr()
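clear_atomic_switch_msr()/add_atomic_switch_msr() above keep the fixed-size MSR autoload arrays dense: removal moves the last entry into the freed slot so only the count needs to shrink, and adding an existing MSR just updates its value. A compact standalone model of that array handling is sketched below, with the VMCS count writes left out.

#include <stdio.h>

#define MAX_NR_LOADSTORE_MSRS 8

struct vmx_msr_entry { unsigned int index; unsigned long long value; };
struct vmx_msrs { unsigned int nr; struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS]; };

static int find_slot(const struct vmx_msrs *m, unsigned int msr)
{
	for (unsigned int i = 0; i < m->nr; i++)
		if (m->val[i].index == msr)
			return (int)i;
	return -1;
}

static int add_msr(struct vmx_msrs *m, unsigned int msr, unsigned long long val)
{
	int i = find_slot(m, msr);

	if (i < 0) {
		if (m->nr == MAX_NR_LOADSTORE_MSRS)
			return -1;		/* list is full */
		i = (int)m->nr++;
		m->val[i].index = msr;
	}
	m->val[i].value = val;			/* existing entries are just updated */
	return 0;
}

static void clear_msr(struct vmx_msrs *m, unsigned int msr)
{
	int i = find_slot(m, msr);

	if (i < 0)
		return;
	m->val[i] = m->val[--m->nr];		/* swap-with-last keeps the array dense */
}

int main(void)
{
	struct vmx_msrs guest = { 0 };

	add_msr(&guest, 0x1a0, 1);
	add_msr(&guest, 0x1a4, 2);
	clear_msr(&guest, 0x1a0);
	printf("nr=%u first=%#x\n", guest.nr, guest.val[0].index);
	return 0;
}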
1095 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
1120 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
1140 vmx->guest_uret_msrs[i].data = guest_efer; in update_transition_efer()
1141 vmx->guest_uret_msrs[i].mask = ~ignore_bits; in update_transition_efer()
1148 * On 32-bit kernels, VM exits still load the FS and GS bases from the
1178 !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); in pt_can_write_msr()
1183 /* The base must be 128-byte aligned and a legal physical address. */ in pt_output_base_valid()
1191 wrmsrl(MSR_IA32_RTIT_STATUS, ctx->status); in pt_load_msr()
1192 wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); in pt_load_msr()
1193 wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); in pt_load_msr()
1194 wrmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); in pt_load_msr()
1196 wrmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); in pt_load_msr()
1197 wrmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); in pt_load_msr()
1205 rdmsrl(MSR_IA32_RTIT_STATUS, ctx->status); in pt_save_msr()
1206 rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, ctx->output_base); in pt_save_msr()
1207 rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, ctx->output_mask); in pt_save_msr()
1208 rdmsrl(MSR_IA32_RTIT_CR3_MATCH, ctx->cr3_match); in pt_save_msr()
1210 rdmsrl(MSR_IA32_RTIT_ADDR0_A + i * 2, ctx->addr_a[i]); in pt_save_msr()
1211 rdmsrl(MSR_IA32_RTIT_ADDR0_B + i * 2, ctx->addr_b[i]); in pt_save_msr()
1224 rdmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); in pt_guest_enter()
1225 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { in pt_guest_enter()
1227 pt_save_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); in pt_guest_enter()
1228 pt_load_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); in pt_guest_enter()
1237 if (vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) { in pt_guest_exit()
1238 pt_save_msr(&vmx->pt_desc.guest, vmx->pt_desc.addr_range); in pt_guest_exit()
1239 pt_load_msr(&vmx->pt_desc.host, vmx->pt_desc.addr_range); in pt_guest_exit()
1243 wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl); in pt_guest_exit()
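pt_guest_enter()/pt_guest_exit() above save one Intel PT context and load the other, symmetrically, and only when the guest has RTIT_CTL_TRACEEN set. Here is a minimal model of that pairing with the RTIT MSRs reduced to a plain struct; it illustrates the save/load symmetry only, not the kernel's MSR accessors.

#include <stdio.h>

struct pt_ctx { unsigned long long ctl, status, output_base; };
struct pt_desc { struct pt_ctx host, guest; };

#define RTIT_CTL_TRACEEN 0x1ULL

static struct pt_ctx hw;	/* stand-in for the real RTIT MSRs */

static void pt_load(const struct pt_ctx *c) { hw = *c; }
static void pt_save(struct pt_ctx *c)       { *c = hw; }

static void pt_guest_enter(struct pt_desc *pt)
{
	if (pt->guest.ctl & RTIT_CTL_TRACEEN) {
		pt_save(&pt->host);	/* preserve host tracing state       */
		pt_load(&pt->guest);	/* expose the guest's own trace state */
	}
}

static void pt_guest_exit(struct pt_desc *pt)
{
	if (pt->guest.ctl & RTIT_CTL_TRACEEN) {
		pt_save(&pt->guest);	/* the guest may have changed its MSRs */
		pt_load(&pt->host);	/* restore the host context            */
	}
}

int main(void)
{
	struct pt_desc pt = { .guest.ctl = RTIT_CTL_TRACEEN, .guest.status = 7 };

	pt_guest_enter(&pt);
	hw.status = 9;			/* pretend the guest ran and traced */
	pt_guest_exit(&pt);
	printf("guest status now %llu\n", pt.guest.status);
	return 0;
}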
1249 if (unlikely(fs_sel != host->fs_sel)) { in vmx_set_host_fs_gs()
1254 host->fs_sel = fs_sel; in vmx_set_host_fs_gs()
1256 if (unlikely(gs_sel != host->gs_sel)) { in vmx_set_host_fs_gs()
1261 host->gs_sel = gs_sel; in vmx_set_host_fs_gs()
1263 if (unlikely(fs_base != host->fs_base)) { in vmx_set_host_fs_gs()
1265 host->fs_base = fs_base; in vmx_set_host_fs_gs()
1267 if (unlikely(gs_base != host->gs_base)) { in vmx_set_host_fs_gs()
1269 host->gs_base = gs_base; in vmx_set_host_fs_gs()
1284 vmx->req_immediate_exit = false; in vmx_prepare_switch_to_guest()
1289 * to/from long-mode by setting MSR_EFER.LMA. in vmx_prepare_switch_to_guest()
1291 if (!vmx->guest_uret_msrs_loaded) { in vmx_prepare_switch_to_guest()
1292 vmx->guest_uret_msrs_loaded = true; in vmx_prepare_switch_to_guest()
1293 for (i = 0; i < vmx->nr_active_uret_msrs; ++i) in vmx_prepare_switch_to_guest()
1294 kvm_set_user_return_msr(vmx->guest_uret_msrs[i].slot, in vmx_prepare_switch_to_guest()
1295 vmx->guest_uret_msrs[i].data, in vmx_prepare_switch_to_guest()
1296 vmx->guest_uret_msrs[i].mask); in vmx_prepare_switch_to_guest()
1300 if (vmx->nested.need_vmcs12_to_shadow_sync) in vmx_prepare_switch_to_guest()
1303 if (vmx->guest_state_loaded) in vmx_prepare_switch_to_guest()
1306 host_state = &vmx->loaded_vmcs->host_state; in vmx_prepare_switch_to_guest()
1312 host_state->ldt_sel = kvm_read_ldt(); in vmx_prepare_switch_to_guest()
1315 savesegment(ds, host_state->ds_sel); in vmx_prepare_switch_to_guest()
1316 savesegment(es, host_state->es_sel); in vmx_prepare_switch_to_guest()
1319 if (likely(is_64bit_mm(current->mm))) { in vmx_prepare_switch_to_guest()
1321 fs_sel = current->thread.fsindex; in vmx_prepare_switch_to_guest()
1322 gs_sel = current->thread.gsindex; in vmx_prepare_switch_to_guest()
1323 fs_base = current->thread.fsbase; in vmx_prepare_switch_to_guest()
1324 vmx->msr_host_kernel_gs_base = current->thread.gsbase; in vmx_prepare_switch_to_guest()
1329 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); in vmx_prepare_switch_to_guest()
1332 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_guest()
1341 vmx->guest_state_loaded = true; in vmx_prepare_switch_to_guest()
1348 if (!vmx->guest_state_loaded) in vmx_prepare_switch_to_host()
1351 host_state = &vmx->loaded_vmcs->host_state; in vmx_prepare_switch_to_host()
1353 ++vmx->vcpu.stat.host_state_reload; in vmx_prepare_switch_to_host()
1356 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_host()
1358 if (host_state->ldt_sel || (host_state->gs_sel & 7)) { in vmx_prepare_switch_to_host()
1359 kvm_load_ldt(host_state->ldt_sel); in vmx_prepare_switch_to_host()
1361 load_gs_index(host_state->gs_sel); in vmx_prepare_switch_to_host()
1363 loadsegment(gs, host_state->gs_sel); in vmx_prepare_switch_to_host()
1366 if (host_state->fs_sel & 7) in vmx_prepare_switch_to_host()
1367 loadsegment(fs, host_state->fs_sel); in vmx_prepare_switch_to_host()
1369 if (unlikely(host_state->ds_sel | host_state->es_sel)) { in vmx_prepare_switch_to_host()
1370 loadsegment(ds, host_state->ds_sel); in vmx_prepare_switch_to_host()
1371 loadsegment(es, host_state->es_sel); in vmx_prepare_switch_to_host()
1376 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in vmx_prepare_switch_to_host()
1379 vmx->guest_state_loaded = false; in vmx_prepare_switch_to_host()
1380 vmx->guest_uret_msrs_loaded = false; in vmx_prepare_switch_to_host()
1387 if (vmx->guest_state_loaded) in vmx_read_guest_kernel_gs_base()
1388 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_read_guest_kernel_gs_base()
1390 return vmx->msr_guest_kernel_gs_base; in vmx_read_guest_kernel_gs_base()
1396 if (vmx->guest_state_loaded) in vmx_write_guest_kernel_gs_base()
1399 vmx->msr_guest_kernel_gs_base = data; in vmx_write_guest_kernel_gs_base()
1407 bool already_loaded = vmx->loaded_vmcs->cpu == cpu; in vmx_vcpu_load_vmcs()
1411 loaded_vmcs_clear(vmx->loaded_vmcs); in vmx_vcpu_load_vmcs()
1415 * Ensure loaded_vmcs->cpu is read before adding loaded_vmcs to in vmx_vcpu_load_vmcs()
1422 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, in vmx_vcpu_load_vmcs()
1428 if (prev != vmx->loaded_vmcs->vmcs) { in vmx_vcpu_load_vmcs()
1429 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; in vmx_vcpu_load_vmcs()
1430 vmcs_load(vmx->loaded_vmcs->vmcs); in vmx_vcpu_load_vmcs()
1436 * performs IBPB on nested VM-Exit (a single nested transition in vmx_vcpu_load_vmcs()
1439 if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev)) in vmx_vcpu_load_vmcs()
1454 * Linux uses per-cpu TSS and GDT, so set these when switching in vmx_vcpu_load_vmcs()
1458 (unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss); in vmx_vcpu_load_vmcs()
1464 vmx->loaded_vmcs->cpu = cpu; in vmx_vcpu_load_vmcs()
1469 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) in vmx_vcpu_load_vmcs()
1485 vmx->host_debugctlmsr = get_debugctlmsr(); in vmx_vcpu_load()
1508 if (vmx->rmode.vm86_active) { in vmx_get_rflags()
1510 save_rflags = vmx->rmode.save_rflags; in vmx_get_rflags()
1513 vmx->rflags = rflags; in vmx_get_rflags()
1515 return vmx->rflags; in vmx_get_rflags()
1525 vmx->rflags = rflags; in vmx_set_rflags()
1531 vmx->rflags = rflags; in vmx_set_rflags()
1532 if (vmx->rmode.vm86_active) { in vmx_set_rflags()
1533 vmx->rmode.save_rflags = rflags; in vmx_set_rflags()
1538 if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM) in vmx_set_rflags()
1539 vmx->emulation_required = emulation_required(vcpu); in vmx_set_rflags()
1580 if (data & vmx->pt_desc.ctl_bitmask) in vmx_rtit_ctl_check()
1587 if ((vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN) && in vmx_rtit_ctl_check()
1588 ((vmx->pt_desc.guest.ctl ^ data) & ~RTIT_CTL_TRACEEN)) in vmx_rtit_ctl_check()
1594 * CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 0 in vmx_rtit_ctl_check()
1598 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_rtit_ctl_check()
1606 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc_periods); in vmx_rtit_ctl_check()
1607 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc) && in vmx_rtit_ctl_check()
1611 value = intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_rtit_ctl_check()
1613 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && in vmx_rtit_ctl_check()
1617 value = intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_periods); in vmx_rtit_ctl_check()
1618 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc) && in vmx_rtit_ctl_check()
1628 if ((value && (vmx->pt_desc.addr_range < 1)) || (value > 2)) in vmx_rtit_ctl_check()
1631 if ((value && (vmx->pt_desc.addr_range < 2)) || (value > 2)) in vmx_rtit_ctl_check()
1634 if ((value && (vmx->pt_desc.addr_range < 3)) || (value > 2)) in vmx_rtit_ctl_check()
1637 if ((value && (vmx->pt_desc.addr_range < 4)) || (value > 2)) in vmx_rtit_ctl_check()
1657 * (namely Hyper-V) don't set it due to it being undefined behavior, in skip_emulated_instruction()
1661 to_vmx(vcpu)->exit_reason.basic != EXIT_REASON_EPT_MISCONFIG) { in skip_emulated_instruction()
1666 * We need to mask out the high 32 bits of RIP if not in 64-bit in skip_emulated_instruction()
1667 * mode, but just finding out that we are in 64-bit mode is in skip_emulated_instruction()
1686 * Recognizes a pending MTF VM-exit and records the nested state for later
1698 * Per the SDM, MTF takes priority over debug-trap exceptions besides in vmx_update_emulated_instruction()
1699 * T-bit traps. As instruction emulation is completed (i.e. at the in vmx_update_emulated_instruction()
1701 * debug-trap. Record the pending MTF state to be delivered in in vmx_update_emulated_instruction()
1705 (!vcpu->arch.exception.pending || in vmx_update_emulated_instruction()
1706 vcpu->arch.exception.nr == DB_VECTOR)) in vmx_update_emulated_instruction()
1707 vmx->nested.mtf_pending = true; in vmx_update_emulated_instruction()
1709 vmx->nested.mtf_pending = false; in vmx_update_emulated_instruction()
1726 if (kvm_hlt_in_guest(vcpu->kvm) && in vmx_clear_hlt()
1734 unsigned nr = vcpu->arch.exception.nr; in vmx_queue_exception()
1735 bool has_error_code = vcpu->arch.exception.has_error_code; in vmx_queue_exception()
1736 u32 error_code = vcpu->arch.exception.error_code; in vmx_queue_exception()
1746 if (vmx->rmode.vm86_active) { in vmx_queue_exception()
1749 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_queue_exception()
1754 WARN_ON_ONCE(vmx->emulation_required); in vmx_queue_exception()
1758 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
1776 to = vmx->nr_active_uret_msrs++; in vmx_setup_uret_msr()
1778 tmp = vmx->guest_uret_msrs[to]; in vmx_setup_uret_msr()
1779 vmx->guest_uret_msrs[to] = vmx->guest_uret_msrs[from]; in vmx_setup_uret_msr()
1780 vmx->guest_uret_msrs[from] = tmp; in vmx_setup_uret_msr()
1785 * msrs. Don't touch the 64-bit msrs if the guest is in legacy
1790 vmx->guest_uret_msrs_loaded = false; in setup_msrs()
1791 vmx->nr_active_uret_msrs = 0; in setup_msrs()
1797 if (is_long_mode(&vmx->vcpu) && (vmx->vcpu.arch.efer & EFER_SCE)) { in setup_msrs()
1806 if (guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) in setup_msrs()
1812 vmx_update_msr_bitmap(&vmx->vcpu); in setup_msrs()
1827 (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)) in vmx_write_l1_tsc_offset()
1828 g_tsc_offset = vmcs12->tsc_offset; in vmx_write_l1_tsc_offset()
1830 trace_kvm_write_tsc_offset(vcpu->vcpu_id, in vmx_write_l1_tsc_offset()
1831 vcpu->arch.tsc_offset - g_tsc_offset, in vmx_write_l1_tsc_offset()
1851 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; in vmx_feature_control_msr_valid()
1858 switch (msr->index) { in vmx_get_msr_feature()
1862 return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data); in vmx_get_msr_feature()
1864 msr->data = vmx_get_perf_capabilities(); in vmx_get_msr_feature()
1873 * Returns 0 on success, non-0 otherwise.
1882 switch (msr_info->index) { in vmx_get_msr()
1885 msr_info->data = vmcs_readl(GUEST_FS_BASE); in vmx_get_msr()
1888 msr_info->data = vmcs_readl(GUEST_GS_BASE); in vmx_get_msr()
1891 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); in vmx_get_msr()
1897 if (!msr_info->host_initiated && in vmx_get_msr()
1898 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) in vmx_get_msr()
1902 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) in vmx_get_msr()
1905 msr_info->data = vmx->msr_ia32_umwait_control; in vmx_get_msr()
1908 if (!msr_info->host_initiated && in vmx_get_msr()
1912 msr_info->data = to_vmx(vcpu)->spec_ctrl; in vmx_get_msr()
1915 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS); in vmx_get_msr()
1918 msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP); in vmx_get_msr()
1921 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); in vmx_get_msr()
1925 (!msr_info->host_initiated && in vmx_get_msr()
1928 msr_info->data = vmcs_read64(GUEST_BNDCFGS); in vmx_get_msr()
1931 if (!msr_info->host_initiated && in vmx_get_msr()
1932 !(vmx->msr_ia32_feature_control & in vmx_get_msr()
1935 msr_info->data = vcpu->arch.mcg_ext_ctl; in vmx_get_msr()
1938 msr_info->data = vmx->msr_ia32_feature_control; in vmx_get_msr()
1943 if (vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, in vmx_get_msr()
1944 &msr_info->data)) in vmx_get_msr()
1948 * instead of just ignoring the features, different Hyper-V in vmx_get_msr()
1953 if (!msr_info->host_initiated && in vmx_get_msr()
1954 vmx->nested.enlightened_vmcs_enabled) in vmx_get_msr()
1955 nested_evmcs_filter_control_msr(msr_info->index, in vmx_get_msr()
1956 &msr_info->data); in vmx_get_msr()
1961 msr_info->data = vmx->pt_desc.guest.ctl; in vmx_get_msr()
1966 msr_info->data = vmx->pt_desc.guest.status; in vmx_get_msr()
1970 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1973 msr_info->data = vmx->pt_desc.guest.cr3_match; in vmx_get_msr()
1977 (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1979 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1982 msr_info->data = vmx->pt_desc.guest.output_base; in vmx_get_msr()
1986 (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1988 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
1991 msr_info->data = vmx->pt_desc.guest.output_mask; in vmx_get_msr()
1994 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; in vmx_get_msr()
1996 (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_get_msr()
2000 msr_info->data = vmx->pt_desc.guest.addr_b[index / 2]; in vmx_get_msr()
2002 msr_info->data = vmx->pt_desc.guest.addr_a[index / 2]; in vmx_get_msr()
2005 if (!msr_info->host_initiated && in vmx_get_msr()
2011 msr = vmx_find_uret_msr(vmx, msr_info->index); in vmx_get_msr()
2013 msr_info->data = msr->data; in vmx_get_msr()
2034 * Returns 0 on success, non-0 otherwise.
2042 u32 msr_index = msr_info->index; in vmx_set_msr()
2043 u64 data = msr_info->data; in vmx_set_msr()
2065 get_vmcs12(vcpu)->guest_sysenter_cs = data; in vmx_set_msr()
2071 get_vmcs12(vcpu)->guest_sysenter_eip = data; in vmx_set_msr()
2078 get_vmcs12(vcpu)->guest_sysenter_esp = data; in vmx_set_msr()
2083 if (is_guest_mode(vcpu) && get_vmcs12(vcpu)->vm_exit_controls & in vmx_set_msr()
2085 get_vmcs12(vcpu)->guest_ia32_debugctl = data; in vmx_set_msr()
2092 (!msr_info->host_initiated && in vmx_set_msr()
2101 if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) in vmx_set_msr()
2104 /* Reserved bit 1 and the upper 32 bits [63:32] must be zero */ in vmx_set_msr()
2108 vmx->msr_ia32_umwait_control = data; in vmx_set_msr()
2111 if (!msr_info->host_initiated && in vmx_set_msr()
2118 vmx->spec_ctrl = data; in vmx_set_msr()
2123 * For non-nested: in vmx_set_msr()
2124 * When it's written (to non-zero) for the first time, pass in vmx_set_msr()
2139 if (!msr_info->host_initiated && in vmx_set_msr()
2140 !(vcpu->arch.arch_capabilities & ARCH_CAP_TSX_CTRL_MSR)) in vmx_set_msr()
2146 if (!msr_info->host_initiated && in vmx_set_msr()
2160 * For non-nested: in vmx_set_msr()
2161 * When it's written (to non-zero) for the first time, pass in vmx_set_msr()
2177 get_vmcs12(vcpu)->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) in vmx_set_msr()
2178 get_vmcs12(vcpu)->guest_ia32_pat = data; in vmx_set_msr()
2182 vcpu->arch.pat = data; in vmx_set_msr()
2191 if ((!msr_info->host_initiated && in vmx_set_msr()
2192 !(to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2196 vcpu->arch.mcg_ext_ctl = data; in vmx_set_msr()
2200 (to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2201 FEAT_CTL_LOCKED && !msr_info->host_initiated)) in vmx_set_msr()
2203 vmx->msr_ia32_feature_control = data; in vmx_set_msr()
2204 if (msr_info->host_initiated && data == 0) in vmx_set_msr()
2208 if (!msr_info->host_initiated) in vmx_set_msr()
2209 return 1; /* they are read-only */ in vmx_set_msr()
2216 vmx->nested.vmxon) in vmx_set_msr()
2219 vmx->pt_desc.guest.ctl = data; in vmx_set_msr()
2227 vmx->pt_desc.guest.status = data; in vmx_set_msr()
2232 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2235 vmx->pt_desc.guest.cr3_match = data; in vmx_set_msr()
2240 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2242 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2247 vmx->pt_desc.guest.output_base = data; in vmx_set_msr()
2252 if (!intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2254 !intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2257 vmx->pt_desc.guest.output_mask = data; in vmx_set_msr()
2262 index = msr_info->index - MSR_IA32_RTIT_ADDR0_A; in vmx_set_msr()
2263 if (index >= 2 * intel_pt_validate_cap(vmx->pt_desc.caps, in vmx_set_msr()
2269 vmx->pt_desc.guest.addr_b[index / 2] = data; in vmx_set_msr()
2271 vmx->pt_desc.guest.addr_a[index / 2] = data; in vmx_set_msr()
2274 if (!msr_info->host_initiated && in vmx_set_msr()
2306 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP); in vmx_cache_reg()
2309 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP); in vmx_cache_reg()
2316 guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; in vmx_cache_reg()
2318 vcpu->arch.cr0 &= ~guest_owned_bits; in vmx_cache_reg()
2319 vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits; in vmx_cache_reg()
2324 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3); in vmx_cache_reg()
2327 guest_owned_bits = vcpu->arch.cr4_guest_owned_bits; in vmx_cache_reg()
2329 vcpu->arch.cr4 &= ~guest_owned_bits; in vmx_cache_reg()
2330 vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits; in vmx_cache_reg()
2368 return -EFAULT; in kvm_cpu_vmxon()
2378 return -EBUSY; in hardware_enable()
2381 * This can happen if we hot-added a CPU but failed to allocate in hardware_enable()
2386 return -EFAULT; in hardware_enable()
2450 return -EIO; in adjust_vmx_controls()
2488 return -EIO; in setup_vmcs_config()
2523 return -EIO; in setup_vmcs_config()
2538 &vmx_cap->ept, &vmx_cap->vpid); in setup_vmcs_config()
2546 } else if (vmx_cap->ept) { in setup_vmcs_config()
2547 vmx_cap->ept = 0; in setup_vmcs_config()
2549 "1-setting enable EPT VM-execution control\n"); in setup_vmcs_config()
2552 vmx_cap->vpid) { in setup_vmcs_config()
2553 vmx_cap->vpid = 0; in setup_vmcs_config()
2555 "1-setting enable VPID VM-execution control\n"); in setup_vmcs_config()
2570 return -EIO; in setup_vmcs_config()
2577 return -EIO; in setup_vmcs_config()
2594 return -EIO; in setup_vmcs_config()
2622 /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */ in setup_vmcs_config()
2624 return -EIO; in setup_vmcs_config()
2627 /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */ in setup_vmcs_config()
2629 return -EIO; in setup_vmcs_config()
2632 /* Require Write-Back (WB) memory type for VMCS accesses. */ in setup_vmcs_config()
2634 return -EIO; in setup_vmcs_config()
2636 vmcs_conf->size = vmx_msr_high & 0x1fff; in setup_vmcs_config()
2637 vmcs_conf->order = get_order(vmcs_conf->size); in setup_vmcs_config()
2638 vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff; in setup_vmcs_config()
2640 vmcs_conf->revision_id = vmx_msr_low; in setup_vmcs_config()
2642 vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control; in setup_vmcs_config()
2643 vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control; in setup_vmcs_config()
2644 vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control; in setup_vmcs_config()
2645 vmcs_conf->vmexit_ctrl = _vmexit_control; in setup_vmcs_config()
2646 vmcs_conf->vmentry_ctrl = _vmentry_control; in setup_vmcs_config()
2670 vmcs->hdr.revision_id = KVM_EVMCS_VERSION; in alloc_vmcs_cpu()
2672 vmcs->hdr.revision_id = vmcs_config.revision_id; in alloc_vmcs_cpu()
2675 vmcs->hdr.shadow_vmcs = 1; in alloc_vmcs_cpu()
2689 if (!loaded_vmcs->vmcs) in free_loaded_vmcs()
2692 free_vmcs(loaded_vmcs->vmcs); in free_loaded_vmcs()
2693 loaded_vmcs->vmcs = NULL; in free_loaded_vmcs()
2694 if (loaded_vmcs->msr_bitmap) in free_loaded_vmcs()
2695 free_page((unsigned long)loaded_vmcs->msr_bitmap); in free_loaded_vmcs()
2696 WARN_ON(loaded_vmcs->shadow_vmcs != NULL); in free_loaded_vmcs()
2701 loaded_vmcs->vmcs = alloc_vmcs(false); in alloc_loaded_vmcs()
2702 if (!loaded_vmcs->vmcs) in alloc_loaded_vmcs()
2703 return -ENOMEM; in alloc_loaded_vmcs()
2705 vmcs_clear(loaded_vmcs->vmcs); in alloc_loaded_vmcs()
2707 loaded_vmcs->shadow_vmcs = NULL; in alloc_loaded_vmcs()
2708 loaded_vmcs->hv_timer_soft_disabled = false; in alloc_loaded_vmcs()
2709 loaded_vmcs->cpu = -1; in alloc_loaded_vmcs()
2710 loaded_vmcs->launched = 0; in alloc_loaded_vmcs()
2713 loaded_vmcs->msr_bitmap = (unsigned long *) in alloc_loaded_vmcs()
2715 if (!loaded_vmcs->msr_bitmap) in alloc_loaded_vmcs()
2717 memset(loaded_vmcs->msr_bitmap, 0xff, PAGE_SIZE); in alloc_loaded_vmcs()
2723 (struct hv_enlightened_vmcs *)loaded_vmcs->vmcs; in alloc_loaded_vmcs()
2725 evmcs->hv_enlightenments_control.msr_bitmap = 1; in alloc_loaded_vmcs()
2729 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); in alloc_loaded_vmcs()
2730 memset(&loaded_vmcs->controls_shadow, 0, in alloc_loaded_vmcs()
2737 return -ENOMEM; in alloc_loaded_vmcs()
2760 return -ENOMEM; in alloc_kvm_area()
2765 * vmcs->revision_id to KVM_EVMCS_VERSION instead of in alloc_kvm_area()
2774 vmcs->hdr.revision_id = vmcs_config.revision_id; in alloc_kvm_area()
2793 save->selector &= ~SEGMENT_RPL_MASK; in fix_pmode_seg()
2794 save->dpl = save->selector & SEGMENT_RPL_MASK; in fix_pmode_seg()
2795 save->s = 1; in fix_pmode_seg()
2806 * Update real mode segment cache. It may not be up to date if segment in enter_pmode()
2809 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
2810 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
2811 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
2812 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
2813 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
2814 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
2816 vmx->rmode.vm86_active = 0; in enter_pmode()
2818 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
2822 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; in enter_pmode()
2830 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
2831 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
2832 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
2833 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
2834 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
2835 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
2859 if (save->base & 0xf) in fix_rmode_seg()
2865 vmcs_write16(sf->selector, var.selector); in fix_rmode_seg()
2866 vmcs_writel(sf->base, var.base); in fix_rmode_seg()
2867 vmcs_write32(sf->limit, var.limit); in fix_rmode_seg()
2868 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var)); in fix_rmode_seg()
2875 struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); in enter_rmode()
2877 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
2878 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
2879 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
2880 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
2881 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
2882 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
2883 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
2885 vmx->rmode.vm86_active = 1; in enter_rmode()
2891 if (!kvm_vmx->tss_addr) in enter_rmode()
2897 vmcs_writel(GUEST_TR_BASE, kvm_vmx->tss_addr); in enter_rmode()
2898 vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1); in enter_rmode()
2902 vmx->rmode.save_rflags = flags; in enter_rmode()
2910 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_rmode()
2911 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_rmode()
2912 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_rmode()
2913 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_rmode()
2914 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_rmode()
2915 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_rmode()
2929 vcpu->arch.efer = efer; in vmx_set_efer()
2932 msr->data = efer; in vmx_set_efer()
2936 msr->data = efer & ~EFER_LME; in vmx_set_efer()
2958 vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA); in enter_lmode()
2964 vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA); in exit_lmode()
2975 * the CPU is not required to invalidate guest-physical mappings on in vmx_flush_tlb_all()
2976 * VM-Entry, even if VPID is disabled. Guest-physical mappings are in vmx_flush_tlb_all()
2978 * (INVVPID also isn't required to invalidate guest-physical mappings). in vmx_flush_tlb_all()
2986 vpid_sync_vcpu_single(vmx->vpid); in vmx_flush_tlb_all()
2987 vpid_sync_vcpu_single(vmx->nested.vpid02); in vmx_flush_tlb_all()
2996 return to_vmx(vcpu)->vpid; in vmx_get_current_vpid()
3001 struct kvm_mmu *mmu = vcpu->arch.mmu; in vmx_flush_tlb_current()
3002 u64 root_hpa = mmu->root_hpa; in vmx_flush_tlb_current()
3010 mmu->shadow_root_level)); in vmx_flush_tlb_current()
3028 * vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit are in vmx_flush_tlb_guest()
3029 * required to flush GVA->{G,H}PA mappings from the TLB if vpid is in vmx_flush_tlb_guest()
3030 * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed), in vmx_flush_tlb_guest()
3038 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in vmx_ept_load_pdptrs()
3044 vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]); in vmx_ept_load_pdptrs()
3045 vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]); in vmx_ept_load_pdptrs()
3046 vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]); in vmx_ept_load_pdptrs()
3047 vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]); in vmx_ept_load_pdptrs()
3053 struct kvm_mmu *mmu = vcpu->arch.walk_mmu; in ept_save_pdptrs()
3058 mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0); in ept_save_pdptrs()
3059 mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1); in ept_save_pdptrs()
3060 mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2); in ept_save_pdptrs()
3061 mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3); in ept_save_pdptrs()
3078 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3084 vcpu->arch.cr0 = cr0; in ept_update_paging_mode_cr0()
3103 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) in vmx_set_cr0()
3106 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) in vmx_set_cr0()
3111 if (vcpu->arch.efer & EFER_LME) { in vmx_set_cr0()
3124 vcpu->arch.cr0 = cr0; in vmx_set_cr0()
3127 /* depends on vcpu->arch.cr0 to be set to a new value */ in vmx_set_cr0()
3128 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
3156 struct kvm *kvm = vcpu->kvm; in vmx_load_mmu_pgd()
3166 spin_lock(&to_kvm_vmx(kvm)->ept_pointer_lock); in vmx_load_mmu_pgd()
3167 to_vmx(vcpu)->ept_pointer = eptp; in vmx_load_mmu_pgd()
3168 to_kvm_vmx(kvm)->ept_pointers_match in vmx_load_mmu_pgd()
3170 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); in vmx_load_mmu_pgd()
3174 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr; in vmx_load_mmu_pgd()
3175 else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail)) in vmx_load_mmu_pgd()
3176 guest_cr3 = vcpu->arch.cr3; in vmx_load_mmu_pgd()
3177 else /* vmcs01.GUEST_CR3 is already up-to-date. */ in vmx_load_mmu_pgd()
3201 else if (vmx->rmode.vm86_active) in vmx_set_cr4()
3228 if (vmx->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) in vmx_set_cr4()
3231 vcpu->arch.cr4 = cr4; in vmx_set_cr4()
3245 * SMEP/SMAP/PKU is disabled if CPU is in non-paging mode in in vmx_set_cr4()
3247 * to be manually disabled when guest switches to non-paging in vmx_set_cr4()
3269 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
3270 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
3272 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
3274 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3275 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3278 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
3279 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
3280 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
3282 var->unusable = (ar >> 16) & 1; in vmx_get_segment()
3283 var->type = ar & 15; in vmx_get_segment()
3284 var->s = (ar >> 4) & 1; in vmx_get_segment()
3285 var->dpl = (ar >> 5) & 3; in vmx_get_segment()
3293 var->present = !var->unusable; in vmx_get_segment()
3294 var->avl = (ar >> 12) & 1; in vmx_get_segment()
3295 var->l = (ar >> 13) & 1; in vmx_get_segment()
3296 var->db = (ar >> 14) & 1; in vmx_get_segment()
3297 var->g = (ar >> 15) & 1; in vmx_get_segment()
3304 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3315 if (unlikely(vmx->rmode.vm86_active)) in vmx_get_cpl()
3327 if (var->unusable || !var->present) in vmx_segment_access_rights()
3330 ar = var->type & 15; in vmx_segment_access_rights()
3331 ar |= (var->s & 1) << 4; in vmx_segment_access_rights()
3332 ar |= (var->dpl & 3) << 5; in vmx_segment_access_rights()
3333 ar |= (var->present & 1) << 7; in vmx_segment_access_rights()
3334 ar |= (var->avl & 1) << 12; in vmx_segment_access_rights()
3335 ar |= (var->l & 1) << 13; in vmx_segment_access_rights()
3336 ar |= (var->db & 1) << 14; in vmx_segment_access_rights()
3337 ar |= (var->g & 1) << 15; in vmx_segment_access_rights()
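vmx_segment_access_rights() above packs segment attributes into the VMX access-rights ("AR bytes") word, and vmx_get_segment() earlier unpacks the same bit positions. A standalone round-trip sketch of that layout follows; struct seg is a simplified stand-in for struct kvm_segment.

#include <stdio.h>

struct seg {
	unsigned int type     : 4;
	unsigned int s        : 1;
	unsigned int dpl      : 2;
	unsigned int present  : 1;
	unsigned int avl      : 1;
	unsigned int l        : 1;
	unsigned int db       : 1;
	unsigned int g        : 1;
	unsigned int unusable : 1;
};

static unsigned int seg_pack_ar(const struct seg *v)
{
	if (v->unusable || !v->present)
		return 1U << 16;		/* "segment unusable" bit */

	return (v->type & 15) | (v->s << 4) | (v->dpl << 5) |
	       (v->present << 7) | (v->avl << 12) | (v->l << 13) |
	       (v->db << 14) | (v->g << 15);
}

static void seg_unpack_ar(unsigned int ar, struct seg *v)
{
	v->unusable = (ar >> 16) & 1;
	v->type     = ar & 15;
	v->s        = (ar >> 4) & 1;
	v->dpl      = (ar >> 5) & 3;
	v->present  = !v->unusable;
	v->avl      = (ar >> 12) & 1;
	v->l        = (ar >> 13) & 1;
	v->db       = (ar >> 14) & 1;
	v->g        = (ar >> 15) & 1;
}

int main(void)
{
	struct seg cs = { .type = 11, .s = 1, .dpl = 0, .present = 1, .g = 1 };
	struct seg out;

	seg_unpack_ar(seg_pack_ar(&cs), &out);
	printf("ar=%#x type=%u g=%u\n", seg_pack_ar(&cs), out.type, out.g);
	return 0;
}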
3350 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
3351 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
3353 vmcs_write16(sf->selector, var->selector); in vmx_set_segment()
3354 else if (var->s) in vmx_set_segment()
3355 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
3359 vmcs_writel(sf->base, var->base); in vmx_set_segment()
3360 vmcs_write32(sf->limit, var->limit); in vmx_set_segment()
3361 vmcs_write16(sf->selector, var->selector); in vmx_set_segment()
3375 var->type |= 0x1; /* Accessed */ in vmx_set_segment()
3377 vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); in vmx_set_segment()
3380 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
3393 dt->size = vmcs_read32(GUEST_IDTR_LIMIT); in vmx_get_idt()
3394 dt->address = vmcs_readl(GUEST_IDTR_BASE); in vmx_get_idt()
3399 vmcs_write32(GUEST_IDTR_LIMIT, dt->size); in vmx_set_idt()
3400 vmcs_writel(GUEST_IDTR_BASE, dt->address); in vmx_set_idt()
3405 dt->size = vmcs_read32(GUEST_GDTR_LIMIT); in vmx_get_gdt()
3406 dt->address = vmcs_readl(GUEST_GDTR_BASE); in vmx_get_gdt()
3411 vmcs_write32(GUEST_GDTR_LIMIT, dt->size); in vmx_set_gdt()
3412 vmcs_writel(GUEST_GDTR_BASE, dt->address); in vmx_set_gdt()
3601 * - Add checks on RIP in __vmx_guest_state_valid()
3602 * - Add checks on RFLAGS in __vmx_guest_state_valid()
3614 idx = srcu_read_lock(&kvm->srcu); in init_rmode_tss()
3615 fn = to_kvm_vmx(kvm)->tss_addr >> PAGE_SHIFT; in init_rmode_tss()
3632 RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1, in init_rmode_tss()
3635 srcu_read_unlock(&kvm->srcu, idx); in init_rmode_tss()
3646 /* Protect kvm_vmx->ept_identity_pagetable_done. */ in init_rmode_identity_map()
3647 mutex_lock(&kvm->slots_lock); in init_rmode_identity_map()
3649 if (likely(kvm_vmx->ept_identity_pagetable_done)) in init_rmode_identity_map()
3652 if (!kvm_vmx->ept_identity_map_addr) in init_rmode_identity_map()
3653 kvm_vmx->ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR; in init_rmode_identity_map()
3654 identity_map_pfn = kvm_vmx->ept_identity_map_addr >> PAGE_SHIFT; in init_rmode_identity_map()
3657 kvm_vmx->ept_identity_map_addr, PAGE_SIZE); in init_rmode_identity_map()
3664 /* Set up identity-mapping pagetable for EPT in real mode */ in init_rmode_identity_map()
3673 kvm_vmx->ept_identity_pagetable_done = true; in init_rmode_identity_map()
3676 mutex_unlock(&kvm->slots_lock); in init_rmode_identity_map()
3685 vmcs_write16(sf->selector, 0); in seg_setup()
3686 vmcs_writel(sf->base, 0); in seg_setup()
3687 vmcs_write32(sf->limit, 0xffff); in seg_setup()
3692 vmcs_write32(sf->ar_bytes, ar); in seg_setup()
3700 mutex_lock(&kvm->slots_lock); in alloc_apic_access_page()
3701 if (kvm->arch.apic_access_page_done) in alloc_apic_access_page()
3710 r = -EFAULT; in alloc_apic_access_page()
3715 * Do not pin the page in memory, so that memory hot-unplug in alloc_apic_access_page()
3719 kvm->arch.apic_access_page_done = true; in alloc_apic_access_page()
3721 mutex_unlock(&kvm->slots_lock); in alloc_apic_access_page()
3794 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_disable_intercept_for_msr()
3809 if (idx != -ENOENT) { in vmx_disable_intercept_for_msr()
3811 clear_bit(idx, vmx->shadow_msr_intercept.read); in vmx_disable_intercept_for_msr()
3813 clear_bit(idx, vmx->shadow_msr_intercept.write); in vmx_disable_intercept_for_msr()
3840 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_enable_intercept_for_msr()
3855 if (idx != -ENOENT) { in vmx_enable_intercept_for_msr()
3857 set_bit(idx, vmx->shadow_msr_intercept.read); in vmx_enable_intercept_for_msr()
3859 set_bit(idx, vmx->shadow_msr_intercept.write); in vmx_enable_intercept_for_msr()
3896 unsigned long *msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; in vmx_reset_x2apic_msrs()
3936 u8 changed = mode ^ vmx->msr_bitmap_mode; in vmx_update_msr_bitmap()
3944 vmx->msr_bitmap_mode = mode; in vmx_update_msr_bitmap()
3950 bool flag = !(vmx->pt_desc.guest.ctl & RTIT_CTL_TRACEEN); in pt_update_intercept_for_msr()
3957 for (i = 0; i < vmx->pt_desc.addr_range; i++) { in pt_update_intercept_for_msr()
3972 WARN_ON_ONCE(!vmx->nested.virtual_apic_map.gfn)) in vmx_guest_apic_has_interrupt()
3977 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_guest_apic_has_interrupt()
3995 bool read = test_bit(i, vmx->shadow_msr_intercept.read); in vmx_msr_filter_changed()
3996 bool write = test_bit(i, vmx->shadow_msr_intercept.write); in vmx_msr_filter_changed()
4012 if (vcpu->mode == IN_GUEST_MODE) { in kvm_vcpu_trigger_posted_interrupt()
4021 * Case 1: vcpu keeps in non-root mode. Sending a in kvm_vcpu_trigger_posted_interrupt()
4038 apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec); in kvm_vcpu_trigger_posted_interrupt()
4051 vector == vmx->nested.posted_intr_nv) { in vmx_deliver_nested_posted_interrupt()
4056 vmx->nested.pi_pending = true; in vmx_deliver_nested_posted_interrupt()
4063 return -1; in vmx_deliver_nested_posted_interrupt()
4067 * 1. If target vcpu is running(non-root mode), send posted interrupt
4081 if (!vcpu->arch.apicv_active) in vmx_deliver_posted_interrupt()
4082 return -1; in vmx_deliver_posted_interrupt()
4084 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4088 if (pi_test_and_set_on(&vmx->pi_desc)) in vmx_deliver_posted_interrupt()
4098 * Set up the vmcs's constant host-state fields, i.e., host-state fields that
4100 * Note that host-state that does change is set elsewhere. E.g., host-state
4119 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_set_constant_host_state()
4124 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_set_constant_host_state()
4162 struct kvm_vcpu *vcpu = &vmx->vcpu; in set_cr4_guest_host_mask()
4164 vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS & in set_cr4_guest_host_mask()
4165 ~vcpu->arch.cr4_guest_rsvd_bits; in set_cr4_guest_host_mask()
4167 vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE; in set_cr4_guest_host_mask()
4168 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
4169 vcpu->arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
4170 ~get_vmcs12(vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
4171 vmcs_writel(CR4_GUEST_HOST_MASK, ~vcpu->arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
4178 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) in vmx_pin_based_exec_ctrl()
4214 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
4217 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { in vmx_exec_control()
4228 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
4231 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
4246 * If the control is for an opt-in feature, clear the control if the in vmx_adjust_secondary_exec_control()
4248 * control is opt-out, i.e. an exiting control, clear the control if in vmx_adjust_secondary_exec_control()
4262 vmx->nested.msrs.secondary_ctls_high |= control; in vmx_adjust_secondary_exec_control()
4264 vmx->nested.msrs.secondary_ctls_high &= ~control; in vmx_adjust_secondary_exec_control()
4278 __enabled = guest_cpuid_has(&(vmx)->vcpu, \
4285 /* More macro magic for ENABLE_/opt-in versus _EXITING/opt-out controls. */
4294 struct kvm_vcpu *vcpu = &vmx->vcpu; in vmx_compute_secondary_exec_control()
4302 if (vmx->vpid == 0) in vmx_compute_secondary_exec_control()
4310 if (kvm_pause_in_guest(vmx->vcpu.kvm)) in vmx_compute_secondary_exec_control()
4338 vcpu->arch.xsaves_enabled = xsaves_enabled; in vmx_compute_secondary_exec_control()
4364 vmx->secondary_exec_control = exec_control; in vmx_compute_secondary_exec_control()
4371 * of an EPT paging-structure entry is 110b (write/execute). in ept_set_mmio_spte_mask()
4379 * Note that the initialization of the Guest-state Area of VMCS is in in init_vmcs()
4388 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); in init_vmcs()
4390 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ in init_vmcs()
4399 secondary_exec_controls_set(vmx, vmx->secondary_exec_control); in init_vmcs()
4402 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { in init_vmcs()
4411 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); in init_vmcs()
4414 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { in init_vmcs()
4416 vmx->ple_window = ple_window; in init_vmcs()
4417 vmx->ple_window_dirty = true; in init_vmcs()
4435 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in init_vmcs()
4437 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in init_vmcs()
4440 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in init_vmcs()
4447 vmx->vcpu.arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS; in init_vmcs()
4448 vmcs_writel(CR0_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr0_guest_owned_bits); in init_vmcs()
4452 if (vmx->vpid != 0) in init_vmcs()
4453 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in init_vmcs()
4459 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in init_vmcs()
4460 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); in init_vmcs()
4464 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull); in init_vmcs()
4467 memset(&vmx->pt_desc, 0, sizeof(vmx->pt_desc)); in init_vmcs()
4469 vmx->pt_desc.guest.output_mask = 0x7F; in init_vmcs()
4480 vmx->rmode.vm86_active = 0; in vmx_vcpu_reset()
4481 vmx->spec_ctrl = 0; in vmx_vcpu_reset()
4483 vmx->msr_ia32_umwait_control = 0; in vmx_vcpu_reset()
4485 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
4486 vmx->hv_deadline_tsc = -1; in vmx_vcpu_reset()
4550 __pa(vcpu->arch.apic->regs)); in vmx_vcpu_reset()
4557 vmx->vcpu.arch.cr0 = cr0; in vmx_vcpu_reset()
4564 vpid_sync_context(vmx->vpid); in vmx_vcpu_reset()
4591 int irq = vcpu->arch.interrupt.nr; in vmx_inject_irq()
4595 ++vcpu->stat.irq_injections; in vmx_inject_irq()
4596 if (vmx->rmode.vm86_active) { in vmx_inject_irq()
4598 if (vcpu->arch.interrupt.soft) in vmx_inject_irq()
4599 inc_eip = vcpu->arch.event_exit_inst_len; in vmx_inject_irq()
4604 if (vcpu->arch.interrupt.soft) { in vmx_inject_irq()
4607 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
4621 * Tracking the NMI-blocked state in software is built upon in vmx_inject_nmi()
4623 * well-behaving guests: They have to keep IRQs disabled at in vmx_inject_nmi()
4628 vmx->loaded_vmcs->soft_vnmi_blocked = 1; in vmx_inject_nmi()
4629 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_inject_nmi()
4632 ++vcpu->stat.nmi_injections; in vmx_inject_nmi()
4633 vmx->loaded_vmcs->nmi_known_unmasked = false; in vmx_inject_nmi()
4635 if (vmx->rmode.vm86_active) { in vmx_inject_nmi()
4652 return vmx->loaded_vmcs->soft_vnmi_blocked; in vmx_get_nmi_mask()
4653 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_get_nmi_mask()
4656 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_get_nmi_mask()
4665 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { in vmx_set_nmi_mask()
4666 vmx->loaded_vmcs->soft_vnmi_blocked = masked; in vmx_set_nmi_mask()
4667 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_set_nmi_mask()
4670 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_set_nmi_mask()
4685 if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) in vmx_nmi_blocked()
4695 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
4696 return -EBUSY; in vmx_nmi_allowed()
4698 /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */ in vmx_nmi_allowed()
4700 return -EBUSY; in vmx_nmi_allowed()
4717 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_interrupt_allowed()
4718 return -EBUSY; in vmx_interrupt_allowed()
4721 * An IRQ must not be injected into L2 if it's supposed to VM-Exit, in vmx_interrupt_allowed()
4725 return -EBUSY; in vmx_interrupt_allowed()
4737 mutex_lock(&kvm->slots_lock); in vmx_set_tss_addr()
4740 mutex_unlock(&kvm->slots_lock); in vmx_set_tss_addr()
4744 to_kvm_vmx(kvm)->tss_addr = addr; in vmx_set_tss_addr()
4750 to_kvm_vmx(kvm)->ept_identity_map_addr = ident_addr; in vmx_set_identity_map_addr()
4762 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
4764 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) in rmode_exception()
4768 return !(vcpu->guest_debug & in rmode_exception()
4792 if (vcpu->arch.halt_request) { in handle_rmode_exception()
4793 vcpu->arch.halt_request = 0; in handle_rmode_exception()
4842 * - Guest CPL == 3 (user mode)
4843 * - Guest has #AC detection enabled in CR0
4844 * - Guest EFLAGS has AC bit set
4858 struct kvm_run *kvm_run = vcpu->run; in handle_exception_nmi()
4863 vect_info = vmx->idt_vectoring_info; in handle_exception_nmi()
4876 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { in handle_exception_nmi()
4881 * IN{S}, OUT{S}, and RDPMC, none of which generate a non-zero in handle_exception_nmi()
4898 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_exception_nmi()
4899 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX; in handle_exception_nmi()
4900 vcpu->run->internal.ndata = 4; in handle_exception_nmi()
4901 vcpu->run->internal.data[0] = vect_info; in handle_exception_nmi()
4902 vcpu->run->internal.data[1] = intr_info; in handle_exception_nmi()
4903 vcpu->run->internal.data[2] = error_code; in handle_exception_nmi()
4904 vcpu->run->internal.data[3] = vcpu->arch.last_vmentry_cpu; in handle_exception_nmi()
4910 if (enable_ept && !vcpu->arch.apf.host_apf_flags) { in handle_exception_nmi()
4924 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception_nmi()
4930 if (!(vcpu->guest_debug & in handle_exception_nmi()
4938 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; in handle_exception_nmi()
4939 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); in handle_exception_nmi()
4947 vmx->vcpu.arch.event_exit_inst_len = in handle_exception_nmi()
4949 kvm_run->exit_reason = KVM_EXIT_DEBUG; in handle_exception_nmi()
4951 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; in handle_exception_nmi()
4952 kvm_run->debug.arch.exception = ex_no; in handle_exception_nmi()
4969 kvm_run->exit_reason = KVM_EXIT_EXCEPTION; in handle_exception_nmi()
4970 kvm_run->ex.exception = ex_no; in handle_exception_nmi()
4971 kvm_run->ex.error_code = error_code; in handle_exception_nmi()
4979 ++vcpu->stat.irq_exits; in handle_external_interrupt()
4985 vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN; in handle_triple_fault()
4986 vcpu->mmio_needed = 0; in handle_triple_fault()
4999 ++vcpu->stat.io_exits; in handle_io()
5022 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
5034 * hardware. It consists of the L2-owned bits from the new in handle_set_cr0()
5035 * value combined with the L1-owned bits from L1's guest_cr0. in handle_set_cr0()
5037 val = (val & ~vmcs12->cr0_guest_host_mask) | in handle_set_cr0()
5038 (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); in handle_set_cr0()
5048 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
5063 val = (val & ~vmcs12->cr4_guest_host_mask) | in handle_set_cr4()
5064 (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask); in handle_set_cr4()
5075 WARN_ON(!(vcpu->arch.cr4 & X86_CR4_UMIP)); in handle_desc()
5116 * KVM_GUESTDBG_SINGLESTEP-triggered in handle_cr()
5119 vcpu->run->exit_reason = KVM_EXIT_SET_TPR; in handle_cr()
5153 vcpu->run->exit_reason = 0; in handle_cr()
5171 /* Do not handle if the CPL > 0, will trigger GP on re-entry */ in handle_dr()
5177 * As the vm-exit takes precedence over the debug trap, we in handle_dr()
5181 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { in handle_dr()
5182 vcpu->run->debug.arch.dr6 = DR6_BD | DR6_RTM | DR6_FIXED_1; in handle_dr()
5183 vcpu->run->debug.arch.dr7 = dr7; in handle_dr()
5184 vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu); in handle_dr()
5185 vcpu->run->debug.arch.exception = DB_VECTOR; in handle_dr()
5186 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in handle_dr()
5194 if (vcpu->guest_debug == 0) { in handle_dr()
5202 vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in handle_dr()
5222 get_debugreg(vcpu->arch.db[0], 0); in vmx_sync_dirty_debug_regs()
5223 get_debugreg(vcpu->arch.db[1], 1); in vmx_sync_dirty_debug_regs()
5224 get_debugreg(vcpu->arch.db[2], 2); in vmx_sync_dirty_debug_regs()
5225 get_debugreg(vcpu->arch.db[3], 3); in vmx_sync_dirty_debug_regs()
5226 get_debugreg(vcpu->arch.dr6, 6); in vmx_sync_dirty_debug_regs()
5227 vcpu->arch.dr7 = vmcs_readl(GUEST_DR7); in vmx_sync_dirty_debug_regs()
5229 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT; in vmx_sync_dirty_debug_regs()
5250 ++vcpu->stat.irq_window_exits; in handle_interrupt_window()
5306 * not cared about. So short-circuit here by avoiding in handle_apic_access()
5323 /* EOI-induced VM exit is trap-like and thus no need to adjust IP */ in handle_apic_eoi_induced()
5333 /* APIC-write VM exit is trap-like and thus no need to adjust IP */ in handle_apic_write()
5347 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); in handle_task_switch()
5348 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); in handle_task_switch()
5349 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); in handle_task_switch()
5357 vcpu->arch.nmi_injected = false; in handle_task_switch()
5365 if (vmx->idt_vectoring_info & in handle_task_switch()
5391 type == INTR_TYPE_SOFT_INTR ? idt_index : -1, in handle_task_switch()
5409 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5435 vcpu->arch.exit_qualification = exit_qualification; in handle_ept_violation()
5442 * would also use advanced VM-exit information for EPT violations to in handle_ept_violation()
5473 ++vcpu->stat.nmi_window_exits; in handle_nmi_window()
5488 while (vmx->emulation_required && count-- != 0) { in handle_invalid_guest_state()
5490 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
5498 if (vmx->emulation_required && !vmx->rmode.vm86_active && in handle_invalid_guest_state()
5499 vcpu->arch.exception.pending) { in handle_invalid_guest_state()
5500 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in handle_invalid_guest_state()
5501 vcpu->run->internal.suberror = in handle_invalid_guest_state()
5503 vcpu->run->internal.ndata = 0; in handle_invalid_guest_state()
5507 if (vcpu->arch.halt_request) { in handle_invalid_guest_state()
5508 vcpu->arch.halt_request = 0; in handle_invalid_guest_state()
5527 unsigned int old = vmx->ple_window; in grow_ple_window()
5529 vmx->ple_window = __grow_ple_window(old, ple_window, in grow_ple_window()
5533 if (vmx->ple_window != old) { in grow_ple_window()
5534 vmx->ple_window_dirty = true; in grow_ple_window()
5535 trace_kvm_ple_window_update(vcpu->vcpu_id, in grow_ple_window()
5536 vmx->ple_window, old); in grow_ple_window()
5543 unsigned int old = vmx->ple_window; in shrink_ple_window()
5545 vmx->ple_window = __shrink_ple_window(old, ple_window, in shrink_ple_window()
5549 if (vmx->ple_window != old) { in shrink_ple_window()
5550 vmx->ple_window_dirty = true; in shrink_ple_window()
5551 trace_kvm_ple_window_update(vcpu->vcpu_id, in shrink_ple_window()
5552 vmx->ple_window, old); in shrink_ple_window()
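/*
 * Standalone sketch of the grow/shrink policy driven above. It mirrors only
 * the default behaviour (grow the window multiplicatively on a PAUSE exit,
 * shrink it back toward the base value when the vCPU is scheduled in after
 * being preempted) and is not the exact helper the kernel uses.
 */
#include <stdint.h>

static unsigned int ple_grow(unsigned int val, unsigned int factor,
			     unsigned int max)
{
	uint64_t grown = factor ? (uint64_t)val * factor : val;

	return grown > max ? max : (unsigned int)grown;
}

static unsigned int ple_shrink(unsigned int val, unsigned int divisor,
			       unsigned int base)
{
	unsigned int shrunk = divisor ? val / divisor : base;

	return shrunk < base ? base : shrunk;
}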
5569 * Indicate a vCPU busy-waiting on a spinlock. We do not enable PAUSE
5570 * exiting, so we only get here on a CPU with PAUSE-Loop-Exiting.
5574 if (!kvm_pause_in_guest(vcpu->kvm)) in handle_pause()
5578 * Intel SDM Vol. 3, Section 25.1.3 says: The "PAUSE-loop exiting" in handle_pause()
5579 * VM-execution control is ignored if CPL > 0. OTOH, KVM in handle_pause()
5653 trace_kvm_pml_full(vcpu->vcpu_id); in handle_pml_full()
5661 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
5678 if (!vmx->req_immediate_exit && in handle_fastpath_preemption_timer()
5679 !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) { in handle_fastpath_preemption_timer()
5781 if (!(vmx->exit_reason.failed_vmentry)) { in vmx_get_exit_info()
5782 *info2 = vmx->idt_vectoring_info; in vmx_get_exit_info()
5797 if (vmx->pml_pg) { in vmx_destroy_pml_buffer()
5798 __free_page(vmx->pml_pg); in vmx_destroy_pml_buffer()
5799 vmx->pml_pg = NULL; in vmx_destroy_pml_buffer()
5812 if (pml_idx == (PML_ENTITY_NUM - 1)) in vmx_flush_pml_buffer()
5821 pml_buf = page_address(vmx->pml_pg); in vmx_flush_pml_buffer()
5826 WARN_ON(gpa & (PAGE_SIZE - 1)); in vmx_flush_pml_buffer()
5831 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); in vmx_flush_pml_buffer()
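/*
 * Standalone sketch of the PML walk above: the page holds 512 guest-physical
 * addresses, the hardware index counts down from PML_ENTITY_NUM - 1 (so that
 * value means "nothing logged"), and every logged GPA is page aligned. The
 * callback name is made up.
 */
#include <stdint.h>
#include <assert.h>

#define PML_ENTITY_NUM	512
#define PAGE_SIZE_4K	4096ULL

static void flush_pml_buffer(const uint64_t pml_buf[PML_ENTITY_NUM],
			     uint16_t pml_idx,
			     void (*mark_gfn_dirty)(uint64_t gfn))
{
	if (pml_idx == PML_ENTITY_NUM - 1)
		return;				/* buffer is empty */

	/* the index points at the next free slot (or underflowed when full) */
	pml_idx = (pml_idx >= PML_ENTITY_NUM) ? 0 : pml_idx + 1;

	for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
		uint64_t gpa = pml_buf[pml_idx];

		assert((gpa & (PAGE_SIZE_4K - 1)) == 0);
		mark_gfn_dirty(gpa / PAGE_SIZE_4K);
	}
	/* a real VMM now rewinds GUEST_PML_INDEX to PML_ENTITY_NUM - 1 */
}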
5856 vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR), in vmx_dump_sel()
5857 vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR), in vmx_dump_sel()
5858 vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR)); in vmx_dump_sel()
5865 vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT)); in vmx_dump_dtsel()
6000 pr_err("APIC-access addr = 0x%016llx ", vmcs_read64(APIC_ACCESS_ADDR)); in dump_vmcs()
6001 pr_cont("virt-APIC addr = 0x%016llx\n", vmcs_read64(VIRTUAL_APIC_PAGE_ADDR)); in dump_vmcs()
6022 union vmx_exit_reason exit_reason = vmx->exit_reason; in vmx_handle_exit()
6023 u32 vectoring_info = vmx->idt_vectoring_info; in vmx_handle_exit()
6037 * We should never reach this point with a pending nested VM-Enter, and in vmx_handle_exit()
6040 * nested VM-Enter with an invalid vmcs12. in vmx_handle_exit()
6042 WARN_ON_ONCE(vmx->nested.nested_run_pending); in vmx_handle_exit()
6045 if (vmx->emulation_required) in vmx_handle_exit()
6054 * address-translation-based dirty tracking (e.g. EPT write in vmx_handle_exit()
6068 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in vmx_handle_exit()
6069 vcpu->run->fail_entry.hardware_entry_failure_reason in vmx_handle_exit()
6071 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; in vmx_handle_exit()
6075 if (unlikely(vmx->fail)) { in vmx_handle_exit()
6077 vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY; in vmx_handle_exit()
6078 vcpu->run->fail_entry.hardware_entry_failure_reason in vmx_handle_exit()
6080 vcpu->run->fail_entry.cpu = vcpu->arch.last_vmentry_cpu; in vmx_handle_exit()
6088 * The vm-exit can be triggered again after return to guest that in vmx_handle_exit()
6099 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in vmx_handle_exit()
6100 vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; in vmx_handle_exit()
6101 vcpu->run->internal.data[0] = vectoring_info; in vmx_handle_exit()
6102 vcpu->run->internal.data[1] = exit_reason.full; in vmx_handle_exit()
6103 vcpu->run->internal.data[2] = vcpu->arch.exit_qualification; in vmx_handle_exit()
6105 vcpu->run->internal.data[ndata++] = in vmx_handle_exit()
6108 vcpu->run->internal.data[ndata++] = vcpu->arch.last_vmentry_cpu; in vmx_handle_exit()
6109 vcpu->run->internal.ndata = ndata; in vmx_handle_exit()
6114 vmx->loaded_vmcs->soft_vnmi_blocked)) { in vmx_handle_exit()
6116 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in vmx_handle_exit()
6117 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && in vmx_handle_exit()
6118 vcpu->arch.nmi_pending) { in vmx_handle_exit()
6121 * NMI-blocked window if the guest runs with IRQs in vmx_handle_exit()
6125 printk(KERN_WARNING "%s: Breaking out of NMI-blocked " in vmx_handle_exit()
6127 __func__, vcpu->vcpu_id); in vmx_handle_exit()
6128 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in vmx_handle_exit()
6163 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in vmx_handle_exit()
6164 vcpu->run->internal.suberror = in vmx_handle_exit()
6166 vcpu->run->internal.ndata = 2; in vmx_handle_exit()
6167 vcpu->run->internal.data[0] = exit_reason.full; in vmx_handle_exit()
6168 vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu; in vmx_handle_exit()
6194 * Clear the per-vcpu flush bit, it gets set again in vmx_l1d_flush()
6198 flush_l1d = vcpu->arch.l1tf_flush_l1d; in vmx_l1d_flush()
6199 vcpu->arch.l1tf_flush_l1d = false; in vmx_l1d_flush()
6202 * Clear the per-cpu flush bit, it gets set again from in vmx_l1d_flush()
6212 vcpu->stat.l1d_flush++; in vmx_l1d_flush()
6223 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" in vmx_l1d_flush()
6232 "movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t" in vmx_l1d_flush()
6239 : "eax", "ebx", "ecx", "edx"); in vmx_l1d_flush()
6251 tpr_threshold = (irr == -1 || tpr < irr) ? 0 : irr; in update_cr8_intercept()
6253 to_vmx(vcpu)->nested.l1_tpr_threshold = tpr_threshold; in update_cr8_intercept()
6272 vmx->nested.change_vmcs01_virtual_apic_mode = true; in vmx_set_virtual_apic_mode()
6294 * the guest may have inserted a non-APIC mapping into in vmx_set_virtual_apic_mode()
6317 to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true; in vmx_set_apic_access_page_addr()
6325 page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT); in vmx_set_apic_access_page_addr()
6344 if (max_isr == -1) in vmx_hwapic_isr_update()
6361 if (vector == -1) in vmx_set_rvi()
6377 * vmcs12 virtual-interrupt-delivery enabled. in vmx_hwapic_irr_update()
6379 * intercepts external-interrupts and in that case in vmx_hwapic_irr_update()
6393 WARN_ON(!vcpu->arch.apicv_active); in vmx_sync_pir_to_irr()
6394 if (pi_test_on(&vmx->pi_desc)) { in vmx_sync_pir_to_irr()
6395 pi_clear_on(&vmx->pi_desc); in vmx_sync_pir_to_irr()
6402 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); in vmx_sync_pir_to_irr()
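/*
 * Conceptual sketch of the sync above, with made-up names: the 256-bit posted
 * interrupt request bitmap is drained into the virtual APIC's IRR and the
 * highest pending vector is tracked (the real code uses atomic xchg on each
 * chunk and only runs after the descriptor's ON bit was cleared).
 */
#include <stdint.h>

static int drain_pir_into_irr(uint64_t pir[4], uint64_t irr[4])
{
	int max_irr = -1;

	for (int i = 0; i < 4; i++) {
		uint64_t pending = pir[i];

		pir[i] = 0;
		irr[i] |= pending;

		if (irr[i])
			max_irr = i * 64 + 63 - __builtin_clzll(irr[i]);
	}
	return max_irr;		/* -1 if nothing is pending */
}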
6435 pi_clear_on(&vmx->pi_desc); in vmx_apicv_post_state_restore()
6436 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); in vmx_apicv_post_state_restore()
6452 u32 intr_info = vmx_get_intr_info(&vmx->vcpu); in handle_exception_nmi_irqoff()
6456 vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags(); in handle_exception_nmi_irqoff()
6462 handle_interrupt_nmi_irqoff(&vmx->vcpu, nmi_entry); in handle_exception_nmi_irqoff()
6472 "KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info)) in handle_external_interrupt_irqoff()
6476 vcpu->arch.at_instruction_boundary = true; in handle_external_interrupt_irqoff()
6483 if (vmx->emulation_required) in vmx_handle_exit_irqoff()
6486 if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT) in vmx_handle_exit_irqoff()
6488 else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI) in vmx_handle_exit_irqoff()
6518 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; in vmx_recover_nmi_blocking()
6521 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_recover_nmi_blocking()
6524 exit_intr_info = vmx_get_intr_info(&vmx->vcpu); in vmx_recover_nmi_blocking()
6529 * Re-set bit "block by NMI" before VM entry if vmexit caused by in vmx_recover_nmi_blocking()
6533 * If the VM exit sets the valid bit in the IDT-vectoring in vmx_recover_nmi_blocking()
6542 vmx->loaded_vmcs->nmi_known_unmasked = in vmx_recover_nmi_blocking()
6545 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_recover_nmi_blocking()
6546 vmx->loaded_vmcs->vnmi_blocked_time += in vmx_recover_nmi_blocking()
6548 vmx->loaded_vmcs->entry_time)); in vmx_recover_nmi_blocking()
6562 vcpu->arch.nmi_injected = false; in __vmx_complete_interrupts()
6576 vcpu->arch.nmi_injected = true; in __vmx_complete_interrupts()
6585 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
6595 vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); in __vmx_complete_interrupts()
6607 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
6646 if (vmx->req_immediate_exit) { in vmx_update_hv_timer()
6648 vmx->loaded_vmcs->hv_timer_soft_disabled = false; in vmx_update_hv_timer()
6649 } else if (vmx->hv_deadline_tsc != -1) { in vmx_update_hv_timer()
6651 if (vmx->hv_deadline_tsc > tscl) in vmx_update_hv_timer()
6652 /* set_hv_timer ensures the delta fits in 32-bits */ in vmx_update_hv_timer()
6653 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> in vmx_update_hv_timer()
6659 vmx->loaded_vmcs->hv_timer_soft_disabled = false; in vmx_update_hv_timer()
6660 } else if (!vmx->loaded_vmcs->hv_timer_soft_disabled) { in vmx_update_hv_timer()
6661 vmcs_write32(VMX_PREEMPTION_TIMER_VALUE, -1); in vmx_update_hv_timer()
6662 vmx->loaded_vmcs->hv_timer_soft_disabled = true; in vmx_update_hv_timer()
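/*
 * Sketch of the deadline-to-timer conversion above. The VMX preemption timer
 * ticks once every 2^rate_shift TSC cycles (the shift comes from
 * IA32_VMX_MISC[4:0]); an already-expired deadline becomes 0 so the exit
 * fires immediately, and "no deadline" is expressed by writing all ones.
 * Names are illustrative.
 */
#include <stdint.h>

static uint32_t tsc_deadline_to_preempt_timer(uint64_t deadline_tsc,
					      uint64_t now_tsc,
					      unsigned int rate_shift)
{
	if (deadline_tsc <= now_tsc)
		return 0;

	/* the code that armed the deadline guarantees this fits in 32 bits */
	return (uint32_t)((deadline_tsc - now_tsc) >> rate_shift);
}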
6668 if (unlikely(host_rsp != vmx->loaded_vmcs->host_state.rsp)) { in vmx_update_host_rsp()
6669 vmx->loaded_vmcs->host_state.rsp = host_rsp; in vmx_update_host_rsp()
6683 vmx->spec_ctrl = __rdmsr(MSR_IA32_SPEC_CTRL); in vmx_spec_ctrl_restore_host()
6693 vmx->spec_ctrl != hostval) in vmx_spec_ctrl_restore_host()
6701 switch (to_vmx(vcpu)->exit_reason.basic) { in vmx_exit_handlers_fastpath()
6741 kvm_arch_has_assigned_device(vcpu->kvm)) in vmx_vcpu_enter_exit()
6746 if (vcpu->arch.cr2 != native_read_cr2()) in vmx_vcpu_enter_exit()
6747 native_write_cr2(vcpu->arch.cr2); in vmx_vcpu_enter_exit()
6749 vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in vmx_vcpu_enter_exit()
6752 vcpu->arch.cr2 = native_read_cr2(); in vmx_vcpu_enter_exit()
6785 vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_vcpu_run()
6786 vmx->loaded_vmcs->entry_time = ktime_get(); in vmx_vcpu_run()
6790 if (vmx->emulation_required) in vmx_vcpu_run()
6793 if (vmx->ple_window_dirty) { in vmx_vcpu_run()
6794 vmx->ple_window_dirty = false; in vmx_vcpu_run()
6795 vmcs_write32(PLE_WINDOW, vmx->ple_window); in vmx_vcpu_run()
6802 WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync); in vmx_vcpu_run()
6805 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); in vmx_vcpu_run()
6807 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]); in vmx_vcpu_run()
6810 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in vmx_vcpu_run()
6812 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_vcpu_run()
6816 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in vmx_vcpu_run()
6818 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_vcpu_run()
6821 /* When single-stepping over STI and MOV SS, we must clear the in vmx_vcpu_run()
6826 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) in vmx_vcpu_run()
6842 * it's non-zero. Since vmentry is serialising on affected CPUs, there in vmx_vcpu_run()
6846 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); in vmx_vcpu_run()
6853 current_evmcs->hv_clean_fields |= in vmx_vcpu_run()
6857 current_evmcs->hv_vp_id = vcpu->arch.hyperv.vp_index; in vmx_vcpu_run()
6860 if (vmx->host_debugctlmsr) in vmx_vcpu_run()
6861 update_debugctlmsr(vmx->host_debugctlmsr); in vmx_vcpu_run()
6882 vmx->nested.nested_run_pending = 0; in vmx_vcpu_run()
6883 vmx->idt_vectoring_info = 0; in vmx_vcpu_run()
6885 if (unlikely(vmx->fail)) { in vmx_vcpu_run()
6886 vmx->exit_reason.full = 0xdead; in vmx_vcpu_run()
6890 vmx->exit_reason.full = vmcs_read32(VM_EXIT_REASON); in vmx_vcpu_run()
6891 if (unlikely((u16)vmx->exit_reason.basic == EXIT_REASON_MCE_DURING_VMENTRY)) in vmx_vcpu_run()
6894 trace_kvm_exit(vmx->exit_reason.full, vcpu, KVM_ISA_VMX); in vmx_vcpu_run()
6896 if (unlikely(vmx->exit_reason.failed_vmentry)) in vmx_vcpu_run()
6899 vmx->loaded_vmcs->launched = 1; in vmx_vcpu_run()
6900 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); in vmx_vcpu_run()
6916 if (vcpu->arch.apicv_active) in vmx_vcpu_run()
6932 free_vpid(vmx->vpid); in vmx_free_vcpu()
6934 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_free_vcpu()
6945 err = -ENOMEM; in vmx_create_vcpu()
6947 vmx->vpid = allocate_vpid(); in vmx_create_vcpu()
6956 vmx->pml_pg = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); in vmx_create_vcpu()
6957 if (!vmx->pml_pg) in vmx_create_vcpu()
6965 int j = vmx->nr_uret_msrs; in vmx_create_vcpu()
6970 vmx->guest_uret_msrs[j].slot = i; in vmx_create_vcpu()
6971 vmx->guest_uret_msrs[j].data = 0; in vmx_create_vcpu()
6986 vmx->guest_uret_msrs[j].mask = ~(u64)TSX_CTRL_CPUID_CLEAR; in vmx_create_vcpu()
6988 vmx->guest_uret_msrs[j].mask = 0; in vmx_create_vcpu()
6991 vmx->guest_uret_msrs[j].mask = -1ull; in vmx_create_vcpu()
6994 ++vmx->nr_uret_msrs; in vmx_create_vcpu()
6997 err = alloc_loaded_vmcs(&vmx->vmcs01); in vmx_create_vcpu()
7002 bitmap_fill(vmx->shadow_msr_intercept.read, MAX_POSSIBLE_PASSTHROUGH_MSRS); in vmx_create_vcpu()
7003 bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS); in vmx_create_vcpu()
7014 if (kvm_cstate_in_guest(vcpu->kvm)) { in vmx_create_vcpu()
7020 vmx->msr_bitmap_mode = 0; in vmx_create_vcpu()
7022 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_create_vcpu()
7025 vcpu->cpu = cpu; in vmx_create_vcpu()
7030 err = alloc_apic_access_page(vcpu->kvm); in vmx_create_vcpu()
7036 err = init_rmode_identity_map(vcpu->kvm); in vmx_create_vcpu()
7042 memcpy(&vmx->nested.msrs, &vmcs_config.nested, sizeof(vmx->nested.msrs)); in vmx_create_vcpu()
7044 memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs)); in vmx_create_vcpu()
7046 vmx->nested.posted_intr_nv = -1; in vmx_create_vcpu()
7047 vmx->nested.current_vmptr = -1ull; in vmx_create_vcpu()
7049 vcpu->arch.microcode_version = 0x100000000ULL; in vmx_create_vcpu()
7050 vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED; in vmx_create_vcpu()
7056 vmx->pi_desc.nv = POSTED_INTR_VECTOR; in vmx_create_vcpu()
7057 vmx->pi_desc.sn = 1; in vmx_create_vcpu()
7059 vmx->ept_pointer = INVALID_PAGE; in vmx_create_vcpu()
7064 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_create_vcpu()
7068 free_vpid(vmx->vpid); in vmx_create_vcpu()
7072 …nt and SMT on, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/ad…
7073 …tion disabled, data leak possible. See CVE-2018-3646 and https://www.kernel.org/doc/html/latest/ad…
7077 spin_lock_init(&to_kvm_vmx(kvm)->ept_pointer_lock); in vmx_vm_init()
7080 kvm->arch.pause_in_guest = true; in vmx_vm_init()
7117 return -EIO; in vmx_check_processor_compat()
7121 return -EIO; in vmx_check_processor_compat()
7127 return -EIO; in vmx_check_processor_compat()
7146 * When there is no need to deal with noncoherent DMA (e.g., no VT-d in vmx_get_mt_mask()
7147 * or VT-d has snoop control), guest CD/MTRR/PAT are all ignored. The in vmx_get_mt_mask()
7160 if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) { in vmx_get_mt_mask()
7168 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) in vmx_get_mt_mask()
7195 u32 new_ctl = vmx->secondary_exec_control; in vmcs_set_secondary_exec_control()
7203 * (indicating "allowed-1") if they are supported in the guest's CPUID.
7210 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; in nested_vmx_cr_fixed1_bits_update()
7211 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; in nested_vmx_cr_fixed1_bits_update()
7214 if (entry && (entry->_reg & (_cpuid_mask))) \ in nested_vmx_cr_fixed1_bits_update()
7215 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ in nested_vmx_cr_fixed1_bits_update()
7229 cr4_fixed1_update(X86_CR4_VMXE, ecx, feature_bit(VMX)); in nested_vmx_cr_fixed1_bits_update()
7230 cr4_fixed1_update(X86_CR4_SMXE, ecx, feature_bit(SMX)); in nested_vmx_cr_fixed1_bits_update()
7231 cr4_fixed1_update(X86_CR4_PCIDE, ecx, feature_bit(PCID)); in nested_vmx_cr_fixed1_bits_update()
7232 cr4_fixed1_update(X86_CR4_OSXSAVE, ecx, feature_bit(XSAVE)); in nested_vmx_cr_fixed1_bits_update()
7238 cr4_fixed1_update(X86_CR4_PKE, ecx, feature_bit(PKU)); in nested_vmx_cr_fixed1_bits_update()
7239 cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); in nested_vmx_cr_fixed1_bits_update()
7240 cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); in nested_vmx_cr_fixed1_bits_update()
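/*
 * The cr4_fixed1_update() calls above all follow one pattern: a CR4 bit is
 * reported as "allowed-1" to the nested guest only if the matching CPUID
 * feature is exposed to it. A standalone sketch of that pattern (the struct
 * and names are illustrative, not the kernel's):
 */
#include <stdint.h>
#include <stdbool.h>

struct cr4_feature_map {
	uint64_t cr4_bit;	/* e.g. the UMIP or LA57 bit */
	bool guest_has_feature;	/* result of the guest-CPUID lookup */
};

static uint64_t build_cr4_fixed1(const struct cr4_feature_map *map, int n,
				 uint64_t always_allowed)
{
	uint64_t fixed1 = always_allowed;

	for (int i = 0; i < n; i++)
		if (map[i].guest_has_feature)
			fixed1 |= map[i].cr4_bit;

	return fixed1;
}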
7253 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7254 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7256 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7257 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
7272 vmx->pt_desc.caps[CPUID_EAX + i*PT_CPUID_REGS_NUM] = best->eax; in update_intel_pt_cfg()
7273 vmx->pt_desc.caps[CPUID_EBX + i*PT_CPUID_REGS_NUM] = best->ebx; in update_intel_pt_cfg()
7274 vmx->pt_desc.caps[CPUID_ECX + i*PT_CPUID_REGS_NUM] = best->ecx; in update_intel_pt_cfg()
7275 vmx->pt_desc.caps[CPUID_EDX + i*PT_CPUID_REGS_NUM] = best->edx; in update_intel_pt_cfg()
7279 vmx->pt_desc.addr_range = intel_pt_validate_cap(vmx->pt_desc.caps, in update_intel_pt_cfg()
7283 vmx->pt_desc.ctl_bitmask = ~(RTIT_CTL_TRACEEN | RTIT_CTL_OS | in update_intel_pt_cfg()
7287 * If CPUID.(EAX=14H,ECX=0):EBX[0]=1 CR3Filter can be set otherwise in update_intel_pt_cfg()
7290 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_cr3_filtering)) in update_intel_pt_cfg()
7291 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_CR3EN; in update_intel_pt_cfg()
7294 * If CPUID.(EAX=14H,ECX=0):EBX[1]=1 CYCEn, CycThresh and in update_intel_pt_cfg()
7297 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_psb_cyc)) in update_intel_pt_cfg()
7298 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_CYCLEACC | in update_intel_pt_cfg()
7302 * If CPUID.(EAX=14H,ECX=0):EBX[3]=1 MTCEn BranchEn and in update_intel_pt_cfg()
7305 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_mtc)) in update_intel_pt_cfg()
7306 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_MTC_EN | in update_intel_pt_cfg()
7309 /* If CPUID.(EAX=14H,ECX=0):EBX[4]=1 FUPonPTW and PTWEn can be set */ in update_intel_pt_cfg()
7310 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_ptwrite)) in update_intel_pt_cfg()
7311 vmx->pt_desc.ctl_bitmask &= ~(RTIT_CTL_FUP_ON_PTW | in update_intel_pt_cfg()
7314 /* If CPUID.(EAX=14H,ECX=0):EBX[5]=1 PwrEvEn can be set */ in update_intel_pt_cfg()
7315 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_power_event_trace)) in update_intel_pt_cfg()
7316 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_PWR_EVT_EN; in update_intel_pt_cfg()
7318 /* If CPUID.(EAX=14H,ECX=0):ECX[0]=1 ToPA can be set */ in update_intel_pt_cfg()
7319 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_topa_output)) in update_intel_pt_cfg()
7320 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_TOPA; in update_intel_pt_cfg()
7322 /* If CPUID.(EAX=14H,ECX=0):ECX[3]=1 FabricEn can be set */ in update_intel_pt_cfg()
7323 if (intel_pt_validate_cap(vmx->pt_desc.caps, PT_CAP_output_subsys)) in update_intel_pt_cfg()
7324 vmx->pt_desc.ctl_bitmask &= ~RTIT_CTL_FABRIC_EN; in update_intel_pt_cfg()
7327 for (i = 0; i < vmx->pt_desc.addr_range; i++) in update_intel_pt_cfg()
7328 vmx->pt_desc.ctl_bitmask &= ~(0xfULL << (32 + i * 4)); in update_intel_pt_cfg()
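/*
 * Sketch of the final loop above: RTIT_CTL has one 4-bit ADDRn_CFG field per
 * supported address range, starting at bit 32, so every supported range
 * clears another nibble out of the "reserved bits" mask.
 */
#include <stdint.h>

static uint64_t unmask_addr_cfg_fields(uint64_t ctl_bitmask,
				       unsigned int nr_addr_ranges)
{
	for (unsigned int i = 0; i < nr_addr_ranges; i++)
		ctl_bitmask &= ~(0xfULL << (32 + i * 4));

	return ctl_bitmask;
}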
7336 vcpu->arch.xsaves_enabled = false; in vmx_vcpu_after_set_cpuid()
7344 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_vcpu_after_set_cpuid()
7348 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_vcpu_after_set_cpuid()
7412 to_vmx(vcpu)->req_immediate_exit = true; in vmx_request_immediate_exit()
7423 if (info->intercept == x86_intercept_in || in vmx_check_intercept_io()
7424 info->intercept == x86_intercept_ins) { in vmx_check_intercept_io()
7425 port = info->src_val; in vmx_check_intercept_io()
7426 size = info->dst_bytes; in vmx_check_intercept_io()
7428 port = info->dst_val; in vmx_check_intercept_io()
7429 size = info->src_bytes; in vmx_check_intercept_io()
7433 * If the 'use IO bitmaps' VM-execution control is 0, IO instruction in vmx_check_intercept_io()
7434 * VM-exits depend on the 'unconditional IO exiting' VM-execution in vmx_check_intercept_io()
7437 * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps. in vmx_check_intercept_io()
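/*
 * Sketch of an I/O-bitmap lookup matching the comment above, with made-up
 * parameter names: bitmap A covers ports 0x0000-0x7fff, bitmap B covers
 * 0x8000-0xffff, and a multi-byte access intercepts if any accessed port has
 * its bit set.
 */
#include <stdint.h>
#include <stdbool.h>

static bool io_access_intercepted(const uint8_t *bitmap_a,
				  const uint8_t *bitmap_b,
				  uint16_t port, unsigned int size)
{
	for (unsigned int i = 0; i < size; i++) {
		uint16_t p = port + i;
		const uint8_t *bitmap = (p < 0x8000) ? bitmap_a : bitmap_b;
		uint16_t bit = p & 0x7fff;

		if (bitmap[bit / 8] & (1u << (bit % 8)))
			return true;
	}
	return false;
}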
7456 switch (info->intercept) { in vmx_check_intercept()
7464 exception->vector = UD_VECTOR; in vmx_check_intercept()
7465 exception->error_code_valid = false; in vmx_check_intercept()
7503 u64 low = a << shift, high = a >> (64 - shift); in u64_shl_div_u64()
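/*
 * Judging by the line above, this helper divides the 128-bit value
 * (a << shift) by a 64-bit divisor and reports overflow if the quotient does
 * not fit in 64 bits. A portable sketch using the compiler's 128-bit type
 * instead of divq:
 */
#include <stdint.h>

static int u64_shl_div_u64_sketch(uint64_t a, unsigned int shift,
				  uint64_t divisor, uint64_t *result)
{
	unsigned __int128 quotient = ((unsigned __int128)a << shift) / divisor;

	if (quotient > UINT64_MAX)
		return 1;	/* would overflow a 64-bit result */

	*result = (uint64_t)quotient;
	return 0;
}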
7522 struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer; in vmx_set_hv_timer()
7527 delta_tsc = max(guest_deadline_tsc, guest_tscl) - guest_tscl; in vmx_set_hv_timer()
7529 ktimer->timer_advance_ns); in vmx_set_hv_timer()
7532 delta_tsc -= lapic_timer_advance_cycles; in vmx_set_hv_timer()
7537 if (vcpu->arch.tsc_scaling_ratio != kvm_default_tsc_scaling_ratio && in vmx_set_hv_timer()
7540 vcpu->arch.tsc_scaling_ratio, &delta_tsc)) in vmx_set_hv_timer()
7541 return -ERANGE; in vmx_set_hv_timer()
7550 return -ERANGE; in vmx_set_hv_timer()
7552 vmx->hv_deadline_tsc = tscl + delta_tsc; in vmx_set_hv_timer()
7559 to_vmx(vcpu)->hv_deadline_tsc = -1; in vmx_cancel_hv_timer()
7565 if (!kvm_pause_in_guest(vcpu->kvm)) in vmx_sched_in()
7616 if (vcpu->arch.mcg_cap & MCG_LMCE_P) in vmx_setup_mce()
7617 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_setup_mce()
7620 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_setup_mce()
7627 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_smi_allowed()
7628 return -EBUSY; in vmx_smi_allowed()
7636 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); in vmx_pre_enter_smm()
7637 if (vmx->nested.smm.guest_mode) in vmx_pre_enter_smm()
7638 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_pre_enter_smm()
7640 vmx->nested.smm.vmxon = vmx->nested.vmxon; in vmx_pre_enter_smm()
7641 vmx->nested.vmxon = false; in vmx_pre_enter_smm()
7651 if (vmx->nested.smm.vmxon) { in vmx_pre_leave_smm()
7652 vmx->nested.vmxon = true; in vmx_pre_leave_smm()
7653 vmx->nested.smm.vmxon = false; in vmx_pre_leave_smm()
7656 if (vmx->nested.smm.guest_mode) { in vmx_pre_leave_smm()
7661 vmx->nested.smm.guest_mode = false; in vmx_pre_leave_smm()
7673 return to_vmx(vcpu)->nested.vmxon; in vmx_apic_init_signal_blocked()
7679 struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer; in vmx_migrate_timers()
7847 return -EIO; in hardware_setup()
7939 * and EPT A/D bit features are enabled -- PML depends on them to work. in hardware_setup()
7955 u64 use_timer_freq = 5000ULL * 1000 * 1000; in hardware_setup()
7963 use_timer_freq = (u64)tsc_khz * 1000; in hardware_setup()
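/*
 * The two assignments above pick an assumed 5 GHz timer frequency and then
 * the real TSC frequency when it is known. A hedged sketch of the kind of
 * sanity bound such a block needs: the preemption timer value is a 32-bit
 * count, so if the timer ticks fast enough that the maximum value elapses in
 * under ten seconds, a "disarmed" (maxed-out) timer would still fire and
 * cause spurious exits, and the feature is better left disabled.
 */
#include <stdint.h>
#include <stdbool.h>

static bool preemption_timer_usable(uint64_t timer_freq_hz)
{
	/* the max programmable value must cover at least 10s of guest time */
	return timer_freq_hz <= UINT32_MAX / 10;
}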
7984 return -EINVAL; in hardware_setup()
8042 * Reset everything to support using non-enlightened VMCS in vmx_exit()
8052 vp_ap->nested_control.features.directhypercall = 0; in vmx_exit()
8053 vp_ap->current_nested_vmcs = 0; in vmx_exit()
8054 vp_ap->enlighten_vmentry = 0; in vmx_exit()
8089 pr_info("KVM: vmx: using Hyper-V Enlightened VMCS\n"); in vmx_init()