Lines matching "nested"
12 #include "nested.h"
195 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) in nested_vmx_fail()
205 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); in nested_vmx_abort()
222 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
229 if (!vmx->nested.hv_evmcs) in nested_release_evmcs()
232 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); in nested_release_evmcs()
233 vmx->nested.hv_evmcs_vmptr = 0; in nested_release_evmcs()
234 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
276 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
286 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
291 vmx->nested.vmxon = false; in free_nested()
292 vmx->nested.smm.vmxon = false; in free_nested()
293 free_vpid(vmx->nested.vpid02); in free_nested()
294 vmx->nested.posted_intr_nv = -1; in free_nested()
295 vmx->nested.current_vmptr = -1ull; in free_nested()
302 kfree(vmx->nested.cached_vmcs12); in free_nested()
303 vmx->nested.cached_vmcs12 = NULL; in free_nested()
304 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
305 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
307 if (vmx->nested.apic_access_page) { in free_nested()
308 kvm_release_page_clean(vmx->nested.apic_access_page); in free_nested()
309 vmx->nested.apic_access_page = NULL; in free_nested()
311 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in free_nested()
312 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in free_nested()
313 vmx->nested.pi_desc = NULL; in free_nested()
319 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
341 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
343 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
360 to_vmx(vcpu)->nested.msrs.ept_caps & in nested_ept_init_mmu_context()
392 * checks whether in a nested guest, we need to inject them to L1 or L2.
436 !to_vmx(vcpu)->nested.nested_run_pending) { in vmx_inject_page_fault_nested()
569 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; in nested_vmx_prepare_msr_bitmap()
570 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map; in nested_vmx_prepare_msr_bitmap()
658 kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false); in nested_vmx_prepare_msr_bitmap()
696 * In nested virtualization, check if L1 has set
882 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
883 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
889 * Load guest's/host's msr at nested entry/exit.
1077 * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
1119 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1179 * while L2 entries are tagged with vmx->nested.vpid02).
1186 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); in nested_has_guest_tlb_tag()
1213 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid in nested_vmx_transition_tlb_flush()
1214 * redundant flushes further down the nested pipeline. in nested_vmx_transition_tlb_flush()
1220 * all nested vCPUs. in nested_vmx_transition_tlb_flush()
1227 vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1228 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
1248 u64 vmx_basic = vmcs_config.nested.basic; in vmx_restore_vmx_basic()
1267 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
1306 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp); in vmx_restore_control_msr()
1318 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); in vmx_restore_control_msr()
1332 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low, in vmx_restore_vmx_misc()
1333 vmcs_config.nested.misc_high); in vmx_restore_vmx_misc()
1338 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1353 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1354 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1361 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps, in vmx_restore_vmx_ept_vpid_cap()
1362 vmcs_config.nested.vpid_caps); in vmx_restore_vmx_ept_vpid_cap()
1368 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1369 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1387 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index); in vmx_restore_fixed0_msr()
1396 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; in vmx_restore_fixed0_msr()
1413 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1454 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1457 if (data & ~vmcs_config.nested.vmfunc_controls) in vmx_set_vmx_msr()
1459 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1618 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1619 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_enlightened_to_vmcs12()
1834 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1835 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_vmcs12_to_enlightened()
1996 * This is an equivalent of the nested hypervisor executing the vmptrld
2006 if (likely(!vmx->nested.enlightened_vmcs_enabled)) in nested_vmx_handle_enlightened_vmptrld()
2012 if (unlikely(!vmx->nested.hv_evmcs || in nested_vmx_handle_enlightened_vmptrld()
2013 evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
2014 if (!vmx->nested.hv_evmcs) in nested_vmx_handle_enlightened_vmptrld()
2015 vmx->nested.current_vmptr = -1ull; in nested_vmx_handle_enlightened_vmptrld()
2020 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
2023 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2047 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2048 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2053 vmx->nested.dirty_vmcs12 = true; in nested_vmx_handle_enlightened_vmptrld()
2054 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2076 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2086 if (vmx->nested.hv_evmcs) { in nested_sync_vmcs12_to_shadow()
2089 vmx->nested.hv_evmcs->hv_clean_fields |= in nested_sync_vmcs12_to_shadow()
2095 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2101 container_of(timer, struct vcpu_vmx, nested.preemption_timer); in vmx_preemption_timer_fn()
2103 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2118 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2119 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2121 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2123 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2136 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2146 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
2153 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2170 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2172 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2191 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2196 * and then back to vmcs01 on nested vmexit. But since we flush in prepare_vmcs02_constant_state()
2228 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2229 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2241 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) in prepare_vmcs02_early()
2252 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2254 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2268 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2382 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2400 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02_rare()
2463 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2517 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2531 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02()
2534 if (vmx->nested.dirty_vmcs12 || hv_evmcs) { in prepare_vmcs02()
2536 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2543 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2549 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); in prepare_vmcs02()
2551 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2553 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); in prepare_vmcs02()
2564 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2617 * on nested VM-Exit, which can occur without actually running L2 and in prepare_vmcs02()
2670 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2674 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2684 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2688 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2701 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
2717 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2718 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2720 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2721 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2726 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2727 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2754 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2776 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2777 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2793 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2794 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
2867 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled) in nested_vmx_check_controls()
3043 if (to_vmx(vcpu)->nested.nested_run_pending && in nested_vmx_check_guest_state()
3150 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) { in nested_get_evmcs_page()
3177 if (vmx->nested.apic_access_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
3178 kvm_release_page_clean(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
3179 vmx->nested.apic_access_page = NULL; in nested_get_vmcs12_pages()
3183 vmx->nested.apic_access_page = page; in nested_get_vmcs12_pages()
3184 hpa = page_to_phys(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
3198 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3224 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3227 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3270 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3274 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is in nested_vmx_write_pml_buffer()
3282 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3307 if (!to_vmx(vcpu)->nested.vmxon) { in nested_vmx_check_permission()
3362 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3364 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_enter_non_root_mode()
3366 (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3368 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3372 * nested early checks are disabled. In the event of a "late" VM-Fail, in nested_vmx_enter_non_root_mode()
3378 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested in nested_vmx_enter_non_root_mode()
3381 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as in nested_vmx_enter_non_root_mode()
3389 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3464 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3496 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) in nested_vmx_enter_non_root_mode()
3497 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3502 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3503 * for running an L2 nested guest.
3524 if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)) in nested_vmx_run()
3538 if (vmx->nested.hv_evmcs) { in nested_vmx_run()
3547 * The nested entry process starts with enforcing various prerequisites in nested_vmx_run()
3575 * the nested entry. in nested_vmx_run()
3577 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3578 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3585 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { in nested_vmx_run()
3586 vmx->nested.pi_pending = true; in nested_vmx_run()
3588 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); in nested_vmx_run()
3591 /* Hide L1D cache contents from the nested guest. */ in nested_vmx_run()
3616 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3622 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3632 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3763 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
3766 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
3767 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
3770 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
3772 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
3776 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
3801 * hardware and avoid inducing failure on nested VM-Entry if L1 in nested_vmx_inject_exception_vmexit()
3852 to_vmx(vcpu)->nested.preemption_timer_expired; in nested_vmx_preemption_timer_pending()
3860 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); in vmx_check_nested_events()
3861 bool mtf_pending = vmx->nested.mtf_pending; in vmx_check_nested_events()
3869 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
3958 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
4060 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
4069 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
4076 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4082 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); in copy_vmcs02_to_vmcs12_rare()
4096 if (vmx->nested.hv_evmcs) in sync_vmcs02_to_vmcs12()
4099 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; in sync_vmcs02_to_vmcs12()
4121 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4126 * In some cases (usually, nested EPT), L2 is allowed to change its in sync_vmcs02_to_vmcs12()
4160 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4216 * A part of what we need to do when the nested L2 guest exits and we want to in prepare_vmcs12()
4219 * This function is to be called not only on normal nested exit, but also on
4220 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4386 * nested VMENTER (not worth adding a variable in nested_vmx). in nested_vmx_restore_host_state()
4427 * of VMFail), leaving the nested VM's MSRs in the software model in nested_vmx_restore_host_state()
4430 * MSR that was (prematurely) loaded from the nested VMEntry load in nested_vmx_restore_host_state()
4480 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4491 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
4518 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in nested_vmx_vmexit()
4580 if (vmx->nested.l1_tpr_threshold != -1) in nested_vmx_vmexit()
4581 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in nested_vmx_vmexit()
4586 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in nested_vmx_vmexit()
4587 vmx->nested.change_vmcs01_virtual_apic_mode = false; in nested_vmx_vmexit()
4592 if (vmx->nested.apic_access_page) { in nested_vmx_vmexit()
4593 kvm_release_page_clean(vmx->nested.apic_access_page); in nested_vmx_vmexit()
4594 vmx->nested.apic_access_page = NULL; in nested_vmx_vmexit()
4596 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in nested_vmx_vmexit()
4597 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in nested_vmx_vmexit()
4598 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
4600 if (vmx->nested.reload_vmcs01_apic_access_page) { in nested_vmx_vmexit()
4601 vmx->nested.reload_vmcs01_apic_access_page = false; in nested_vmx_vmexit()
4606 (enable_shadow_vmcs || vmx->nested.hv_evmcs)) in nested_vmx_vmexit()
4607 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_vmexit()
4793 vmx->nested.msrs.entry_ctls_high |= in nested_vmx_pmu_entry_exit_ctls_update()
4795 vmx->nested.msrs.exit_ctls_high |= in nested_vmx_pmu_entry_exit_ctls_update()
4798 vmx->nested.msrs.entry_ctls_high &= in nested_vmx_pmu_entry_exit_ctls_update()
4800 vmx->nested.msrs.exit_ctls_high &= in nested_vmx_pmu_entry_exit_ctls_update()
4859 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
4863 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
4864 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
4867 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
4868 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
4874 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in enter_vmx_operation()
4876 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in enter_vmx_operation()
4878 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
4880 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
4881 vmx->nested.vmxon = true; in enter_vmx_operation()
4891 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
4894 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
4897 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
4956 if (vmx->nested.vmxon) in handle_vmon()
4984 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; in handle_vmon()
4994 vmx->nested.vmxon_ptr = vmptr; in handle_vmon()
5006 if (vmx->nested.current_vmptr == -1ull) in nested_release_vmcs12()
5017 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
5021 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
5022 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5026 vmx->nested.current_vmptr = -1ull; in nested_release_vmcs12()
5061 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
5072 * vmx->nested.hv_evmcs but this shouldn't be a problem. in handle_vmclear()
5074 if (likely(!vmx->nested.enlightened_vmcs_enabled || in handle_vmclear()
5076 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
5122 if (vmx->nested.current_vmptr == -1ull || in handle_vmread()
5214 if (vmx->nested.current_vmptr == -1ull || in handle_vmwrite()
5286 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5294 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5299 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5301 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5320 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5324 if (vmx->nested.hv_evmcs) in handle_vmptrld()
5327 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5358 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); in handle_vmptrld()
5372 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; in handle_vmptrst()
5380 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs)) in handle_vmptrst()
5417 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
5419 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
5430 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
5446 * Nested EPT roots are always held through guest_mmu, in handle_invept()
5497 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
5499 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
5510 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
5612 * VMFUNC is only supported for nested guests, but we always enable the in handle_vmfunc()
5613 * secondary control for simplicity; for non-nested mode, fake that we in handle_vmfunc()
5638 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode in handle_vmfunc()
5891 * L0 always deals with the EPT violation. If nested EPT is in nested_vmx_l0_wants_exit()
5892 * used, and the nested mmu code discovers that the address is in nested_vmx_l0_wants_exit()
6055 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
6058 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM in nested_vmx_reflect_vmexit()
6125 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
6126 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
6127 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
6132 if (vmx->nested.hv_evmcs) in vmx_get_nested_state()
6141 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6144 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6150 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6153 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6157 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6161 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6187 if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6188 if (vmx->nested.hv_evmcs) in vmx_get_nested_state()
6216 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
6221 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6294 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6302 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
6337 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
6338 vmx->nested.vmxon = false; in vmx_set_nested_state()
6341 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
6354 vmx->nested.nested_run_pending = in vmx_set_nested_state()
6357 vmx->nested.mtf_pending = in vmx_set_nested_state()
6382 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
6384 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
6385 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
6394 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
6402 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6416 * returned for the various VMX controls MSRs when nested VMX is enabled.
6418 * valid during nested entry from L1 to L2.
6560 /* nested EPT: emulate EPT also to L1 */ in nested_vmx_setup_ctls_msrs()