Lines Matching +full:0 +full:x8ff

21 static bool __read_mostly nested_early_check = 0;
28 trace_kvm_nested_vmenter_failed(#consistency_check, 0); \
76 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
77 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
79 for (i = j = 0; i < max_shadow_read_only_fields; i++) { in init_vmcs_shadow_fields()
100 for (i = j = 0; i < max_shadow_read_write_fields; i++) { in init_vmcs_shadow_fields()
233 vmx->nested.hv_evmcs_vmptr = 0; in nested_release_evmcs()
350 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); in nested_ept_inject_page_fault()
382 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; in nested_vmx_is_page_fault_vmexit()
420 *exit_qual = 0; in nested_vmx_check_exception()
424 return 0; in nested_vmx_check_exception()
451 return 0; in nested_vmx_check_io_bitmap_controls()
457 return 0; in nested_vmx_check_io_bitmap_controls()
464 return 0; in nested_vmx_check_msr_bitmap_controls()
469 return 0; in nested_vmx_check_msr_bitmap_controls()
476 return 0; in nested_vmx_check_tpr_shadow_controls()
481 return 0; in nested_vmx_check_tpr_shadow_controls()
497 if (msr <= 0x1fff) { in msr_write_intercepted_l01()
498 return !!test_bit(msr, msr_bitmap + 0x800 / f); in msr_write_intercepted_l01()
499 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in msr_write_intercepted_l01()
500 msr &= 0x1fff; in msr_write_intercepted_l01()
501 return !!test_bit(msr, msr_bitmap + 0xc00 / f); in msr_write_intercepted_l01()
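
The two lookups above encode the VMX MSR-bitmap layout: one 4 KiB page holds four 1 KiB quadrants, reads for low MSRs at byte 0x000, reads for the 0xc0000000 range at 0x400, writes for low MSRs at 0x800, and writes for the high range at 0xc00. A minimal standalone model of that lookup (a sketch, not the kernel code; the byte-indexed helper stands in for the kernel's test_bit()):

#include <stdbool.h>
#include <stdint.h>

/* One bit per MSR; byte-indexed stand-in for the kernel's test_bit(). */
static bool bitmap_test(const uint8_t *bm, uint32_t bit)
{
        return bm[bit / 8] & (1u << (bit % 8));
}

/* Writes to low MSRs are tracked at byte offset 0x800, writes to the
 * 0xc0000000 range at 0xc00; anything outside both ranges is always
 * intercepted.
 */
static bool msr_write_intercepted(const uint8_t *page, uint32_t msr)
{
        if (msr <= 0x1fff)
                return bitmap_test(page + 0x800, msr);
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                return bitmap_test(page + 0xc00, msr & 0x1fff);
        return true;
}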
520 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff. in nested_vmx_disable_intercept_for_msr()
522 if (msr <= 0x1fff) { in nested_vmx_disable_intercept_for_msr()
524 !test_bit(msr, msr_bitmap_l1 + 0x000 / f)) in nested_vmx_disable_intercept_for_msr()
526 __clear_bit(msr, msr_bitmap_nested + 0x000 / f); in nested_vmx_disable_intercept_for_msr()
529 !test_bit(msr, msr_bitmap_l1 + 0x800 / f)) in nested_vmx_disable_intercept_for_msr()
531 __clear_bit(msr, msr_bitmap_nested + 0x800 / f); in nested_vmx_disable_intercept_for_msr()
533 } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) { in nested_vmx_disable_intercept_for_msr()
534 msr &= 0x1fff; in nested_vmx_disable_intercept_for_msr()
536 !test_bit(msr, msr_bitmap_l1 + 0x400 / f)) in nested_vmx_disable_intercept_for_msr()
538 __clear_bit(msr, msr_bitmap_nested + 0x400 / f); in nested_vmx_disable_intercept_for_msr()
541 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f)) in nested_vmx_disable_intercept_for_msr()
543 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f); in nested_vmx_disable_intercept_for_msr()
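
The repeated test/clear pattern above is the merge policy for the bitmap L0 actually runs L2 with: an intercept may be cleared only when L1's own bitmap also leaves that MSR unintercepted, so L2 can never dodge a check L1 asked for. A standalone sketch of that rule for the low-MSR write quadrant only (hypothetical helper names, byte-indexed rather than the kernel's long-indexed bitmaps):

#include <stdbool.h>
#include <stdint.h>

static bool bm_test(const uint8_t *bm, uint32_t bit)
{
        return bm[bit / 8] & (1u << (bit % 8));
}

static void bm_clear(uint8_t *bm, uint32_t bit)
{
        bm[bit / 8] &= (uint8_t)~(1u << (bit % 8));
}

/* Pass a low MSR's writes through to L2 only if L1 does not intercept
 * them either (write bits for MSRs 0x0-0x1fff live at byte 0x800).
 */
static void merge_write_intercept(uint8_t *merged, const uint8_t *l1_bitmap,
                                  uint32_t msr)
{
        if (msr > 0x1fff)
                return;
        if (!bm_test(l1_bitmap + 0x800, msr))
                bm_clear(merged + 0x800, msr);
}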
552 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in enable_x2apic_msr_intercepts()
555 msr_bitmap[word] = ~0; in enable_x2apic_msr_intercepts()
556 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; in enable_x2apic_msr_intercepts()
592 * L0 need not intercept reads for MSRs between 0x800 in nested_vmx_prepare_msr_bitmap()
593 * and 0x8ff, it just lets the processor take the value in nested_vmx_prepare_msr_bitmap()
597 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in nested_vmx_prepare_msr_bitmap()
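
Both loops above step through the x2APIC MSR range 0x800-0x8ff one machine word at a time: msr / BITS_PER_LONG gives the word index in the read quadrant, and adding 0x800 / sizeof(long) longs lands on the same index in the write quadrant 0x800 bytes later. A standalone sketch of the intercept-enabling variant (assumes the bitmap argument points at a full 4 KiB page):

#include <limits.h>
#include <stddef.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static void enable_x2apic_intercepts(unsigned long *msr_bitmap)
{
        unsigned int msr;

        for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
                unsigned int word = msr / BITS_PER_LONG;

                msr_bitmap[word] = ~0UL;                  /* read intercepts */
                msr_bitmap[word + 0x800 / sizeof(unsigned long)] = ~0UL;
                                                          /* write intercepts */
        }
}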
712 return 0; in nested_vmx_check_apic_access_controls()
722 return 0; in nested_vmx_check_apicv_controls()
744 * bits 5:0 of posted_intr_desc_addr should be zero. in nested_vmx_check_apicv_controls()
749 CC((vmcs12->posted_intr_nv & 0xff00)) || in nested_vmx_check_apicv_controls()
750 CC((vmcs12->posted_intr_desc_addr & 0x3f)) || in nested_vmx_check_apicv_controls()
758 return 0; in nested_vmx_check_apicv_controls()
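
The two CC() lines above encode the architectural constraints on posted interrupts: the notification vector is an 8-bit value, so bits 15:8 of posted_intr_nv must be clear, and the 64-byte descriptor must be naturally aligned, so bits 5:0 of its address must be zero. A standalone model of just those two checks (a sketch with a hypothetical helper name, outside the kernel's CC() machinery):

#include <stdbool.h>
#include <stdint.h>

static bool posted_intr_fields_valid(uint16_t posted_intr_nv,
                                     uint64_t posted_intr_desc_addr)
{
        if (posted_intr_nv & 0xff00)        /* vector must fit in 8 bits */
                return false;
        if (posted_intr_desc_addr & 0x3f)   /* 64-byte alignment */
                return false;
        return true;
}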
766 if (count == 0) in nested_vmx_check_msr_switch()
767 return 0; in nested_vmx_check_msr_switch()
773 return 0; in nested_vmx_check_msr_switch()
787 return 0; in nested_vmx_check_exit_msr_switch_controls()
798 return 0; in nested_vmx_check_entry_msr_switch_controls()
805 return 0; in nested_vmx_check_pml_controls()
811 return 0; in nested_vmx_check_pml_controls()
820 return 0; in nested_vmx_check_unrestricted_guest_controls()
829 return 0; in nested_vmx_check_mode_based_ept_exec_controls()
836 return 0; in nested_vmx_check_shadow_vmcs_controls()
842 return 0; in nested_vmx_check_shadow_vmcs_controls()
849 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) in nested_vmx_msr_check_common()
854 if (CC(e->reserved != 0)) in nested_vmx_msr_check_common()
856 return 0; in nested_vmx_msr_check_common()
867 return 0; in nested_vmx_load_msr_check()
876 return 0; in nested_vmx_store_msr_check()
890 * return 0 for success, entry index for failure.
903 for (i = 0; i < count; i++) { in nested_vmx_load_msr()
910 "%s cannot read MSR entry (%u, 0x%08llx)\n", in nested_vmx_load_msr()
916 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_load_msr()
922 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_load_msr()
927 return 0; in nested_vmx_load_msr()
948 if (i >= 0) { in nested_vmx_get_vmexit_msr_value()
957 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, in nested_vmx_get_vmexit_msr_value()
971 "%s cannot read MSR entry (%u, 0x%08llx)\n", in read_and_check_msr_entry()
977 "%s check failed (%u, 0x%x, 0x%x)\n", in read_and_check_msr_entry()
991 for (i = 0; i < count; i++) { in nested_vmx_store_msr()
1006 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_store_msr()
1011 return 0; in nested_vmx_store_msr()
1022 for (i = 0; i < count; i++) { in nested_msr_store_list_has_msr()
1043 in_autostore_list = msr_autostore_slot >= 0; in prepare_vmx_msr_autostore_list()
1072 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu); in nested_cr3_valid()
1073 return (val & invalid_mask) == 0; in nested_cr3_valid()
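
The check above rejects any CR3 with bits set at or above the guest's MAXPHYADDR; with MAXPHYADDR = 48, for instance, invalid_mask is 0xffff000000000000. A standalone model (a sketch; it assumes maxphyaddr < 64, since the shift is undefined otherwise):

#include <stdbool.h>
#include <stdint.h>

static bool cr3_reserved_bits_clear(uint64_t val, unsigned int maxphyaddr)
{
        uint64_t invalid_mask = ~0ULL << maxphyaddr;  /* maxphyaddr < 64 */

        return (val & invalid_mask) == 0;
}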
1165 return 0; in nested_vmx_load_cr3()
1268 return 0; in vmx_restore_vmx_basic()
1311 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) in vmx_restore_control_msr()
1314 /* Check must-be-0 bits are still 0. */ in vmx_restore_control_msr()
1321 return 0; in vmx_restore_control_msr()
1356 return 0; in vmx_restore_vmx_misc()
1370 return 0; in vmx_restore_vmx_ept_vpid_cap()
1397 return 0; in vmx_restore_fixed0_msr()
1403 * Returns 0 on success, non-0 otherwise.
1427 * If userspace wants to emulate VMX_BASIC[55]=0, userspace in vmx_set_vmx_msr()
1455 return 0; in vmx_set_vmx_msr()
1460 return 0; in vmx_set_vmx_msr()
1469 /* Returns 0 on success, non-0 otherwise. */
1544 return 0; in vmx_get_vmx_msr()
1570 for (i = 0; i < max_shadow_read_write_fields; i++) { in copy_shadow_to_vmcs12()
1603 for (q = 0; q < ARRAY_SIZE(fields); q++) { in copy_vmcs12_to_shadow()
1604 for (i = 0; i < max_fields[q]; i++) { in copy_vmcs12_to_shadow()
1829 return 0; in copy_enlightened_to_vmcs12()
1992 return 0; in copy_vmcs12_to_enlightened()
2032 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is in nested_vmx_handle_enlightened_vmptrld()
2041 * CPUID.0x4000000A.EAX[0:15]. in nested_vmx_handle_enlightened_vmptrld()
2065 memset(vmcs12, 0, sizeof(*vmcs12)); in nested_vmx_handle_enlightened_vmptrld()
2135 if (preemption_timeout == 0) { in vmx_start_preemption_timer()
2140 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
2181 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2185 vmcs_write64(VM_FUNCTION_CONTROL, 0); in prepare_vmcs02_constant_state()
2394 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); in prepare_vmcs02_early()
2441 vmx->segment_cache.bitmask = 0; in prepare_vmcs02_rare()
2478 * setting MASK=MATCH=0 and (see below) EB.PF=1. in prepare_vmcs02_rare()
2481 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when in prepare_vmcs02_rare()
2489 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); in prepare_vmcs02_rare()
2490 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); in prepare_vmcs02_rare()
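
The comment block above leans on the SDM's page-fault filtering rule: with EB.PF = 1 a page fault causes a VM exit when (error_code & MASK) == MATCH, and with EB.PF = 0 when the comparison fails, so MASK = MATCH = 0 together with EB.PF = 1 makes every page fault exit. A standalone predicate capturing that rule (a sketch, not KVM's code):

#include <stdbool.h>
#include <stdint.h>

static bool pf_causes_vmexit(uint32_t error_code, uint32_t pfec_mask,
                             uint32_t pfec_match, bool eb_pf)
{
        bool match = (error_code & pfec_mask) == pfec_match;

        /* EB.PF selects whether a match or a mismatch triggers the exit. */
        return eb_pf ? match : !match;
}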
2524 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2646 return 0; in prepare_vmcs02()
2659 return 0; in nested_vmx_check_nmi_controls()
2696 if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f))) in nested_vmx_check_eptp()
2764 return 0; in nested_check_vm_execution_controls()
2781 return 0; in nested_check_vm_exit_controls()
2822 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) in nested_check_vm_entry_controls()
2847 CC(vmcs12->vm_entry_instruction_len == 0 && in nested_check_vm_entry_controls()
2856 return 0; in nested_check_vm_entry_controls()
2870 return 0; in nested_vmx_check_controls()
2881 return 0; in nested_vmx_check_address_space_size()
2930 CC(vmcs12->host_cs_selector == 0) || in nested_vmx_check_host_state()
2931 CC(vmcs12->host_tr_selector == 0) || in nested_vmx_check_host_state()
2932 CC(vmcs12->host_ss_selector == 0 && !ia32e)) in nested_vmx_check_host_state()
2945 * IA32_EFER MSR must be 0 in the field for that register. In addition, in nested_vmx_check_host_state()
2956 return 0; in nested_vmx_check_host_state()
2962 int r = 0; in nested_vmx_check_vmcs_link_ptr()
2967 return 0; in nested_vmx_check_vmcs_link_ptr()
2994 return 0; in nested_check_guest_non_reg_state()
3037 * - Bits reserved in the IA32_EFER MSR must be 0. in nested_vmx_check_guest_state()
3060 return 0; in nested_vmx_check_guest_state()
3070 return 0; in nested_vmx_check_vmentry_hw()
3073 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3075 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3087 vmcs_writel(GUEST_RFLAGS, 0); in nested_vmx_check_vmentry_hw()
3138 return 0; in nested_vmx_check_vmentry_hw()
3192 vcpu->run->internal.ndata = 0; in nested_get_vmcs12_pages()
3250 vcpu->run->internal.ndata = 0; in vmx_get_nested_state_pages()
3268 return 0; in nested_vmx_write_pml_buffer()
3279 return 0; in nested_vmx_write_pml_buffer()
3286 gpa &= ~0xFFFull; in nested_vmx_write_pml_buffer()
3291 return 0; in nested_vmx_write_pml_buffer()
3295 return 0; in nested_vmx_write_pml_buffer()
3309 return 0; in nested_vmx_check_permission()
3313 kvm_inject_gp(vcpu, 0); in nested_vmx_check_permission()
3314 return 0; in nested_vmx_check_permission()
3325 return ((rvi & 0xf0) > (vppr & 0xf0)); in vmx_has_apicv_interrupt()
3616 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3622 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3624 return 0; in nested_vmx_run()
3695 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3730 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3779 if ((u8)max_irr > ((u8)status & 0xff)) { in vmx_complete_nested_posted_interrupt()
3780 status &= ~0xff; in vmx_complete_nested_posted_interrupt()
3877 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); in vmx_check_nested_events()
3878 return 0; in vmx_check_nested_events()
3890 return 0; in vmx_check_nested_events()
3897 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); in vmx_check_nested_events()
3898 return 0; in vmx_check_nested_events()
3907 return 0; in vmx_check_nested_events()
3913 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); in vmx_check_nested_events()
3914 return 0; in vmx_check_nested_events()
3931 INTR_INFO_VALID_MASK, 0); in vmx_check_nested_events()
3936 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
3938 return 0; in vmx_check_nested_events()
3946 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); in vmx_check_nested_events()
3947 return 0; in vmx_check_nested_events()
3952 return 0; in vmx_check_nested_events()
3961 if (ktime_to_ns(remaining) <= 0) in vmx_get_preemption_timer_value()
3962 return 0; in vmx_get_preemption_timer_value()
4241 vmx_set_interrupt_shadow(vcpu, 0); in load_vmcs12_host_state()
4276 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4277 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4281 vmcs_write64(GUEST_BNDCFGS, 0); in load_vmcs12_host_state()
4294 .base = 0, in load_vmcs12_host_state()
4295 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4308 .base = 0, in load_vmcs12_host_state()
4309 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4330 .limit = 0x67, in load_vmcs12_host_state()
4337 kvm_set_dr(vcpu, 7, 0x400); in load_vmcs12_host_state()
4338 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); in load_vmcs12_host_state()
4359 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4435 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { in nested_vmx_restore_host_state()
4439 "%s read MSR index failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4444 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { in nested_vmx_restore_host_state()
4448 "%s read MSR failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4459 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_restore_host_state()
4466 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", in nested_vmx_restore_host_state()
4616 WARN_ON(irq < 0); in nested_vmx_vmexit()
4651 vmx->fail = 0; in nested_vmx_vmexit()
4657 * On success, returns 0. When the operand is invalid, returns 1 and throws
4679 int index_reg = (vmx_instruction_info >> 18) & 0xf; in get_vmx_mem_address()
4681 int base_reg = (vmx_instruction_info >> 23) & 0xf; in get_vmx_mem_address()
4694 else if (addr_size == 0) in get_vmx_mem_address()
4709 off &= 0xffffffff; in get_vmx_mem_address()
4710 else if (addr_size == 0) /* 16 bit */ in get_vmx_mem_address()
4711 off &= 0xffff; in get_vmx_mem_address()
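
The decode above pulls the operand description out of the VMX-instruction-information field: index register in bits 21:18, base register in bits 26:23, and address size in bits 9:7 (0 = 16-bit, 1 = 32-bit, 2 = 64-bit), which drives the offset truncation. A standalone sketch of that decode (hypothetical struct and helper names):

#include <stdint.h>

struct vmx_mem_operand {
        int index_reg;   /* bits 21:18 */
        int base_reg;    /* bits 26:23 */
        int addr_size;   /* bits  9:7  */
};

static struct vmx_mem_operand decode_instr_info(uint32_t info)
{
        struct vmx_mem_operand op = {
                .index_reg = (info >> 18) & 0xf,
                .base_reg  = (info >> 23) & 0xf,
                .addr_size = (info >> 7) & 0x7,
        };

        return op;
}

/* Truncate the computed offset to the instruction's address size. */
static uint64_t truncate_offset(uint64_t off, int addr_size)
{
        if (addr_size == 1)
                return off & 0xffffffff;   /* 32-bit */
        if (addr_size == 0)
                return off & 0xffff;       /* 16-bit */
        return off;                        /* 64-bit */
}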
4726 /* Long mode: #GP(0)/#SS(0) if the memory address is in a in get_vmx_mem_address()
4737 *ret = (s.base + off) & 0xffffffff; in get_vmx_mem_address()
4741 * - segment type check (#GP(0) may be thrown) in get_vmx_mem_address()
4742 * - usability check (#GP(0)/#SS(0)) in get_vmx_mem_address()
4743 * - limit check (#GP(0)/#SS(0)) in get_vmx_mem_address()
4746 /* #GP(0) if the destination operand is located in a in get_vmx_mem_address()
4749 exn = ((s.type & 0xa) == 0 || (s.type & 8)); in get_vmx_mem_address()
4751 /* #GP(0) if the source operand is located in an in get_vmx_mem_address()
4754 exn = ((s.type & 0xa) == 8); in get_vmx_mem_address()
4756 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in get_vmx_mem_address()
4759 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. in get_vmx_mem_address()
4761 exn = (s.unusable != 0); in get_vmx_mem_address()
4764 * Protected mode: #GP(0)/#SS(0) if the memory operand is in get_vmx_mem_address()
4766 * limit checks for flat segments, i.e. segments with base==0, in get_vmx_mem_address()
4767 * limit==0xffffffff and of type expand-up data or code. in get_vmx_mem_address()
4769 if (!(s.base == 0 && s.limit == 0xffffffff && in get_vmx_mem_address()
4777 0); in get_vmx_mem_address()
4781 return 0; in get_vmx_mem_address()
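
The segment tests above follow the descriptor type encoding: bit 3 distinguishes code from data, and bit 1 means writable for data or readable for code. So (s.type & 0xa) == 0 || (s.type & 8) rejects a write to read-only data or to any code segment, while (s.type & 0xa) == 8 rejects a read from execute-only code. A standalone predicate (hypothetical name):

#include <stdbool.h>
#include <stdint.h>

static bool segment_access_faults(uint8_t type, bool is_write)
{
        if (is_write)   /* destination: must be a writable data segment */
                return (type & 0xa) == 0 || (type & 8);
        /* source: anything readable is fine; only execute-only code faults */
        return (type & 0xa) == 8;
}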
4825 return 0; in nested_vmx_get_vmptr()
4860 if (r < 0) in enter_vmx_operation()
4884 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
4888 return 0; in enter_vmx_operation()
4940 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, in handle_vmon()
4947 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's in handle_vmon()
4952 kvm_inject_gp(vcpu, 0); in handle_vmon()
4966 kvm_inject_gp(vcpu, 0); in handle_vmon()
4972 kvm_inject_gp(vcpu, 0); in handle_vmon()
5022 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5047 u32 zero = 0; in handle_vmclear()
5111 gva_t gva = 0; in handle_vmread()
5128 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmread()
5131 if (offset < 0) in handle_vmread()
5146 kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value); in handle_vmread()
5152 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmread()
5205 u64 value = 0; in handle_vmwrite()
5220 value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf)); in handle_vmwrite()
5231 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmwrite()
5234 if (offset < 0) in handle_vmwrite()
5261 value &= 0x1f0ff; in handle_vmwrite()
5386 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmptrst()
5428 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); in handle_invept()
5457 roots_to_free = 0; in handle_invept()
5462 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in handle_invept()
5508 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); in handle_invvpid()
5602 return 0; in nested_vmx_eptp_switching()
5626 case 0: in handle_vmfunc()
5661 while (size > 0) { in nested_vmx_check_io_bitmaps()
5662 if (port < 0x8000) in nested_vmx_check_io_bitmaps()
5664 else if (port < 0x10000) in nested_vmx_check_io_bitmaps()
5668 bitmap += (port & 0x7fff) / 8; in nested_vmx_check_io_bitmaps()
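
The walk above mirrors the VMX I/O-bitmap layout: two 4 KiB bitmaps, A covering ports 0x0000-0x7fff and B covering 0x8000-0xffff, one bit per port. A standalone single-port lookup built on the same arithmetic (a sketch; the kernel's loop additionally handles accesses that span multiple ports):

#include <stdbool.h>
#include <stdint.h>

static bool io_port_intercepted(const uint8_t *bitmap_a,
                                const uint8_t *bitmap_b, uint16_t port)
{
        const uint8_t *bitmap = (port < 0x8000) ? bitmap_a : bitmap_b;
        uint16_t bit = port & 0x7fff;

        return bitmap[bit / 8] & (1u << (bit % 8));
}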
5726 if (msr_index >= 0xc0000000) { in nested_vmx_exit_handled_msr()
5727 msr_index -= 0xc0000000; in nested_vmx_exit_handled_msr()
5755 case 0: /* mov to cr */ in nested_vmx_exit_handled_cr()
5759 case 0: in nested_vmx_exit_handled_cr()
5800 * lmsw can change bits 1..3 of cr0, and only set bit 0 of in nested_vmx_exit_handled_cr()
5803 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; in nested_vmx_exit_handled_cr()
5804 if (vmcs12->cr0_guest_host_mask & 0xe & in nested_vmx_exit_handled_cr()
5807 if ((vmcs12->cr0_guest_host_mask & 0x1) && in nested_vmx_exit_handled_cr()
5808 !(vmcs12->cr0_read_shadow & 0x1) && in nested_vmx_exit_handled_cr()
5809 (val & 0x1)) in nested_vmx_exit_handled_cr()
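
The lmsw handling above reflects that LMSW writes only CR0 bits 3:0 and can set but never clear PE (bit 0): a vmexit to L1 is needed if an L1-owned MP/EM/TS bit (mask 0xe) would change, or if L1 owns PE, the read shadow has it clear, and the new value sets it. A standalone predicate (hypothetical name):

#include <stdbool.h>
#include <stdint.h>

static bool lmsw_needs_vmexit(uint64_t guest_host_mask,
                              uint64_t read_shadow, uint16_t source_data)
{
        uint64_t val = source_data & 0x0f;

        if (guest_host_mask & 0xe & (val ^ read_shadow))
                return true;    /* MP/EM/TS owned by L1 would change */
        if ((guest_host_mask & 0x1) && !(read_shadow & 0x1) && (val & 0x1))
                return true;    /* PE owned by L1 would go 0 -> 1 */
        return false;
}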
5828 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in nested_vmx_exit_handled_vmcs_access()
5849 * interruption-type to 7 (other event) and the vector field to 0. Such in nested_vmx_exit_handled_mtf()
6065 exit_intr_info = 0; in nested_vmx_reflect_vmexit()
6066 exit_qual = 0; in nested_vmx_reflect_vmexit()
6107 .flags = 0, in vmx_get_nested_state()
6110 .hdr.vmx.flags = 0, in vmx_get_nested_state()
6113 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
6116 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6221 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6222 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
6235 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6300 return 0; in vmx_set_nested_state()
6315 return 0; in vmx_set_nested_state()
6352 return 0; in vmx_set_nested_state()
6399 return 0; in vmx_set_nested_state()
6402 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6433 * be set to 0, meaning that L1 may turn off any of these bits. The in nested_vmx_setup_ctls_msrs()
6451 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); in nested_vmx_setup_ctls_msrs()
6539 msrs->secondary_ctls_low = 0; in nested_vmx_setup_ctls_msrs()
6623 msrs->misc_high = 0; in nested_vmx_setup_ctls_msrs()
6663 for (i = 0; i < VMX_BITMAP_NR; i++) in nested_vmx_hardware_unsetup()
6673 enable_shadow_vmcs = 0; in nested_vmx_hardware_setup()
6675 for (i = 0; i < VMX_BITMAP_NR; i++) { in nested_vmx_hardware_setup()
6704 return 0; in nested_vmx_hardware_setup()