/arch/x86/realmode/

init.c
     100  u64 efer;                                      in setup_real_mode() local
     148  rdmsrl(MSR_EFER, efer);                        in setup_real_mode()
     149  trampoline_header->efer = efer & ~EFER_LMA;    in setup_real_mode()

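These init.c hits are the AP bring-up path: setup_real_mode() snapshots the boot CPU's EFER into the trampoline header with EFER_LMA masked off, since LMA is set by hardware once paging activates long mode and an AP coming up through real mode is not there yet. A minimal user-space model of that masking; the bit positions are architectural, the sample value and names are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME (1ULL << 8)   /* Long Mode Enable */
    #define EFER_LMA (1ULL << 10)  /* Long Mode Active, set by hardware */
    #define EFER_NX  (1ULL << 11)  /* No-Execute Enable */

    int main(void)
    {
            /* A plausible value for what rdmsrl(MSR_EFER, efer) returns
             * on a 64-bit boot CPU; the kernel reads the live MSR. */
            uint64_t efer = EFER_LME | EFER_LMA | EFER_NX;

            /* Mirrors trampoline_header->efer = efer & ~EFER_LMA. */
            uint64_t trampoline_efer = efer & ~EFER_LMA;

            printf("boot EFER:       %#llx\n", (unsigned long long)efer);
            printf("trampoline EFER: %#llx\n", (unsigned long long)trampoline_efer);
            return 0;
    }
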
/arch/x86/include/asm/

suspend_64.h
      44  unsigned long efer;                            member

realmode.h
      52  u64 efer;

svm.h
     318  u64 efer;                                      member
     374  u64 efer;                                      member

kvm_host.h
     735  u64 efer;                                      member
    1592  int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
    1958  bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);

hyperv-tlfs.h
     770  u64 efer;                                      member

/arch/x86/kvm/

smm.c
      82  CHECK_SMRAM64_OFFSET(efer, 0xFED0);            in check_smram_offsets()
     256  smram->efer = vcpu->arch.efer;                 in enter_smm_save_state_64()
     539  if (kvm_set_msr(vcpu, MSR_EFER, smstate->efer & ~EFER_LMA))   in rsm_load_state_64()
     621  unsigned long cr4, efer;                       in emulator_leave_smm() local
     629  efer = 0;                                      in emulator_leave_smm()
     630  kvm_set_msr(vcpu, MSR_EFER, efer);             in emulator_leave_smm()

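smm.c covers both directions of the SMM world switch: enter_smm_save_state_64() stores vcpu->arch.efer into the 64-bit SMRAM image, rsm_load_state_64() feeds it back through kvm_set_msr() with LMA cleared, and check_smram_offsets() pins the field to its architectural slot at 0xFED0. A sketch of that build-time layout check; the struct, its padding, and the 0xFE00 base of the modeled region are illustrative assumptions, not KVM's definitions:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical cut-down save area: only the efer slot matters here;
     * everything before it is faked with padding. */
    struct smram64_model {
            uint8_t  before_efer[0xd0];   /* assumed fields 0xFE00..0xFED0 */
            uint64_t efer;                /* architectural slot 0xFED0 */
    };

    /* Same idea as CHECK_SMRAM64_OFFSET(efer, 0xFED0): if the layout
     * drifts, the build breaks instead of the guest. */
    static_assert(offsetof(struct smram64_model, efer) == 0xfed0 - 0xfe00,
                  "efer must sit at SMRAM offset 0xFED0");

    int main(void) { return 0; }
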
smm.h
     104  u64 efer;                                      member

emulate.c
     782  u64 efer;                                      in emulator_recalc_and_set_mode() local
     787  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);     in emulator_recalc_and_set_mode()
     791  if (efer & EFER_LMA)                           in emulator_recalc_and_set_mode()
     799  if (efer & EFER_LMA)                           in emulator_recalc_and_set_mode()
     808  if (efer & EFER_LMA) {                         in emulator_recalc_and_set_mode()
    1511  u64 efer = 0;                                  in get_descriptor_ptr() local
    1513  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);     in get_descriptor_ptr()
    1514  if (!(efer & EFER_LMA))                        in get_descriptor_ptr()
    1685  u64 efer = 0;                                  in __load_segment_descriptor() local
    1687  ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);     in __load_segment_descriptor()
    [all …]

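emulate.c rereads EFER wherever it must re-derive the emulator's mode or a descriptor-table width: with EFER.LMA set, CS.L picks 64-bit versus compatibility mode; with it clear, CS.D picks 32- versus 16-bit protected mode. A simplified decision function over the same inputs (real and VM86 modes, and the actual ctxt->ops plumbing, are omitted):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LMA (1ULL << 10)

    enum x86_mode { MODE_PROT16, MODE_PROT32, MODE_COMPAT, MODE_LONG64 };

    static enum x86_mode pick_mode(uint64_t efer, bool cs_l, bool cs_d)
    {
            if (efer & EFER_LMA)
                    return cs_l ? MODE_LONG64 : MODE_COMPAT;
            return cs_d ? MODE_PROT32 : MODE_PROT16;
    }

    int main(void)
    {
            /* Long mode + CS.L=1: a 64-bit code segment. */
            printf("%d\n", (int)pick_mode(EFER_LMA, true, false));
            return 0;
    }
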
mmu.h
     109  unsigned long cr4, u64 efer, gpa_t nested_cr3);

x86.c
     984  if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) &&          in kvm_set_cr0()
     995  if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&       in kvm_set_cr0()
    1725  static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)    in __kvm_valid_efer() argument
    1727  if (efer & EFER_AUTOIBRS && !guest_cpuid_has(vcpu, X86_FEATURE_AUTOIBRS))   in __kvm_valid_efer()
    1730  if (efer & EFER_FFXSR && !guest_cpuid_has(vcpu, X86_FEATURE_FXSR_OPT))      in __kvm_valid_efer()
    1733  if (efer & EFER_SVME && !guest_cpuid_has(vcpu, X86_FEATURE_SVM))            in __kvm_valid_efer()
    1736  if (efer & (EFER_LME | EFER_LMA) &&                              in __kvm_valid_efer()
    1740  if (efer & EFER_NX && !guest_cpuid_has(vcpu, X86_FEATURE_NX))    in __kvm_valid_efer()
    1746  bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)             in kvm_valid_efer() argument
    1748  if (efer & efer_reserved_bits)                                   in kvm_valid_efer()
    [all …]

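x86.c owns the acceptance test: kvm_valid_efer() rejects reserved bits outright, then __kvm_valid_efer() rejects any feature bit (AUTOIBRS, FFXSR, SVME, LME/LMA, NX) that the guest's CPUID does not advertise. The shape of that check, with guest_has() as a hypothetical stand-in for guest_cpuid_has() and a made-up demo policy:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME      (1ULL << 8)
    #define EFER_LMA      (1ULL << 10)
    #define EFER_NX       (1ULL << 11)
    #define EFER_SVME     (1ULL << 12)
    #define EFER_FFXSR    (1ULL << 14)
    #define EFER_AUTOIBRS (1ULL << 21)

    enum feat { FEAT_NX, FEAT_LM, FEAT_SVM, FEAT_FXSR_OPT, FEAT_AUTOIBRS };

    /* Stand-in for guest_cpuid_has(); this demo guest lacks AUTOIBRS. */
    static bool guest_has(enum feat f)
    {
            return f != FEAT_AUTOIBRS;
    }

    static bool valid_efer(uint64_t efer, uint64_t reserved_bits)
    {
            if (efer & reserved_bits)
                    return false;
            if ((efer & EFER_AUTOIBRS) && !guest_has(FEAT_AUTOIBRS))
                    return false;
            if ((efer & EFER_FFXSR) && !guest_has(FEAT_FXSR_OPT))
                    return false;
            if ((efer & EFER_SVME) && !guest_has(FEAT_SVM))
                    return false;
            if ((efer & (EFER_LME | EFER_LMA)) && !guest_has(FEAT_LM))
                    return false;
            if ((efer & EFER_NX) && !guest_has(FEAT_NX))
                    return false;
            return true;
    }

    int main(void)
    {
            printf("NX|LME ok:   %d\n", valid_efer(EFER_NX | EFER_LME, 0));
            printf("AUTOIBRS ok: %d\n", valid_efer(EFER_AUTOIBRS, 0));
            return 0;
    }
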
x86.h
     146  return !!(vcpu->arch.efer & EFER_LMA);         in is_long_mode()

/arch/x86/kvm/svm/

nested.c
      94  svm->vmcb01.ptr->save.efer,                    in nested_svm_init_mmu_context()
     281  if (CC(!(save->efer & EFER_SVME)))             in __nested_vmcb_check_save()
     296  if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) {   in __nested_vmcb_check_save()
     307  if (CC(!kvm_valid_efer(vcpu, save->efer)))     in __nested_vmcb_check_save()
     387  to->efer = from->efer;                         in __nested_copy_vmcb_save_to_cache()
     568  svm_set_efer(vcpu, svm->nested.save.efer);     in nested_vmcb02_prepare_save()
     893  vmcb01->save.efer = vcpu->arch.efer;           in nested_svm_vmrun()
     939  to_save->efer = from_save->efer;               in svm_copy_vmrun_state()
    1001  vmcb12->save.efer = svm->vcpu.arch.efer;       in nested_svm_vmexit()
    1110  svm_set_efer(vcpu, vmcb01->save.efer);         in nested_svm_vmexit()
    [all …]

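The __nested_vmcb_check_save() lines are consistency checks on the VMCB a guest hands to VMRUN: its saved EFER must keep SVME set, must pass kvm_valid_efer() against the guest's own CPUID, and its long-mode bits must agree with CR0/CR4. A reduced sketch of two of those architectural checks (the CC() tracepoints, segment checks, and CPUID check are omitted; the struct and names are ours):

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_LME    (1ULL << 8)
    #define EFER_SVME   (1ULL << 12)
    #define X86_CR0_PG  (1ULL << 31)
    #define X86_CR4_PAE (1ULL << 5)

    struct vmcb_save_model { uint64_t efer, cr0, cr4; };

    static bool nested_save_valid(const struct vmcb_save_model *save)
    {
            if (!(save->efer & EFER_SVME))
                    return false;   /* SVME may not be cleared under VMRUN */
            if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG) &&
                !(save->cr4 & X86_CR4_PAE))
                    return false;   /* long-mode paging requires PAE */
            return true;
    }

    int main(void)
    {
            struct vmcb_save_model bad = { .efer = EFER_LME, .cr0 = X86_CR0_PG };
            return nested_save_valid(&bad);   /* 0: SVME and PAE both missing */
    }
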
svm.c
     297  int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)     in svm_set_efer() argument
     300  u64 old_efer = vcpu->arch.efer;                in svm_set_efer()
     301  vcpu->arch.efer = efer;                        in svm_set_efer()
     305  efer |= EFER_NX;                               in svm_set_efer()
     307  if (!(efer & EFER_LMA))                        in svm_set_efer()
     308  efer &= ~EFER_LME;                             in svm_set_efer()
     311  if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {   in svm_set_efer()
     312  if (!(efer & EFER_SVME)) {                     in svm_set_efer()
     331  vcpu->arch.efer = old_efer;                    in svm_set_efer()
     344  svm->vmcb->save.efer = efer | EFER_SVME;       in svm_set_efer()
    [all …]

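svm_set_efer() massages the guest's EFER before it reaches the VMCB: under shadow paging (no NPT) NX is forced on because the shadow MMU depends on it, LME is stripped while LMA is clear so hardware does not enter long mode early, and SVME is always set because SVM must stay enabled while the guest runs. A sketch of just that massaging (the error handling and SVME-toggle teardown around lines 311-331 are omitted; the function is ours):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME  (1ULL << 8)
    #define EFER_LMA  (1ULL << 10)
    #define EFER_NX   (1ULL << 11)
    #define EFER_SVME (1ULL << 12)

    static uint64_t vmcb_efer(uint64_t guest_efer, bool npt_enabled)
    {
            uint64_t efer = guest_efer;

            if (!npt_enabled) {
                    efer |= EFER_NX;           /* shadow paging relies on NX */
                    if (!(efer & EFER_LMA))
                            efer &= ~EFER_LME; /* no early long-mode entry */
            }
            return efer | EFER_SVME;           /* SVM stays on while the guest runs */
    }

    int main(void)
    {
            /* 32-bit guest under shadow paging: NX forced, LME dropped, SVME kept. */
            printf("%#llx\n", (unsigned long long)vmcb_efer(EFER_LME, false));
            return 0;
    }
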
svm.h
     118  u64 efer;                                      member
     548  int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer);

/arch/x86/power/

cpu.c
     117  rdmsrl(MSR_EFER, ctxt->efer);                  in __save_processor_state()
     210  wrmsrl(MSR_EFER, ctxt->efer);                  in __restore_processor_state()

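The suspend path brackets EFER with a plain rdmsrl()/wrmsrl() pair so resume restores NX, long mode, and syscall enablement before anything depends on them. For reference, the same register can be inspected from user space through the msr driver; this assumes CONFIG_X86_MSR (the /dev/cpu/*/msr interface) and sufficient privilege:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_EFER 0xc0000080u   /* extended feature enable register */

    int main(void)
    {
            uint64_t efer;
            int fd = open("/dev/cpu/0/msr", O_RDONLY);

            /* The msr driver maps the MSR index to the file offset. */
            if (fd < 0 || pread(fd, &efer, sizeof(efer), MSR_EFER) != sizeof(efer)) {
                    perror("MSR_EFER");
                    return 1;
            }
            close(fd);
            printf("EFER = %#llx (LMA=%d NXE=%d)\n", (unsigned long long)efer,
                   (int)((efer >> 10) & 1), (int)((efer >> 11) & 1));
            return 0;
    }
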
/arch/x86/include/uapi/asm/

kvm.h
     150  __u64 efer;                                    member
     161  __u64 efer;                                    member

/arch/x86/hyperv/

hv_vtl.c
     120  input->vp_context.efer = __rdmsr(MSR_EFER);    in hv_vtl_bringup_vcpu()

ivm.c
     321  vmsa->efer = native_read_msr(MSR_EFER);        in hv_snp_boot_ap()

/arch/x86/kvm/vmx/

vmx.c
    1109  u64 guest_efer = vmx->vcpu.arch.efer;          in update_transition_efer()
    1134  (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {   in update_transition_efer()
    1879  (vmx->vcpu.arch.efer & EFER_SCE);              in vmx_setup_uret_msrs()
    3122  int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)     in vmx_set_efer() argument
    3130  vcpu->arch.efer = efer;                        in vmx_set_efer()
    3132  if (efer & EFER_LMA)                           in vmx_set_efer()
    3137  if (KVM_BUG_ON(efer & EFER_LMA, vcpu->kvm))    in vmx_set_efer()
    3161  vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);    in enter_lmode()
    3166  vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);   in exit_lmode()
    3311  if (vcpu->arch.efer & EFER_LME) {              in vmx_set_cr0()
    [all …]

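On VMX the interesting coupling is between EFER.LMA and the "IA-32e mode guest" VM-entry control: enter_lmode()/exit_lmode() flip LMA through vmx_set_efer(), which must keep the entry controls in agreement or VM entry fails its consistency checks. A sketch of that synchronization (the control-bit value is architectural; the function is ours):

    #include <stdint.h>

    #define EFER_LMA            (1ULL << 10)
    #define VM_ENTRY_IA32E_MODE (1u << 9)    /* VM-entry controls, bit 9 */

    static uint32_t sync_entry_controls(uint32_t entry_ctls, uint64_t guest_efer)
    {
            if (guest_efer & EFER_LMA)
                    return entry_ctls | VM_ENTRY_IA32E_MODE;
            return entry_ctls & ~VM_ENTRY_IA32E_MODE;
    }

    int main(void)
    {
            uint32_t ctls = sync_entry_controls(0, EFER_LMA);
            return !(ctls & VM_ENTRY_IA32E_MODE);   /* 0: control tracks LMA */
    }
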
nested.c
    2159  return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);    in nested_vmx_calc_efer()
    2161  return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);   in nested_vmx_calc_efer()
    2608  vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);   in prepare_vmcs02()
    2610  vmx_set_efer(vcpu, vcpu->arch.efer);           in prepare_vmcs02()
    2903  !!(vcpu->arch.efer & EFER_LMA)))               in nested_vmx_check_address_space_size()
    4428  vmcs12->guest_ia32_efer = vcpu->arch.efer;     in sync_vmcs02_to_vmcs12()
    4505  vcpu->arch.efer = vmcs12->host_ia32_efer;      in load_vmcs12_host_state()
    4507  vcpu->arch.efer |= (EFER_LMA | EFER_LME);      in load_vmcs12_host_state()
    4509  vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);     in load_vmcs12_host_state()
    4510  vmx_set_efer(vcpu, vcpu->arch.efer);           in load_vmcs12_host_state()

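nested_vmx_calc_efer() computes L2's initial EFER when vmcs12 does not ask to load GUEST_IA32_EFER: L1's value is inherited with LMA/LME forced to match the IA-32e-mode entry control, exactly the pattern on lines 2159/2161. A sketch of that fallback:

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_LME (1ULL << 8)
    #define EFER_LMA (1ULL << 10)

    static uint64_t calc_l2_efer(uint64_t l1_efer, bool entry_ia32e_mode)
    {
            if (entry_ia32e_mode)
                    return l1_efer | (EFER_LMA | EFER_LME);
            return l1_efer & ~(EFER_LMA | EFER_LME);
    }

    int main(void)
    {
            /* 64-bit L1 launching a 32-bit L2: both long-mode bits stripped. */
            uint64_t l2 = calc_l2_efer(EFER_LMA | EFER_LME, false);
            return (l2 & (EFER_LMA | EFER_LME)) != 0;   /* 0 on success */
    }
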
vmx.h
     391  int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);

/arch/x86/kvm/mmu/

mmu.c
     189  const u64 efer;                                member
     214  BUILD_MMU_ROLE_REGS_ACCESSOR(efer, nx, EFER_NX);
     215  BUILD_MMU_ROLE_REGS_ACCESSOR(efer, lma, EFER_LMA);
     234  BUILD_MMU_ROLE_ACCESSOR(base, efer, nx);
     235  BUILD_MMU_ROLE_ACCESSOR(ext, efer, lma);
     252  .efer = vcpu->arch.efer,                       in vcpu_to_role_regs()
    5274  unsigned long cr4, u64 efer, gpa_t nested_cr3)    in kvm_init_shadow_npt_mmu() argument
    5280  .efer = efer,                                  in kvm_init_shadow_npt_mmu()

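mmu.c freezes cr0/cr4/efer into a const role-regs snapshot and generates one-bit accessors from it (efer.nx and efer.lma above), so the paging role is derived from one consistent snapshot instead of rereading vCPU state mid-computation. A sketch of that accessor-generating pattern, with simplified names of our own:

    #include <stdbool.h>
    #include <stdint.h>

    #define EFER_LMA (1ULL << 10)
    #define EFER_NX  (1ULL << 11)

    struct mmu_role_regs_model { uint64_t cr0, cr4, efer; };

    /* Generates role_regs_<name>() returning one EFER bit from the
     * snapshot, in the spirit of BUILD_MMU_ROLE_REGS_ACCESSOR(). */
    #define ROLE_REGS_ACCESSOR(name, bit)                                      \
    static inline bool role_regs_##name(const struct mmu_role_regs_model *r)   \
    {                                                                          \
            return !!(r->efer & (bit));                                        \
    }

    ROLE_REGS_ACCESSOR(efer_nx, EFER_NX)
    ROLE_REGS_ACCESSOR(efer_lma, EFER_LMA)

    int main(void)
    {
            struct mmu_role_regs_model regs = { .efer = EFER_NX };
            return !(role_regs_efer_nx(&regs) && !role_regs_efer_lma(&regs));
    }
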
/arch/x86/kernel/

sev.c
    1047  vmsa->efer = EFER_SVME;                        in wakeup_cpu_via_vmgexit()