/arch/x86/kvm/svm/ |
D | nested.c |
      39  struct vcpu_svm *svm = to_svm(vcpu);    in nested_svm_inject_npf_exit() local
      40  struct vmcb *vmcb = svm->vmcb;    in nested_svm_inject_npf_exit()
      56  nested_svm_vmexit(svm);    in nested_svm_inject_npf_exit()
      61  struct vcpu_svm *svm = to_svm(vcpu);    in nested_svm_get_tdp_pdptr() local
      62  u64 cr3 = svm->nested.ctl.nested_cr3;    in nested_svm_get_tdp_pdptr()
      75  struct vcpu_svm *svm = to_svm(vcpu);    in nested_svm_get_tdp_cr3() local
      77  return svm->nested.ctl.nested_cr3;    in nested_svm_get_tdp_cr3()
      82  struct vcpu_svm *svm = to_svm(vcpu);    in nested_svm_init_mmu_context() local
      93  kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4,    in nested_svm_init_mmu_context()
      94  svm->vmcb01.ptr->save.efer,    in nested_svm_init_mmu_context()
      [all …]
|
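Almost every hit in nested.c starts with `struct vcpu_svm *svm = to_svm(vcpu);`, recovering the SVM-specific wrapper from the generic vCPU pointer; the in-kernel helper is a container_of() on the embedded kvm_vcpu member. Below is a minimal, freestanding sketch of that pattern; the structs are simplified stand-ins, not the real KVM types.

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-in types: the real kvm_vcpu/vcpu_svm structures are far larger. */
struct kvm_vcpu { int vcpu_id; };

struct vcpu_svm {
	struct kvm_vcpu vcpu;       /* embedded generic vCPU */
	unsigned long nested_cr3;
};

/* container_of-style downcast: recover the wrapper from the embedded member. */
static struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return (struct vcpu_svm *)((char *)vcpu - offsetof(struct vcpu_svm, vcpu));
}

int main(void)
{
	struct vcpu_svm svm = { .vcpu = { .vcpu_id = 0 }, .nested_cr3 = 0x1000 };
	struct kvm_vcpu *vcpu = &svm.vcpu;   /* what generic code passes around */

	printf("nested_cr3 = %#lx\n", to_svm(vcpu)->nested_cr3);
	return 0;
}
```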
D | svm.c |
      299  struct vcpu_svm *svm = to_svm(vcpu);    in svm_set_efer() local
      314  svm_set_gif(svm, true);    in svm_set_efer()
      317  clr_exception_intercept(svm, GP_VECTOR);    in svm_set_efer()
      325  svm_free_nested(svm);    in svm_set_efer()
      328  int ret = svm_allocate_nested(svm);    in svm_set_efer()
      340  set_exception_intercept(svm, GP_VECTOR);    in svm_set_efer()
      344  svm->vmcb->save.efer = efer | EFER_SVME;    in svm_set_efer()
      345  vmcb_mark_dirty(svm->vmcb, VMCB_CR);    in svm_set_efer()
      351  struct vcpu_svm *svm = to_svm(vcpu);    in svm_get_interrupt_shadow() local
      354  if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)    in svm_get_interrupt_shadow()
      [all …]
|
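The svm_set_efer() hits show the VMCB clean-bits pattern: after rewriting save.efer (with EFER_SVME forced on), the corresponding clean bit is dropped via vmcb_mark_dirty() so the CPU reloads that part of the VMCB instead of using a cached copy. A freestanding sketch of the idea, with stand-in types and an illustrative bit index rather than the real VMCB_CR value:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in VMCB: only the pieces the sketch needs, not the real layout. */
enum { VMCB_CR = 4 };                 /* illustrative bit index, not the real value */

struct vmcb {
	uint32_t clean_bits;          /* set bit = field group unchanged, may be cached */
	uint64_t efer;
};

/* Clearing the clean bit tells the consumer to reload that field group. */
static void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->clean_bits &= ~(1u << bit);
}

static void set_efer(struct vmcb *vmcb, uint64_t efer)
{
	vmcb->efer = efer | (1u << 12);   /* EFER.SVME (bit 12) must stay set */
	vmcb_mark_dirty(vmcb, VMCB_CR);   /* the CR field group changed */
}

int main(void)
{
	struct vmcb vmcb = { .clean_bits = ~0u };

	set_efer(&vmcb, 0x500);
	printf("efer=%#llx clean=%#x\n",
	       (unsigned long long)vmcb.efer, vmcb.clean_bits);
	return 0;
}
```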
D | svm.h |
      313  void recalc_intercepts(struct vcpu_svm *svm);
      402  static inline void set_exception_intercept(struct vcpu_svm *svm, u32 bit)    in set_exception_intercept() argument
      404  struct vmcb *vmcb = svm->vmcb01.ptr;    in set_exception_intercept()
      409  recalc_intercepts(svm);    in set_exception_intercept()
      412  static inline void clr_exception_intercept(struct vcpu_svm *svm, u32 bit)    in clr_exception_intercept() argument
      414  struct vmcb *vmcb = svm->vmcb01.ptr;    in clr_exception_intercept()
      419  recalc_intercepts(svm);    in clr_exception_intercept()
      422  static inline void svm_set_intercept(struct vcpu_svm *svm, int bit)    in svm_set_intercept() argument
      424  struct vmcb *vmcb = svm->vmcb01.ptr;    in svm_set_intercept()
      428  recalc_intercepts(svm);    in svm_set_intercept()
      [all …]
|
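The inline helpers in svm.h all follow one shape: modify the intercept state of vmcb01 (the L1 control block), then call recalc_intercepts() so the intercepts actually programmed into the running VMCB are rebuilt, merging in the nested guest's intercepts when L2 is active. A simplified, compilable sketch of that shape, with stand-in structures and a single 32-bit intercept word instead of the real bitmaps:

```c
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins; the real structures live in arch/x86/kvm/svm/svm.h. */
struct vmcb { uint32_t intercepts; };

struct vcpu_svm {
	struct vmcb vmcb01;         /* host-controlled ("L1") VMCB */
	struct vmcb active;         /* what the hardware actually runs with */
	uint32_t guest_intercepts;  /* pretend nested (L2) intercepts */
	int guest_mode;
};

/* Rebuild the active intercept set after vmcb01 changed. */
static void recalc_intercepts(struct vcpu_svm *svm)
{
	svm->active.intercepts = svm->vmcb01.intercepts;
	if (svm->guest_mode)
		svm->active.intercepts |= svm->guest_intercepts;
}

/* Same shape as the helpers above: edit vmcb01, then recalculate. */
static void set_intercept(struct vcpu_svm *svm, int bit)
{
	svm->vmcb01.intercepts |= 1u << bit;
	recalc_intercepts(svm);
}

static void clr_intercept(struct vcpu_svm *svm, int bit)
{
	svm->vmcb01.intercepts &= ~(1u << bit);
	recalc_intercepts(svm);
}

int main(void)
{
	struct vcpu_svm svm = { .guest_intercepts = 1u << 3, .guest_mode = 1 };

	set_intercept(&svm, 13);    /* bit 13, the #GP vector, as in GP_VECTOR above */
	clr_intercept(&svm, 13);
	printf("active intercepts = %#x\n", svm.active.intercepts);
	return 0;
}
```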
D | avic.c |
      85  static void avic_activate_vmcb(struct vcpu_svm *svm)    in avic_activate_vmcb() argument
      87  struct vmcb *vmcb = svm->vmcb01.ptr;    in avic_activate_vmcb()
      101  if (x2avic_enabled && apic_x2apic_mode(svm->vcpu.arch.apic)) {    in avic_activate_vmcb()
      105  svm_set_x2apic_msr_interception(svm, false);    in avic_activate_vmcb()
      111  kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, &svm->vcpu);    in avic_activate_vmcb()
      116  svm_set_x2apic_msr_interception(svm, true);    in avic_activate_vmcb()
      120  static void avic_deactivate_vmcb(struct vcpu_svm *svm)    in avic_deactivate_vmcb() argument
      122  struct vmcb *vmcb = svm->vmcb01.ptr;    in avic_deactivate_vmcb()
      131  if (is_guest_mode(&svm->vcpu) &&    in avic_deactivate_vmcb()
      132  vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))    in avic_deactivate_vmcb()
      [all …]
|
D | sev.c |
      579  static int sev_es_sync_vmsa(struct vcpu_svm *svm)    in sev_es_sync_vmsa() argument
      581  struct sev_es_save_area *save = svm->sev_es.vmsa;    in sev_es_sync_vmsa()
      584  if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))    in sev_es_sync_vmsa()
      593  memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));    in sev_es_sync_vmsa()
      596  save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];    in sev_es_sync_vmsa()
      597  save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];    in sev_es_sync_vmsa()
      598  save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];    in sev_es_sync_vmsa()
      599  save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];    in sev_es_sync_vmsa()
      600  save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];    in sev_es_sync_vmsa()
      601  save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];    in sev_es_sync_vmsa()
      [all …]
|
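sev_es_sync_vmsa() bulk-copies the tracked VMCB save area into the guest's SEV-ES VMSA and then fills in the general-purpose registers that KVM keeps outside the VMCB, indexed by the VCPU_REGS_* enum. A freestanding sketch of that copy-then-patch pattern with heavily reduced stand-in structures:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in register file and save areas (the real ones are much larger). */
enum { REG_RAX, REG_RBX, REG_RCX, REG_RDX, REG_RSP, REG_RBP, NR_REGS };

struct save_area { uint64_t rax, rbx, rcx, rdx, rsp, rbp; };

struct vcpu {
	struct save_area vmcb_save;   /* state tracked in the VMCB so far */
	uint64_t regs[NR_REGS];       /* GPRs kept outside the VMCB */
};

/* Same shape as the sync above: bulk-copy the save area, then fill in the
 * registers that are not part of it. */
static void sync_vmsa(const struct vcpu *v, struct save_area *vmsa)
{
	memcpy(vmsa, &v->vmcb_save, sizeof(*vmsa));
	vmsa->rax = v->regs[REG_RAX];
	vmsa->rbx = v->regs[REG_RBX];
	vmsa->rcx = v->regs[REG_RCX];
	vmsa->rdx = v->regs[REG_RDX];
	vmsa->rsp = v->regs[REG_RSP];
	vmsa->rbp = v->regs[REG_RBP];
}

int main(void)
{
	struct vcpu v = { .regs = { [REG_RSP] = 0x7000 } };
	struct save_area vmsa;

	sync_vmsa(&v, &vmsa);
	printf("vmsa.rsp = %#llx\n", (unsigned long long)vmsa.rsp);
	return 0;
}
```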
D | hyperv.c |
      11  struct vcpu_svm *svm = to_svm(vcpu);    in svm_hv_inject_synthetic_vmexit_post_tlb_flush() local
      13  svm->vmcb->control.exit_code = HV_SVM_EXITCODE_ENL;    in svm_hv_inject_synthetic_vmexit_post_tlb_flush()
      14  svm->vmcb->control.exit_code_hi = 0;    in svm_hv_inject_synthetic_vmexit_post_tlb_flush()
      15  svm->vmcb->control.exit_info_1 = HV_SVM_ENL_EXITCODE_TRAP_AFTER_FLUSH;    in svm_hv_inject_synthetic_vmexit_post_tlb_flush()
      16  svm->vmcb->control.exit_info_2 = 0;    in svm_hv_inject_synthetic_vmexit_post_tlb_flush()
      17  nested_svm_vmexit(svm);    in svm_hv_inject_synthetic_vmexit_post_tlb_flush()
|
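svm_hv_inject_synthetic_vmexit_post_tlb_flush() is small enough that the whole pattern is visible above: describe a synthetic exit by filling exit_code/exit_info_1/exit_info_2 in the VMCB control area, then hand it to nested_svm_vmexit() for delivery to L1. A stand-alone sketch of the same flow; the structures and the exit-code value are placeholders, not the real Hyper-V enlightenment constants:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in control area and exit handling, not the real KVM structures. */
struct vmcb_control {
	uint64_t exit_code, exit_code_hi, exit_info_1, exit_info_2;
};

struct vcpu { struct vmcb_control control; };

/* Placeholder: the real nested_svm_vmexit() unwinds L2 back to L1. */
static void nested_vmexit(const struct vcpu *v)
{
	printf("exit %#llx info1=%#llx\n",
	       (unsigned long long)v->control.exit_code,
	       (unsigned long long)v->control.exit_info_1);
}

/* Same pattern as above: describe the synthetic exit, then deliver it. */
static void inject_synthetic_vmexit(struct vcpu *v, uint64_t code, uint64_t info1)
{
	v->control.exit_code    = code;
	v->control.exit_code_hi = 0;
	v->control.exit_info_1  = info1;
	v->control.exit_info_2  = 0;
	nested_vmexit(v);
}

int main(void)
{
	struct vcpu v = { .control = { 0 } };

	inject_synthetic_vmexit(&v, 0xf0000000 /* hypothetical code */, 1);
	return 0;
}
```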
D | hyperv.h |
      16  struct vcpu_svm *svm = to_svm(vcpu);    in nested_svm_hv_update_vm_vp_ids() local
      17  struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;    in nested_svm_hv_update_vm_vp_ids()
      30  struct vcpu_svm *svm = to_svm(vcpu);    in nested_svm_l2_tlb_flush_enabled() local
      31  struct hv_vmcb_enlightenments *hve = &svm->nested.ctl.hv_enlightenments;    in nested_svm_l2_tlb_flush_enabled()
|
/arch/x86/kvm/ |
D | Makefile |
      33  kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
      34  svm/sev.o svm/hyperv.o
      37  kvm-amd-y += svm/svm_onhyperv.o
      45  $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h
|
/arch/arm/mm/ |
D | ioremap.c |
      52  struct static_vm *svm;    in find_static_vm_paddr() local
      55  list_for_each_entry(svm, &static_vmlist, list) {    in find_static_vm_paddr()
      56  vm = &svm->vm;    in find_static_vm_paddr()
      66  return svm;    in find_static_vm_paddr()
      74  struct static_vm *svm;    in find_static_vm_vaddr() local
      77  list_for_each_entry(svm, &static_vmlist, list) {    in find_static_vm_vaddr()
      78  vm = &svm->vm;    in find_static_vm_vaddr()
      85  return svm;    in find_static_vm_vaddr()
      91  void __init add_static_vm_early(struct static_vm *svm)    in add_static_vm_early() argument
      97  vm = &svm->vm;    in add_static_vm_early()
      [all …]
|
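Here svm is an unrelated identifier: ARM's static_vm bookkeeping. find_static_vm_paddr()/find_static_vm_vaddr() walk static_vmlist with list_for_each_entry() and return the entry whose range contains the requested address. A freestanding sketch of that lookup using a plain singly linked list instead of the kernel's list_head, and without the sorted insert the real add_static_vm_early() performs:

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the static_vm bookkeeping: a singly linked list of regions. */
struct region {
	unsigned long paddr;
	void *vaddr;
	size_t size;
	struct region *next;
};

static struct region *region_list;

/* Same idea as find_static_vm_paddr(): walk the list, return the entry
 * whose range contains the requested physical address. */
static struct region *find_region_paddr(unsigned long paddr, size_t size)
{
	struct region *r;

	for (r = region_list; r; r = r->next) {
		if (paddr >= r->paddr && paddr + size <= r->paddr + r->size)
			return r;
	}
	return NULL;
}

/* add_static_vm_early() analogue: register a region (unsorted here). */
static void add_region(struct region *r)
{
	r->next = region_list;
	region_list = r;
}

int main(void)
{
	static char backing[4096];
	struct region uart = {
		.paddr = 0x10000000, .vaddr = backing, .size = sizeof(backing),
	};

	add_region(&uart);
	printf("found: %p\n", (void *)find_region_paddr(0x10000100, 16));
	return 0;
}
```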
D | mmu.c |
      1025  struct static_vm *svm;    in iotable_init() local
      1030  svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm));    in iotable_init()
      1031  if (!svm)    in iotable_init()
      1033  __func__, sizeof(*svm) * nr, __alignof__(*svm));    in iotable_init()
      1038  vm = &svm->vm;    in iotable_init()
      1045  add_static_vm_early(svm++);    in iotable_init()
      1053  struct static_vm *svm;    in vm_reserve_area_early() local
      1055  svm = memblock_alloc(sizeof(*svm), __alignof__(*svm));    in vm_reserve_area_early()
      1056  if (!svm)    in vm_reserve_area_early()
      1058  __func__, sizeof(*svm), __alignof__(*svm));    in vm_reserve_area_early()
      [all …]
|
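iotable_init() makes one memblock_alloc() for all static mappings and then registers the entries one by one, advancing the pointer with add_static_vm_early(svm++). The sketch below mirrors that allocate-once, register-each shape; calloc() stands in for the early-boot allocator and the descriptor type is a placeholder:

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins: the kernel allocates these with memblock_alloc() at early boot,
 * before the normal allocator is up; calloc() takes its place here. */
struct vm_area { void *addr; size_t size; };
struct static_vm { struct vm_area vm; struct static_vm *next; };

static struct static_vm *static_list;

static void add_static_vm(struct static_vm *svm)
{
	svm->next = static_list;
	static_list = svm;
}

/* iotable_init()-like shape: one allocation covers all descriptors, and the
 * pointer is advanced (svm++) as each entry is filled in and registered. */
static void table_init(const struct vm_area *desc, int nr)
{
	struct static_vm *svm = calloc(nr, sizeof(*svm));

	if (!svm) {
		fprintf(stderr, "%s: failed to allocate %zu bytes\n",
			__func__, nr * sizeof(*svm));
		return;
	}

	for (int i = 0; i < nr; i++) {
		svm->vm = desc[i];
		add_static_vm(svm++);
	}
}

int main(void)
{
	struct vm_area maps[2] = {
		{ (void *)0x1000, 0x1000 },
		{ (void *)0x2000, 0x1000 },
	};

	table_init(maps, 2);
	for (struct static_vm *s = static_list; s; s = s->next)
		printf("%p %zu\n", s->vm.addr, s->vm.size);
	return 0;
}
```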
D | mm.h | 75 extern __init void add_static_vm_early(struct static_vm *svm);
|
/arch/powerpc/platforms/pseries/ |
D | Makefile | 28 obj-$(CONFIG_PPC_SVM) += svm.o
|
/arch/x86/include/uapi/asm/ |
D | kvm.h |
      499  struct kvm_svm_nested_state_hdr svm;    member
      512  __DECLARE_FLEX_ARRAY(struct kvm_svm_nested_state_data, svm);
|
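The uapi header pairs a fixed kvm_svm_nested_state_hdr with a variable-sized payload declared via __DECLARE_FLEX_ARRAY(), the kernel's wrapper around a C flexible array member (it exists so the construct can sit inside a union). A plain-C sketch of the same header-plus-flexible-array layout, with invented field names and sizes:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in shapes: a fixed header describing the format, followed by a
 * variable-sized payload. */
struct nested_hdr {
	uint32_t format;            /* which payload layout is valid */
	uint32_t size;              /* bytes of payload that follow */
};

struct nested_payload {
	uint8_t vmcb12[256];        /* illustrative size, not the real one */
};

struct nested_state {
	struct nested_hdr hdr;
	struct nested_payload data[];   /* flexible array member: no size of its own */
};

int main(void)
{
	struct nested_state *st = malloc(sizeof(*st) + sizeof(struct nested_payload));

	if (!st)
		return 1;
	st->hdr.format = 1;
	st->hdr.size = sizeof(struct nested_payload);
	memset(st->data, 0, st->hdr.size);
	printf("header %zu bytes + %u payload bytes\n", sizeof(*st), st->hdr.size);
	free(st);
	return 0;
}
```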
/arch/powerpc/kernel/ |
D | sysfs.c | 750 static DEVICE_ATTR(svm, 0444, show_svm, NULL);
|