Lines Matching full:save

206 /* enable/disable Next RIP Save */
344 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
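The hit at line 344 reflects an SVM invariant: the EFER value in the VMCB save area must always have SVME set while the guest runs, regardless of the EFER value the guest believes it has. A minimal sketch of that masking (the helper name and standalone form are illustrative, not KVM's):

    #include <stdint.h>

    #define EFER_SVME (1ULL << 12)  /* SVM-enable bit in the EFER MSR */

    /* Whatever EFER the guest sets, the copy written to the VMCB save
     * area keeps SVME set, since SVM stays enabled while the guest runs. */
    uint64_t vmcb_efer(uint64_t guest_efer)
    {
        return guest_efer | EFER_SVME;
    }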
400 old_rflags = svm->vmcb->save.rflags; in __svm_skip_emulated_instruction()
406 svm->vmcb->save.rflags = old_rflags; in __svm_skip_emulated_instruction()
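Lines 400/406 show the rflags save/restore bracket in __svm_skip_emulated_instruction(): when the skip has to go through the instruction emulator, rflags is captured first and written back afterwards so the emulation's flag side effects are not committed unless the caller wants them. A simplified user-space sketch of the pattern, with a stand-in struct for the two save-area fields involved:

    #include <stdint.h>

    /* Stand-in for the vmcb_save_area fields used here. */
    struct fake_save { uint64_t rflags; uint64_t rip; };

    /* Illustrative skip helper: advance RIP but leave RFLAGS untouched,
     * mirroring the bracket on lines 400 and 406. */
    void skip_instruction(struct fake_save *save, unsigned int insn_len)
    {
        uint64_t old_rflags = save->rflags;  /* line 400 */
        save->rip += insn_len;               /* the emulated skip */
        save->rflags = old_rflags;           /* line 406 */
    }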
446 * Save the injection information, even when using next_rip, as the in svm_update_soft_interrupt_rip()
455 svm->soft_int_csbase = svm->vmcb->save.cs.base; in svm_update_soft_interrupt_rip()
823 * save it. in msr_write_intercepted()
827 * save it. in msr_write_intercepted()
1003 to_vmcb->save.dbgctl = from_vmcb->save.dbgctl; in svm_copy_lbrs()
1004 to_vmcb->save.br_from = from_vmcb->save.br_from; in svm_copy_lbrs()
1005 to_vmcb->save.br_to = from_vmcb->save.br_to; in svm_copy_lbrs()
1006 to_vmcb->save.last_excp_from = from_vmcb->save.last_excp_from; in svm_copy_lbrs()
1007 to_vmcb->save.last_excp_to = from_vmcb->save.last_excp_to; in svm_copy_lbrs()
1065 bool enable_lbrv = (svm_get_lbr_vmcb(svm)->save.dbgctl & DEBUGCTLMSR_LBR) || in svm_update_lbrv()
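Lines 1003-1007 are svm_copy_lbrs() moving the five LBR-related save-area fields between VMCBs wholesale, and line 1065 derives LBR virtualization state from the LBR bit of the cached DBGCTL value. A compilable sketch with simplified types (DEBUGCTLMSR_LBR is bit 0 of IA32_DEBUGCTL; the struct is a stand-in for the corresponding vmcb_save_area fields):

    #include <stdbool.h>
    #include <stdint.h>

    #define DEBUGCTLMSR_LBR (1ULL << 0)  /* LBR enable in IA32_DEBUGCTL */

    struct lbr_state {
        uint64_t dbgctl;
        uint64_t br_from, br_to;
        uint64_t last_excp_from, last_excp_to;
    };

    /* Mirrors svm_copy_lbrs(): LBR state is copied as a unit. */
    void copy_lbrs(struct lbr_state *to, const struct lbr_state *from)
    {
        to->dbgctl         = from->dbgctl;
        to->br_from        = from->br_from;
        to->br_to          = from->br_to;
        to->last_excp_from = from->last_excp_from;
        to->last_excp_to   = from->last_excp_to;
    }

    /* Mirrors the enable_lbrv check on line 1065. */
    bool lbrv_enabled(const struct lbr_state *s)
    {
        return s->dbgctl & DEBUGCTLMSR_LBR;
    }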
1085 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
1087 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
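Lines 1085/1087 (and 3946 further down) are the two halves of the NMI single-step window: KVM opens it by setting TF and RF in save.rflags and closes it by clearing them again, taking care not to clear a bit the guest had set itself. A sketch of that flag handling (function names are illustrative):

    #include <stdint.h>

    #define X86_EFLAGS_TF (1UL << 8)   /* trap flag: #DB after each insn */
    #define X86_EFLAGS_RF (1UL << 16)  /* resume flag: suppress one insn breakpoint */

    /* Open the window (line 3946): single-step the guest out of the
     * blocked region; RF keeps the first instruction from re-faulting. */
    uint64_t open_singlestep_window(uint64_t rflags)
    {
        return rflags | X86_EFLAGS_TF | X86_EFLAGS_RF;
    }

    /* Close it (lines 1085/1087): only clear bits the guest did not own. */
    uint64_t close_singlestep_window(uint64_t rflags, uint64_t guest_rflags)
    {
        if (!(guest_rflags & X86_EFLAGS_TF))
            rflags &= ~X86_EFLAGS_TF;
        if (!(guest_rflags & X86_EFLAGS_RF))
            rflags &= ~X86_EFLAGS_RF;
        return rflags;
    }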
1256 struct vmcb_save_area *save = &vmcb->save; in init_vmcb() local
1323 init_seg(&save->es); in init_vmcb()
1324 init_seg(&save->ss); in init_vmcb()
1325 init_seg(&save->ds); in init_vmcb()
1326 init_seg(&save->fs); in init_vmcb()
1327 init_seg(&save->gs); in init_vmcb()
1329 save->cs.selector = 0xf000; in init_vmcb()
1330 save->cs.base = 0xffff0000; in init_vmcb()
1332 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK | in init_vmcb()
1334 save->cs.limit = 0xffff; in init_vmcb()
1336 save->gdtr.base = 0; in init_vmcb()
1337 save->gdtr.limit = 0xffff; in init_vmcb()
1338 save->idtr.base = 0; in init_vmcb()
1339 save->idtr.limit = 0xffff; in init_vmcb()
1341 init_sys_seg(&save->ldtr, SEG_TYPE_LDT); in init_vmcb()
1342 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16); in init_vmcb()
1351 save->g_pat = vcpu->arch.pat; in init_vmcb()
1352 save->cr3 = 0; in init_vmcb()
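The init_vmcb() hits at 1323-1352 program the architectural reset state into the save area: CS selector 0xf000 with base 0xffff0000, so that base plus the reset RIP of 0xfff0 lands on the reset vector 0xfffffff0, plus 0xffff limits for CS/GDTR/IDTR and a zeroed CR3. A self-contained sketch of the CS values (the struct is a simplified stand-in for struct vmcb_seg):

    #include <stdint.h>

    struct seg {
        uint16_t selector;
        uint16_t attrib;
        uint32_t limit;
        uint64_t base;
    };

    /* x86 reset state for CS as set on lines 1329-1334; the attribute
     * masks (present, read, code) are omitted here for brevity. */
    void init_reset_cs(struct seg *cs)
    {
        cs->selector = 0xf000;
        cs->base     = 0xffff0000;  /* 0xffff0000 + 0xfff0 = 0xfffffff0 */
        cs->limit    = 0xffff;
    }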
1542 * Save additional host state that will be restored on VMEXIT (sev-es) in svm_prepare_switch_to_guest()
1543 * or subsequent vmload of host save area. in svm_prepare_switch_to_guest()
1559 * because TSC_AUX is restored on #VMEXIT from the host save area in svm_prepare_switch_to_guest()
1602 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1624 to_svm(vcpu)->vmcb->save.rflags = rflags; in svm_set_rflags()
1669 * Requesting an interrupt window if save.RFLAGS.IF=0 is pointless as in svm_set_vintr()
1710 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_seg() local
1711 struct vmcb_save_area *save01 = &to_svm(vcpu)->vmcb01.ptr->save; in svm_seg()
1714 case VCPU_SREG_CS: return &save->cs; in svm_seg()
1715 case VCPU_SREG_DS: return &save->ds; in svm_seg()
1716 case VCPU_SREG_ES: return &save->es; in svm_seg()
1719 case VCPU_SREG_SS: return &save->ss; in svm_seg()
1798 var->dpl = to_svm(vcpu)->vmcb->save.cpl; in svm_get_segment()
1805 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save; in svm_get_cpl() local
1807 return save->cpl; in svm_get_cpl()
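svm_seg() (lines 1710-1719) resolves a segment-register index to a pointer into one of two save areas: CS/DS/ES/SS live in the current VMCB, while registers handled by VMLOAD/VMSAVE are read from vmcb01 (save01). svm_get_cpl() (line 1807) just returns save.cpl, since SVM tracks CPL as an explicit save-area field rather than deriving it from SS. A sketch of the dispatch with simplified types (the enum and helper names are illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct seg { uint16_t selector, attrib; uint32_t limit; uint64_t base; };
    struct save_area { struct seg es, cs, ss, ds, fs, gs; uint8_t cpl; };

    enum sreg { SREG_ES, SREG_CS, SREG_SS, SREG_DS, SREG_FS, SREG_GS };

    /* CS/DS/ES/SS come from the current VMCB's save area; FS/GS (managed
     * by VMLOAD/VMSAVE) come from vmcb01's, here called save01. */
    struct seg *select_seg(struct save_area *save, struct save_area *save01,
                           enum sreg r)
    {
        switch (r) {
        case SREG_CS: return &save->cs;
        case SREG_DS: return &save->ds;
        case SREG_ES: return &save->es;
        case SREG_SS: return &save->ss;
        case SREG_FS: return &save01->fs;
        case SREG_GS: return &save01->gs;
        }
        return NULL;
    }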
1823 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1824 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1831 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1832 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1840 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1841 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1848 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1849 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
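The get/set pairs at 1823-1849 translate between KVM's descriptor-table pointer (size/address) and the VMCB's cached limit/base. A minimal sketch of that mapping (the struct names are stand-ins for the kernel's desc_ptr and the VMCB fields):

    #include <stdint.h>

    struct desc_ptr  { uint16_t size; uint64_t address; };
    struct table_reg { uint32_t limit; uint64_t base; };

    /* Mirrors svm_get_idt()/svm_get_gdt(). */
    void get_table(struct desc_ptr *dt, const struct table_reg *reg)
    {
        dt->size    = reg->limit;
        dt->address = reg->base;
    }

    /* Mirrors svm_set_idt()/svm_set_gdt(). */
    void set_table(struct table_reg *reg, const struct desc_ptr *dt)
    {
        reg->limit = dt->size;
        reg->base  = dt->address;
    }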
1861 * VMCB save area now, since the save area will become the initial in sev_post_set_cr3()
1862 * contents of the VMSA, and future VMCB save area updates won't be in sev_post_set_cr3()
1866 svm->vmcb->save.cr3 = cr3; in sev_post_set_cr3()
1887 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1893 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1913 svm->vmcb->save.cr0 = hcr0; in svm_set_cr0()
1954 to_svm(vcpu)->vmcb->save.cr4 = cr4; in svm_set_cr4()
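Lines 1887/1893 are the long-mode transition inside svm_set_cr0(): with EFER.LME set, enabling paging activates long mode (LMA), and disabling paging deactivates it; the VMCB copy of EFER gets both bits updated together. A sketch of just that decision (the helper name and standalone form are illustrative):

    #include <stdint.h>

    #define X86_CR0_PG (1UL << 31)   /* paging enable */
    #define EFER_LME   (1ULL << 8)   /* long mode enable */
    #define EFER_LMA   (1ULL << 10)  /* long mode active */

    /* Update the VMCB copy of EFER for a CR0.PG transition. */
    uint64_t vmcb_efer_for_cr0(uint64_t efer, uint64_t old_cr0, uint64_t new_cr0)
    {
        if (efer & EFER_LME) {
            if (!(old_cr0 & X86_CR0_PG) && (new_cr0 & X86_CR0_PG))
                efer |= EFER_LMA | EFER_LME;      /* line 1887 */
            else if ((old_cr0 & X86_CR0_PG) && !(new_cr0 & X86_CR0_PG))
                efer &= ~(EFER_LMA | EFER_LME);   /* line 1893 */
        }
        return efer;
    }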
1987 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
2024 if (unlikely(value != vmcb->save.dr6)) { in svm_set_dr6()
2025 vmcb->save.dr6 = value; in svm_set_dr6()
2042 * We cannot reset svm->vmcb->save.dr6 to DR6_ACTIVE_LOW here, in svm_sync_dirty_debug_regs()
2045 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
2046 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
2058 svm->vmcb->save.dr7 = value; in svm_set_dr7()
2097 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; in db_interception()
2111 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
2112 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
2114 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
2128 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
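The #DB/#BP hits at 2097-2128 show two details: the exception payload flips DR6 into "active high" form by XOR-ing with DR6_ACTIVE_LOW, and the PC reported to userspace is the linear address save.cs.base + save.rip. Both reduce to one-liners; a sketch (function names are illustrative, DR6_ACTIVE_LOW is the kernel's 0xffff0ff0 constant):

    #include <stdint.h>

    #define DR6_ACTIVE_LOW 0xffff0ff0ULL  /* DR6 bits that read as 1 when inactive */

    /* Line 2097: convert architectural DR6 to an active-high payload. */
    uint64_t db_payload(uint64_t dr6)
    {
        return dr6 ^ DR6_ACTIVE_LOW;
    }

    /* Lines 2114/2128: linear PC reported for the debug exit. */
    uint64_t debug_exit_pc(uint64_t cs_base, uint64_t rip)
    {
        return cs_base + rip;
    }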
2215 * The VM save area has already been encrypted so it in shutdown_interception()
2287 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_vmsave_interception()
2420 if (svm->vmcb->save.rax & ~PAGE_MASK) in gp_interception()
2497 trace_kvm_invlpga(to_svm(vcpu)->vmcb->save.rip, asid, gva); in invlpga_interception()
2507 trace_kvm_skinit(to_svm(vcpu)->vmcb->save.rip, kvm_rax_read(vcpu)); in skinit_interception()
2863 msr_info->data = svm->vmcb01.ptr->save.star; in svm_get_msr()
2867 msr_info->data = svm->vmcb01.ptr->save.lstar; in svm_get_msr()
2870 msr_info->data = svm->vmcb01.ptr->save.cstar; in svm_get_msr()
2873 msr_info->data = svm->vmcb01.ptr->save.gs.base; in svm_get_msr()
2876 msr_info->data = svm->vmcb01.ptr->save.fs.base; in svm_get_msr()
2879 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; in svm_get_msr()
2882 msr_info->data = svm->vmcb01.ptr->save.sfmask; in svm_get_msr()
2886 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; in svm_get_msr()
2889 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; in svm_get_msr()
2894 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; in svm_get_msr()
2902 msr_info->data = svm_get_lbr_vmcb(svm)->save.dbgctl; in svm_get_msr()
2905 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_from; in svm_get_msr()
2908 msr_info->data = svm_get_lbr_vmcb(svm)->save.br_to; in svm_get_msr()
2911 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_from; in svm_get_msr()
2914 msr_info->data = svm_get_lbr_vmcb(svm)->save.last_excp_to; in svm_get_msr()
2928 msr_info->data = svm->vmcb->save.spec_ctrl; in svm_get_msr()
3046 svm->vmcb01.ptr->save.g_pat = data; in svm_set_msr()
3060 svm->vmcb->save.spec_ctrl = data; in svm_set_msr()
3090 svm->vmcb01.ptr->save.star = data; in svm_set_msr()
3094 svm->vmcb01.ptr->save.lstar = data; in svm_set_msr()
3097 svm->vmcb01.ptr->save.cstar = data; in svm_set_msr()
3100 svm->vmcb01.ptr->save.gs.base = data; in svm_set_msr()
3103 svm->vmcb01.ptr->save.fs.base = data; in svm_set_msr()
3106 svm->vmcb01.ptr->save.kernel_gs_base = data; in svm_set_msr()
3109 svm->vmcb01.ptr->save.sfmask = data; in svm_set_msr()
3113 svm->vmcb01.ptr->save.sysenter_cs = data; in svm_set_msr()
3116 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; in svm_set_msr()
3127 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; in svm_set_msr()
3135 * from the host save area (which has been initialized in in svm_set_msr()
3183 svm_get_lbr_vmcb(svm)->save.dbgctl = data; in svm_set_msr()
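The long runs at 2863-2914 and 3046-3183 exist because the syscall/sysenter MSRs are simply backed by fields of vmcb01's save area, so both svm_get_msr() and svm_set_msr() reduce to a switch over the MSR index. A trimmed sketch with three of those MSRs (the struct is a stand-in; error handling and the remaining cases are omitted):

    #include <stdint.h>

    #define MSR_STAR  0xc0000081
    #define MSR_LSTAR 0xc0000082
    #define MSR_CSTAR 0xc0000083

    struct save_msrs { uint64_t star, lstar, cstar; };

    int read_msr(const struct save_msrs *save, uint32_t msr, uint64_t *data)
    {
        switch (msr) {
        case MSR_STAR:  *data = save->star;  return 0;
        case MSR_LSTAR: *data = save->lstar; return 0;
        case MSR_CSTAR: *data = save->cstar; return 0;
        default:        return -1;
        }
    }

    int write_msr(struct save_msrs *save, uint32_t msr, uint64_t data)
    {
        switch (msr) {
        case MSR_STAR:  save->star  = data; return 0;
        case MSR_LSTAR: save->lstar = data; return 0;
        case MSR_CSTAR: save->cstar = data; return 0;
        default:        return -1;
        }
    }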
3374 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb() local
3375 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; in dump_vmcb()
3421 pr_err("VMCB State Save Area:\n"); in dump_vmcb()
3424 save->es.selector, save->es.attrib, in dump_vmcb()
3425 save->es.limit, save->es.base); in dump_vmcb()
3428 save->cs.selector, save->cs.attrib, in dump_vmcb()
3429 save->cs.limit, save->cs.base); in dump_vmcb()
3432 save->ss.selector, save->ss.attrib, in dump_vmcb()
3433 save->ss.limit, save->ss.base); in dump_vmcb()
3436 save->ds.selector, save->ds.attrib, in dump_vmcb()
3437 save->ds.limit, save->ds.base); in dump_vmcb()
3448 save->gdtr.selector, save->gdtr.attrib, in dump_vmcb()
3449 save->gdtr.limit, save->gdtr.base); in dump_vmcb()
3456 save->idtr.selector, save->idtr.attrib, in dump_vmcb()
3457 save->idtr.limit, save->idtr.base); in dump_vmcb()
3463 save->vmpl, save->cpl, save->efer); in dump_vmcb()
3465 "cr0:", save->cr0, "cr2:", save->cr2); in dump_vmcb()
3467 "cr3:", save->cr3, "cr4:", save->cr4); in dump_vmcb()
3469 "dr6:", save->dr6, "dr7:", save->dr7); in dump_vmcb()
3471 "rip:", save->rip, "rflags:", save->rflags); in dump_vmcb()
3473 "rsp:", save->rsp, "rax:", save->rax); in dump_vmcb()
3485 "gpat:", save->g_pat, "dbgctl:", save->dbgctl); in dump_vmcb()
3487 "br_from:", save->br_from, "br_to:", save->br_to); in dump_vmcb()
3489 "excp_from:", save->last_excp_from, in dump_vmcb()
3490 "excp_to:", save->last_excp_to); in dump_vmcb()
3557 vcpu->arch.cr0 = svm->vmcb->save.cr0; in svm_handle_exit()
3559 vcpu->arch.cr3 = svm->vmcb->save.cr3; in svm_handle_exit()
3829 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3946 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in svm_enable_nmi_window()
4187 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
4188 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
4189 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
4215 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
4248 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
4249 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
4250 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
4251 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
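Lines 4187-4189 and 4249-4251 show that RAX, RSP and RIP are the only GPRs living in the VMCB save area (hardware loads and stores them on VMRUN/#VMEXIT); the remaining GPRs stay in vcpu->arch.regs and are switched in software, and CR2 gets the same treatment (4215/4248). A sketch of the pre/post-run sync (types and names are stand-ins):

    #include <stdint.h>

    enum { REG_RAX, REG_RSP, REG_RIP, NR_SYNC_REGS };

    struct fake_save { uint64_t rax, rsp, rip, cr2; };

    /* Before VMRUN (lines 4187-4189, 4215). */
    void sync_to_vmcb(struct fake_save *save, const uint64_t regs[NR_SYNC_REGS],
                      uint64_t cr2)
    {
        save->rax = regs[REG_RAX];
        save->rsp = regs[REG_RSP];
        save->rip = regs[REG_RIP];
        save->cr2 = cr2;
    }

    /* After #VMEXIT (lines 4248-4251). */
    void sync_from_vmcb(uint64_t regs[NR_SYNC_REGS], uint64_t *cr2,
                        const struct fake_save *save)
    {
        *cr2 = save->cr2;
        regs[REG_RAX] = save->rax;
        regs[REG_RSP] = save->rsp;
        regs[REG_RIP] = save->rip;
    }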
4329 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
4675 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_enter_smm()
4676 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_enter_smm()
4677 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_enter_smm()
4686 * be lost. Temporarily save non-VMLOAD/VMSAVE state to the host save in svm_enter_smm()
4688 * format of the area is identical to the guest save area offset in svm_enter_smm()
4691 * L1 hypervisor to save additional host context (e.g. KVM does in svm_enter_smm()
4698 BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400); in svm_enter_smm()
4701 &svm->vmcb01.ptr->save); in svm_enter_smm()
4744 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
4754 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in svm_leave_smm()
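The SMM paths rely on a fixed layout: the BUILD_BUG_ON at 4698 pins the save area at offset 0x400 inside struct vmcb, which is why svm_leave_smm() can restore vmcb01's save area straight from map_save.hva + 0x400 (line 4744). A compile-time check of the same shape, using a simplified stand-in layout (the field sizes here are arbitrary):

    #include <assert.h>
    #include <stddef.h>
    #include <string.h>

    struct fake_save_area { unsigned char bytes[0x300]; };
    struct fake_vmcb {
        unsigned char control[0x400];      /* control area fills 0x400 bytes */
        struct fake_save_area save;        /* state save area follows */
    };

    static_assert(offsetof(struct fake_vmcb, save) == 0x400,
                  "save area must sit at 0x400, as line 4698 asserts");

    /* Mirrors line 4744: the host save area page shares the layout, so
     * the saved state is read from 0x400 bytes into the mapping. */
    void restore_save_area(struct fake_save_area *dst, const void *hsave_page)
    {
        memcpy(dst, (const unsigned char *)hsave_page + 0x400, sizeof(*dst));
    }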