/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

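/* User-mode emulation has no system state to virtualize, so the SVM
   helpers are stubbed out and do nothing. */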
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}
void helper_vmmcall(CPUX86State *env)
{
}
void helper_vmload(CPUX86State *env, int aflag)
{
}
void helper_vmsave(CPUX86State *env, int aflag)
{
}
void helper_stgi(CPUX86State *env)
{
}
void helper_clgi(CPUX86State *env)
{
}
void helper_skinit(CPUX86State *env)
{
}
void helper_invlpga(CPUX86State *env, int aflag)
{
}
void helper_vmexit(CPUX86State *env,
                   uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(CPUX86State *env,
                                      uint32_t type, uint64_t param)
{
}

void svm_check_intercept(CPUX86State *env, uint32_t type)
{
}

void helper_svm_check_io(CPUX86State *env,
                         uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

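/* The VMCB stores each segment as selector/base/limit plus a packed 12-bit
   attribute field.  QEMU keeps segment attributes in bits 8-23 of
   SegmentCache.flags; flags bits 8-15 become attrib bits 0-7 and flags bits
   20-23 become attrib bits 8-11, so these helpers convert between the two
   layouts. */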
static inline void svm_save_seg(hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(hwaddr addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(hwaddr addr,
                                      CPUX86State *env, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;
    svm_load_seg(addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

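/* VMRUN: save the host state to the hsave area, cache the intercept
   controls and load the guest state from the VMCB addressed by rAX, then
   inject any event described by the VMCB's EVENTINJ field. */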
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env));

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
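    /* With V_INTR_MASKING the guest gets a virtual TPR and a virtual
       interrupt flag; remember whether the host had IF set so host
       interrupt masking can be restored on #VMEXIT. */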
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
            break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        ENV_GET_CPU(env)->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt_x86_hardirq(env, vector, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit(env);
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit(env);
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit(env);
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

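/* VMLOAD/VMSAVE transfer the hidden processor state that a hypervisor
   cannot easily save itself: FS, GS, TR and LDTR, KernelGsBase, the
   STAR/LSTAR/CSTAR/SFMASK syscall MSRs and the SYSENTER MSRs. */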
void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}

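/* STGI and CLGI toggle the global interrupt flag; while GIF is clear the
   CPU takes no interrupts, which QEMU tracks with HF2_GIF_MASK. */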
void helper_stgi(CPUX86State *env)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    helper_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

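/* Check whether the guest has asked to intercept the given event.  The
   intercept vectors cached at VMRUN are indexed by (type - base exit code);
   MSR accesses additionally consult the MSR permission bitmap, which holds
   two bits per MSR (the low bit intercepts reads, the high bit writes). */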
void helper_svm_check_intercept_param(CPUX86State *env,
                                      uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* two bits per MSR, so the byte index is (ECX * 2) / 8 */
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(env, type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void svm_check_intercept(CPUArchState *env, uint32_t type)
{
    helper_svm_check_intercept_param(env, type, 0);
}

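/* The I/O permission map holds one intercept bit per port; the mask covers
   every byte touched by the access so that a multi-byte access overlapping
   an intercepted port also triggers a #VMEXIT. */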
void helper_svm_check_io(CPUX86State *env,
                         uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

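/* #VMEXIT: write the exit code and the guest state back to the VMCB, then
   reload the host state that VMRUN stashed in the hsave area and return
   control to the hypervisor. */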
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env,
                   uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

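    /* Propagate the current virtual TPR and any still-pending virtual
       interrupt back into int_ctl so the hypervisor can observe them. */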
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (ENV_GET_CPU(env)->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    ENV_GET_CPU(env)->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

#endif