• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *  i386 helpers
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19  */
20 #define CPU_NO_GLOBAL_REGS
21 #include "exec.h"
22 #include "exec-all.h"
23 #include "host-utils.h"
24 
25 //#define DEBUG_PCALL
26 
27 
/* Protected-mode call/interrupt tracing: real logging only when
   DEBUG_PCALL is defined, otherwise compiled to no-ops. */
#ifdef DEBUG_PCALL
#  define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
#  define LOG_PCALL_STATE(env) \
          log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
#  define LOG_PCALL(...) do { } while (0)
#  define LOG_PCALL_STATE(env) do { } while (0)
#endif
36 
37 
/* Debug aid (normally disabled): wrap raise_exception_err() so each
   raise logs the C source line it came from. */
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif
45 
/* parity_table[b] is CC_P when the byte value b has an even number of
   set bits (x86 PF flag semantics), 0 otherwise. */
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
80 
/* modulo 17 table */
/* Effective rotate count for 16-bit RCL/RCR: the 5-bit shift count is
   reduced mod 17 (16 data bits plus the carry flag). */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
88 
/* modulo 9 table */
/* Effective rotate count for 8-bit RCL/RCR: the 5-bit shift count is
   reduced mod 9 (8 data bits plus the carry flag). */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
96 
/* x87 constant table: 0, 1, pi, log10(2), ln(2), log2(e), log2(10) —
   presumably consumed by the FLD1/FLDPI/... helpers; confirm against
   the floating-point helpers elsewhere in this file. */
static const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
107 
/* broken thread support */

/* Single global lock backing the LOCK-prefix helpers below. */
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
111 
/* Acquire the global CPU lock (emulation of the LOCK prefix). */
void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}
116 
/* Release the global CPU lock taken by helper_lock(). */
void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
121 
/* Load EFLAGS from t0, modifying only the bits set in update_mask. */
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
126 
/* Recompose the guest-visible EFLAGS: arithmetic flags from the lazy
   condition-code state, the direction flag, then the stored bits with
   VM and RF masked out (they read back as 0 here). */
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;
    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
135 
/* return non zero if error */
/* Fetch the two 32-bit words of the descriptor selected by 'selector'
   from the GDT or LDT into *e1_ptr / *e2_ptr.  Returns -1 when the
   8-byte descriptor does not fit within the table limit, 0 on success. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    /* selector bit 2 (TI) picks the table: LDT if set, GDT otherwise */
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    /* the whole 8-byte descriptor must lie within the table limit */
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
156 
get_seg_limit(uint32_t e1,uint32_t e2)157 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
158 {
159     unsigned int limit;
160     limit = (e1 & 0xffff) | (e2 & 0x000f0000);
161     if (e2 & DESC_G_MASK)
162         limit = (limit << 12) | 0xfff;
163     return limit;
164 }
165 
/* Assemble the 32-bit segment base from its three descriptor pieces:
   bits 0-15 in e1[31:16], bits 16-23 in e2[7:0], bits 24-31 in
   e2[31:24]. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base_low  = e1 >> 16;
    uint32_t base_mid  = (e2 & 0xff) << 16;
    uint32_t base_high = e2 & 0xff000000;

    return base_low | base_mid | base_high;
}
170 
/* Fill a segment cache straight from descriptor words, performing no
   presence or permission checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
177 
/* init the segment cache in vm86 mode. */
/* In vm86 mode the base is simply selector << 4, the limit is 64 KiB
   and the descriptor flags are empty. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
185 
/* Read the privilege-level-'dpl' stack pointer pair (SS:ESP) from the
   current TSS, handling both 16-bit and 32-bit TSS layouts.  Raises
   #TS if the slot lies beyond the TSS limit; aborts on an unusable TR. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    /* TSS descriptor types have (type & 7) == 1 (available or busy) */
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* type bit 3 distinguishes 32-bit (8-byte slots) from 16-bit
       (4-byte slots) TSS layouts */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        /* 16-bit TSS: SP then SS, two bytes each */
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        /* 32-bit TSS: 4-byte ESP followed by the SS selector */
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
220 
/* XXX: merge with load_seg() */
/* Load segment register 'seg_reg' with 'selector' during a task
   switch.  Unlike normal segment loads, consistency failures raise
   #TS (or #NP for a not-present segment).  A null selector is
   accepted for data segments but not for CS or SS. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        /* must be a code or data segment, not a system descriptor */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* null selector: forbidden for CS and SS */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
270 
/* How the task switch was initiated; controls busy-bit/NT handling. */
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
274 
275 /* XXX: restore CPU state in registers (PowerPC case) */
switch_tss(int tss_selector,uint32_t e1,uint32_t e2,int source,uint32_t next_eip)276 static void switch_tss(int tss_selector,
277                        uint32_t e1, uint32_t e2, int source,
278                        uint32_t next_eip)
279 {
280     int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
281     target_ulong tss_base;
282     uint32_t new_regs[8], new_segs[6];
283     uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
284     uint32_t old_eflags, eflags_mask;
285     SegmentCache *dt;
286     int index;
287     target_ulong ptr;
288 
289     type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
290     LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
291 
292     /* if task gate, we read the TSS segment and we load it */
293     if (type == 5) {
294         if (!(e2 & DESC_P_MASK))
295             raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
296         tss_selector = e1 >> 16;
297         if (tss_selector & 4)
298             raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
299         if (load_segment(&e1, &e2, tss_selector) != 0)
300             raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
301         if (e2 & DESC_S_MASK)
302             raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
303         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
304         if ((type & 7) != 1)
305             raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
306     }
307 
308     if (!(e2 & DESC_P_MASK))
309         raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
310 
311     if (type & 8)
312         tss_limit_max = 103;
313     else
314         tss_limit_max = 43;
315     tss_limit = get_seg_limit(e1, e2);
316     tss_base = get_seg_base(e1, e2);
317     if ((tss_selector & 4) != 0 ||
318         tss_limit < tss_limit_max)
319         raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
320     old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
321     if (old_type & 8)
322         old_tss_limit_max = 103;
323     else
324         old_tss_limit_max = 43;
325 
326     /* read all the registers from the new TSS */
327     if (type & 8) {
328         /* 32 bit */
329         new_cr3 = ldl_kernel(tss_base + 0x1c);
330         new_eip = ldl_kernel(tss_base + 0x20);
331         new_eflags = ldl_kernel(tss_base + 0x24);
332         for(i = 0; i < 8; i++)
333             new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
334         for(i = 0; i < 6; i++)
335             new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
336         new_ldt = lduw_kernel(tss_base + 0x60);
337         new_trap = ldl_kernel(tss_base + 0x64);
338     } else {
339         /* 16 bit */
340         new_cr3 = 0;
341         new_eip = lduw_kernel(tss_base + 0x0e);
342         new_eflags = lduw_kernel(tss_base + 0x10);
343         for(i = 0; i < 8; i++)
344             new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
345         for(i = 0; i < 4; i++)
346             new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
347         new_ldt = lduw_kernel(tss_base + 0x2a);
348         new_segs[R_FS] = 0;
349         new_segs[R_GS] = 0;
350         new_trap = 0;
351     }
352 
353     /* NOTE: we must avoid memory exceptions during the task switch,
354        so we make dummy accesses before */
355     /* XXX: it can still fail in some cases, so a bigger hack is
356        necessary to valid the TLB after having done the accesses */
357 
358     v1 = ldub_kernel(env->tr.base);
359     v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
360     stb_kernel(env->tr.base, v1);
361     stb_kernel(env->tr.base + old_tss_limit_max, v2);
362 
363     /* clear busy bit (it is restartable) */
364     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
365         target_ulong ptr;
366         uint32_t e2;
367         ptr = env->gdt.base + (env->tr.selector & ~7);
368         e2 = ldl_kernel(ptr + 4);
369         e2 &= ~DESC_TSS_BUSY_MASK;
370         stl_kernel(ptr + 4, e2);
371     }
372     old_eflags = compute_eflags();
373     if (source == SWITCH_TSS_IRET)
374         old_eflags &= ~NT_MASK;
375 
376     /* save the current state in the old TSS */
377     if (type & 8) {
378         /* 32 bit */
379         stl_kernel(env->tr.base + 0x20, next_eip);
380         stl_kernel(env->tr.base + 0x24, old_eflags);
381         stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
382         stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
383         stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
384         stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
385         stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
386         stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
387         stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
388         stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
389         for(i = 0; i < 6; i++)
390             stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
391     } else {
392         /* 16 bit */
393         stw_kernel(env->tr.base + 0x0e, next_eip);
394         stw_kernel(env->tr.base + 0x10, old_eflags);
395         stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
396         stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
397         stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
398         stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
399         stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
400         stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
401         stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
402         stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
403         for(i = 0; i < 4; i++)
404             stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
405     }
406 
407     /* now if an exception occurs, it will occurs in the next task
408        context */
409 
410     if (source == SWITCH_TSS_CALL) {
411         stw_kernel(tss_base, env->tr.selector);
412         new_eflags |= NT_MASK;
413     }
414 
415     /* set busy bit */
416     if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
417         target_ulong ptr;
418         uint32_t e2;
419         ptr = env->gdt.base + (tss_selector & ~7);
420         e2 = ldl_kernel(ptr + 4);
421         e2 |= DESC_TSS_BUSY_MASK;
422         stl_kernel(ptr + 4, e2);
423     }
424 
425     /* set the new CPU state */
426     /* from this point, any exception which occurs can give problems */
427     env->cr[0] |= CR0_TS_MASK;
428     env->hflags |= HF_TS_MASK;
429     env->tr.selector = tss_selector;
430     env->tr.base = tss_base;
431     env->tr.limit = tss_limit;
432     env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
433 
434     if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
435         cpu_x86_update_cr3(env, new_cr3);
436     }
437 
438     /* load all registers without an exception, then reload them with
439        possible exception */
440     env->eip = new_eip;
441     eflags_mask = TF_MASK | AC_MASK | ID_MASK |
442         IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
443     if (!(type & 8))
444         eflags_mask &= 0xffff;
445     load_eflags(new_eflags, eflags_mask);
446     /* XXX: what to do in 16 bit case ? */
447     EAX = new_regs[0];
448     ECX = new_regs[1];
449     EDX = new_regs[2];
450     EBX = new_regs[3];
451     ESP = new_regs[4];
452     EBP = new_regs[5];
453     ESI = new_regs[6];
454     EDI = new_regs[7];
455     if (new_eflags & VM_MASK) {
456         for(i = 0; i < 6; i++)
457             load_seg_vm(i, new_segs[i]);
458         /* in vm86, CPL is always 3 */
459         cpu_x86_set_cpl(env, 3);
460     } else {
461         /* CPL is set the RPL of CS */
462         cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
463         /* first just selectors as the rest may trigger exceptions */
464         for(i = 0; i < 6; i++)
465             cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
466     }
467 
468     env->ldt.selector = new_ldt & ~4;
469     env->ldt.base = 0;
470     env->ldt.limit = 0;
471     env->ldt.flags = 0;
472 
473     /* load the LDT */
474     if (new_ldt & 4)
475         raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
476 
477     if ((new_ldt & 0xfffc) != 0) {
478         dt = &env->gdt;
479         index = new_ldt & ~7;
480         if ((index + 7) > dt->limit)
481             raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
482         ptr = dt->base + index;
483         e1 = ldl_kernel(ptr);
484         e2 = ldl_kernel(ptr + 4);
485         if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
486             raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
487         if (!(e2 & DESC_P_MASK))
488             raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
489         load_seg_cache_raw_dt(&env->ldt, e1, e2);
490     }
491 
492     /* load the segments */
493     if (!(new_eflags & VM_MASK)) {
494         tss_load_seg(R_CS, new_segs[R_CS]);
495         tss_load_seg(R_SS, new_segs[R_SS]);
496         tss_load_seg(R_ES, new_segs[R_ES]);
497         tss_load_seg(R_DS, new_segs[R_DS]);
498         tss_load_seg(R_FS, new_segs[R_FS]);
499         tss_load_seg(R_GS, new_segs[R_GS]);
500     }
501 
502     /* check that EIP is in the CS segment limits */
503     if (new_eip > env->segs[R_CS].limit) {
504         /* XXX: different exception if CALL ? */
505         raise_exception_err(EXCP0D_GPF, 0);
506     }
507 
508 #ifndef CONFIG_USER_ONLY
509     /* reset local breakpoints */
510     if (env->dr[7] & 0x55) {
511         for (i = 0; i < 4; i++) {
512             if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
513                 hw_breakpoint_remove(env, i);
514         }
515         env->dr[7] &= ~0x55;
516     }
517 #endif
518 }
519 
/* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the current TSS for an access
   of 'size' bytes at port 'addr'.  Raises #GP(0) when the TSS is not
   a usable 32-bit TSS or any bit covering the access is set. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    /* the bitmap offset is stored at 0x66 in the TSS */
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
544 
/* Check a 1-byte I/O access against the TSS permission bitmap. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}
549 
/* Check a 2-byte I/O access against the TSS permission bitmap. */
void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}
554 
/* Check a 4-byte I/O access against the TSS permission bitmap. */
void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
559 
/* Write the low byte of 'data' to I/O port 'port'. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}
564 
/* Read one byte from I/O port 'port'. */
target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}
569 
/* Write the low 16 bits of 'data' to I/O port 'port'. */
void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}
574 
/* Read a 16-bit word from I/O port 'port'. */
target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}
579 
/* Write the full 32-bit 'data' to I/O port 'port'. */
void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}
584 
/* Read a 32-bit longword from I/O port 'port'. */
target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
589 
get_sp_mask(unsigned int e2)590 static inline unsigned int get_sp_mask(unsigned int e2)
591 {
592     if (e2 & DESC_B_MASK)
593         return 0xffffffff;
594     else
595         return 0xffff;
596 }
597 
/* Return 1 if the CPU pushes an error code for exception vector
   'intno' (#DF=8, #TS=10, #NP=11, #SS=12, #GP=13, #PF=14, #AC=17),
   0 otherwise.  (Misspelled name kept intact for existing callers.) */
static int exeption_has_error_code(int intno)
{
    return intno == 8 || (intno >= 10 && intno <= 14) || intno == 17;
}
612 
/* Update ESP while preserving the bits the current stack size does
   not cover (16-bit SP, 32-bit ESP, or full 64-bit RSP). */
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
/* Push/pop 16- and 32-bit values on the kernel stack whose segment
   base is 'ssp'; 'sp' is updated in place and masked with 'sp_mask'. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
/*
 * Deliver interrupt/exception vector 'intno' in protected mode.
 * 'is_int' is non-zero for software INTn (gate DPL is checked against
 * CPL); 'is_hw' for hardware interrupts (which never push an error
 * code); 'next_eip' is the return address saved for software
 * interrupts.  Handles task gates, 286/386 interrupt and trap gates,
 * and the stack switch to an inner privilege level.
 */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    /* fetch the 8-byte gate descriptor from the IDT */
    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    /* the gate holds the target CS selector and entry offset */
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    /* the handler's CS must be a present code segment with DPL <= CPL */
    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        /* fetch and validate the new SS:ESP from the TSS */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* gate type bit 3: 1 for 386 (32-bit) gates, 0 for 286 (16-bit) */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                /* leaving vm86: data segment selectors are saved too */
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* on exit from vm86 the data segments are cleared */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    /* (type bit 0 distinguishes trap gates, which keep IF set) */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
849 
850 #ifdef TARGET_X86_64
851 
/* Push/pop 64-bit values; long mode uses the flat RSP with no mask. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
863 
/* Fetch the 64-bit RSP for privilege level 'level' from the 64-bit
   TSS (RSP0..RSP2 at offsets 4, 12, 20); raises #TS when the field
   lies beyond the TSS limit, aborts on an unusable TR. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
880 
881 /* 64 bit interrupt */
/* 64 bit interrupt: deliver vector 'intno' through the long-mode IDT.
   is_int: came from an INT instruction (next_eip is then the return
   address); is_hw: external hardware interrupt (no error code). */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    /* Only hardware exceptions push an error code; software INTs and
       external interrupts never do. */
    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    /* Long-mode IDT entries are 16 bytes, but the error code pushed
       for a fault referencing an IDT entry is vector * 8 + 2
       (selector-index format with the IDT bit set) regardless of the
       entry size -- the old "intno * 16 + 2" was wrong. */
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    /* 64-bit gate target: offset spread over all three dwords. */
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;  /* interrupt stack table index, 0 = none */
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    /* Target must be a present 64-bit (L set, B clear) code segment
       with DPL <= CPL. */
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege: new RSP from the TSS (or IST slot) */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* In 64-bit mode SS:RSP is pushed unconditionally. */
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;  /* NULL selector with RPL = new CPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
997 #endif
998 
999 #ifdef TARGET_X86_64
1000 #if defined(CONFIG_USER_ONLY)
helper_syscall(int next_eip_addend)1001 void helper_syscall(int next_eip_addend)
1002 {
1003     env->exception_index = EXCP_SYSCALL;
1004     env->exception_next_eip = env->eip + next_eip_addend;
1005     cpu_loop_exit();
1006 }
1007 #else
/* SYSCALL: fast system call entry.  Loads flat kernel CS/SS from the
   STAR MSR without touching the GDT, saves the return address (and
   RFLAGS in long mode) and jumps to the MSR-defined entry point.
   Raises #UD if EFER.SCE is clear. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    /* STAR[47:32] is the kernel CS selector; SS is implicitly CS + 8. */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* Long mode: RCX <- return RIP, R11 <- RFLAGS. */
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        /* Remember whether the caller was 64-bit code before CS is
           reloaded below; selects LSTAR vs CSTAR as the entry point. */
        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* Mask the RFLAGS bits selected by the SFMASK MSR. */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        /* Legacy mode: ECX <- return EIP; entry point is STAR[31:0]. */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
1059 #endif
1060 #endif
1061 
1062 #ifdef TARGET_X86_64
/* SYSRET: fast return from SYSCALL.  Reloads user CS/SS from
   STAR[63:48] and returns to (R/E)CX.  Raises #UD if EFER.SCE is
   clear, #GP(0) unless executed in protected mode at CPL 0. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    /* STAR[63:48] is the user segment base selector; SS is CS + 8,
       and in 64-bit mode CS is selector + 16. */
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* Return to 64-bit code (REX.W SYSRET). */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* Return to 32-bit compatibility-mode code. */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* In long mode RFLAGS comes back from R11 (reserved bits kept). */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        /* Legacy mode: flags are not restored, only IF is set. */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        if (env->hflags & HF_LMA_MASK)
            CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
1124 #endif
1125 
1126 /* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    /* IVT entries are 4 bytes: 16-bit offset then 16-bit segment.
       (error_code is unused: real-mode interrupts push none; the
       #GP error code keeps the vector*8+2 selector-index format.) */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    /* For INT n the saved EIP is the next instruction; for faults it
       is the faulting instruction itself. */
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1162 
1163 /* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* IDT entries are 16 bytes in long mode, 8 bytes otherwise. */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);  /* second dword holds DPL/type bits */

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1193 
1194 #if !defined(CONFIG_USER_ONLY)
handle_even_inj(int intno,int is_int,int error_code,int is_hw,int rm)1195 static void handle_even_inj(int intno, int is_int, int error_code,
1196 		int is_hw, int rm)
1197 {
1198     uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
1199     if (!(event_inj & SVM_EVTINJ_VALID)) {
1200 	    int type;
1201 	    if (is_int)
1202 		    type = SVM_EVTINJ_TYPE_SOFT;
1203 	    else
1204 		    type = SVM_EVTINJ_TYPE_EXEPT;
1205 	    event_inj = intno | type | SVM_EVTINJ_VALID;
1206 	    if (!rm && exeption_has_error_code(intno)) {
1207 		    event_inj |= SVM_EVTINJ_VALID_ERR;
1208 		    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
1209 	    }
1210 	    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
1211     }
1212 }
1213 #endif
1214 
1215 /*
1216  * Begin execution of an interruption. is_int is TRUE if coming from
1217  * the int instruction. next_eip is the EIP value AFTER the interrupt
1218  * instruction. It is only relevant if is_int is TRUE.
1219  */
void do_interrupt(int intno, int is_int, int error_code,
                  target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        /* Trace every interrupt taken in protected mode. */
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                /* page fault: also log the faulting linear address */
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        /* Running under SVM: mirror the event into the VMCB first. */
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    /* Delivery succeeded: drop the VALID bit from the VMCB event
       injection field. */
    if (env->hflags & HF_SVMI_MASK) {
	    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
	    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
1282 
1283 /* This should come from sysemu.h - if we could include it here... */
1284 void qemu_system_reset_request(void);
1285 
1286 /*
1287  * Check nested exceptions and change to double or triple fault if
1288  * needed. It should only be called, if this is not an interrupt.
1289  * Returns the new exception number.
1290  */
static int check_exception(int intno, int *error_code)
{
    /* "Contributory" exceptions (#DE plus vectors 10-13: #TS, #NP,
       #SS, #GP) escalate to #DF when nested; see the x86 interrupt
       class table. */
    int first_contributory = env->old_exception == 0 ||
                              (env->old_exception >= 10 &&
                               env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                               (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    /* A fault while delivering #DF is a triple fault: under SVM it
       becomes a SHUTDOWN vmexit, otherwise the machine is reset. */
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    /* contributory-after-contributory, or anything page-fault-related
       after a #PF, promotes to #DF (whose error code is always 0). */
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    /* Remember this exception only if a fault during its delivery
       could escalate (contributory, #PF or #DF). */
    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
1327 
1328 /*
1329  * Signal an interruption. It is executed in the main CPU loop.
1330  * is_int is TRUE if coming from the int instruction. next_eip is the
1331  * EIP value AFTER the interrupt instruction. It is only relevant if
1332  * is_int is TRUE.
1333  */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        /* Exceptions may be intercepted by SVM, and may escalate to
           a double/triple fault if one is already in flight. */
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        /* Software interrupts are only checked for SVM interception. */
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    /* Stash the event and longjmp back to the main CPU loop, which
       performs the actual delivery via do_interrupt(). */
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit();
}
1350 
1351 /* shortcuts to generate exceptions */
1352 
/* Raise an exception carrying an error code; does not return. */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
1357 
/* Raise an exception without an error code; does not return. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1362 
1363 /* SMM support */
1364 
1365 #if defined(CONFIG_USER_ONLY)
1366 
/* SMM is not modelled in user-mode emulation: intentionally empty. */
void do_smm_enter(void)
{
}
1370 
/* RSM is a no-op without SMM support (user-mode emulation). */
void helper_rsm(void)
{
}
1374 
1375 #else
1376 
1377 #ifdef TARGET_X86_64
1378 #define SMM_REVISION_ID 0x00020064
1379 #else
1380 #define SMM_REVISION_ID 0x00020000
1381 #endif
1382 
/* Enter System Management Mode: save the CPU state into the SMRAM
   save state area at smbase + 0x8000 (layout differs between the
   64-bit/AMD64 and 32-bit maps) and switch to the SMM entry context:
   a real-mode-like flat environment with CS based at smbase and
   EIP = 0x8000. */
void do_smm_enter(void)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* 64-bit (AMD64) save state map. */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        /* descriptor attribute bits, packed into 16 bits */
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* general purpose registers */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* 32-bit save state map. */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    /* ES/CS/SS at 0x7f84.., DS/FS/GS at 0x7f2c.. (12 bytes each);
       selectors are stored together starting at 0x7fa8. */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    /* SMM starts with paging and protection disabled. */
    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
}
1515 
/* RSM: leave System Management Mode by restoring the CPU state that
   do_smm_enter() saved in the SMRAM save state area.  Mirrors the
   64-bit/32-bit layout split of do_smm_enter(). */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    /* Revision ID bit 17 advertises SMBASE relocation support. */
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    /* Revision ID bit 17 advertises SMBASE relocation support. */
    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
1633 
1634 #endif /* !CONFIG_USER_ONLY */
1635 
1636 
1637 /* division, flags are undefined */
1638 
/* 8-bit unsigned DIV: AL <- AX / t0, AH <- AX % t0.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_divb_AL(target_ulong t0)
{
    unsigned int dividend, divisor, quot, rem;

    dividend = EAX & 0xffff;
    divisor = t0 & 0xff;
    if (divisor == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    quot = dividend / divisor;
    if (quot > 0xff) {
        raise_exception(EXCP00_DIVZ);
    }
    rem = (dividend % divisor) & 0xff;
    EAX = (EAX & ~0xffff) | (rem << 8) | (quot & 0xff);
}
1655 
/* 8-bit signed IDIV: AL <- AX / t0, AH <- AX % t0 (signed).
   Raises #DE on divide-by-zero or when the quotient does not fit in
   a signed byte.  (Operands are at most 16/8 bits, so the C division
   itself can never overflow.) */
void helper_idivb_AL(target_ulong t0)
{
    int dividend, divisor, quot, rem;

    dividend = (int16_t)EAX;
    divisor = (int8_t)t0;
    if (divisor == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    quot = dividend / divisor;
    if (quot != (int8_t)quot) {
        raise_exception(EXCP00_DIVZ);
    }
    rem = (dividend % divisor) & 0xff;
    EAX = (EAX & ~0xffff) | (rem << 8) | (quot & 0xff);
}
1672 
/* 16-bit unsigned DIV: AX <- DX:AX / t0, DX <- DX:AX % t0.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_divw_AX(target_ulong t0)
{
    unsigned int dividend, divisor, quot, rem;

    dividend = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
    divisor = t0 & 0xffff;
    if (divisor == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    quot = dividend / divisor;
    if (quot > 0xffff) {
        raise_exception(EXCP00_DIVZ);
    }
    rem = (dividend % divisor) & 0xffff;
    EAX = (EAX & ~0xffff) | (quot & 0xffff);
    EDX = (EDX & ~0xffff) | rem;
}
1690 
/* 16-bit signed IDIV: AX <- DX:AX / t0, DX <- DX:AX % t0 (signed).
   Raises #DE on divide-by-zero or when the quotient does not fit in
   a signed word. */
void helper_idivw_AX(target_ulong t0)
{
    int64_t num, q;
    int den, r;

    /* The dividend is the signed 32-bit value DX:AX.  Widen it to
       64 bits before dividing so that INT32_MIN / -1 -- which is
       undefined behaviour in C but must raise #DE via the overflow
       check below, as on hardware -- is computed safely. */
    num = (int32_t)((EAX & 0xffff) | ((EDX & 0xffff) << 16));
    den = (int16_t)t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    q = num / den;
    if (q != (int16_t)q)
        raise_exception(EXCP00_DIVZ);
    q &= 0xffff;
    r = (num % den) & 0xffff;
    EAX = (EAX & ~0xffff) | q;
    EDX = (EDX & ~0xffff) | r;
}
1708 
/* 32-bit unsigned DIV: EAX <- EDX:EAX / t0, EDX <- EDX:EAX % t0.
   Raises #DE on divide-by-zero or quotient overflow. */
void helper_divl_EAX(target_ulong t0)
{
    uint64_t dividend, quot;
    unsigned int divisor, rem;

    dividend = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    divisor = t0;
    if (divisor == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    quot = dividend / divisor;
    rem = dividend % divisor;
    if (quot > 0xffffffff) {
        raise_exception(EXCP00_DIVZ);
    }
    EAX = (uint32_t)quot;
    EDX = (uint32_t)rem;
}
1726 
/* IDIV r/m32: signed divide of EDX:EAX by the 32-bit operand.
   EAX <- quotient, EDX <- remainder.  Raises #DE on division by zero or
   quotient overflow. */
void helper_idivl_EAX(target_ulong t0)
{
    int den, r;
    int64_t num, q;

    num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
    den = t0;
    if (den == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    /* INT64_MIN / -1 overflows the host's signed division (undefined
       behavior in C; traps on x86 hosts).  The guest quotient cannot
       fit in 32 bits, so the architectural result is #DE. */
    if (den == -1 && (uint64_t)num == (1ULL << 63)) {
        raise_exception(EXCP00_DIVZ);
    }
    q = (num / den);
    r = (num % den);
    if (q != (int32_t)q)
        raise_exception(EXCP00_DIVZ);
    EAX = (uint32_t)q;
    EDX = (uint32_t)r;
}
1744 
1745 /* bcd */
1746 
1747 /* XXX: exception */
helper_aam(int base)1748 void helper_aam(int base)
1749 {
1750     int al, ah;
1751     al = EAX & 0xff;
1752     ah = al / base;
1753     al = al % base;
1754     EAX = (EAX & ~0xffff) | al | (ah << 8);
1755     CC_DST = al;
1756 }
1757 
helper_aad(int base)1758 void helper_aad(int base)
1759 {
1760     int al, ah;
1761     al = EAX & 0xff;
1762     ah = (EAX >> 8) & 0xff;
1763     al = ((ah * base) + al) & 0xff;
1764     EAX = (EAX & ~0xffff) | al;
1765     CC_DST = al;
1766 }
1767 
helper_aaa(void)1768 void helper_aaa(void)
1769 {
1770     int icarry;
1771     int al, ah, af;
1772     int eflags;
1773 
1774     eflags = helper_cc_compute_all(CC_OP);
1775     af = eflags & CC_A;
1776     al = EAX & 0xff;
1777     ah = (EAX >> 8) & 0xff;
1778 
1779     icarry = (al > 0xf9);
1780     if (((al & 0x0f) > 9 ) || af) {
1781         al = (al + 6) & 0x0f;
1782         ah = (ah + 1 + icarry) & 0xff;
1783         eflags |= CC_C | CC_A;
1784     } else {
1785         eflags &= ~(CC_C | CC_A);
1786         al &= 0x0f;
1787     }
1788     EAX = (EAX & ~0xffff) | al | (ah << 8);
1789     CC_SRC = eflags;
1790 }
1791 
/* AAS: ASCII adjust AL after subtraction (unpacked BCD).
   When the low nibble of AL is above 9 or AF is set, subtract 6 from AL
   and borrow 1 from AH, setting CF/AF; otherwise clear CF/AF.  AL keeps
   only its low nibble in both cases. */
void helper_aas(void)
{
    int icarry;
    int al, ah, af;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    af = eflags & CC_A;
    al = EAX & 0xff;
    ah = (EAX >> 8) & 0xff;

    /* subtracting 6 from AL borrows from AH when AL < 6 */
    icarry = (al < 6);
    if (((al & 0x0f) > 9 ) || af) {
        al = (al - 6) & 0x0f;
        ah = (ah - 1 - icarry) & 0xff;
        eflags |= CC_C | CC_A;
    } else {
        eflags &= ~(CC_C | CC_A);
        al &= 0x0f;
    }
    EAX = (EAX & ~0xffff) | al | (ah << 8);
    CC_SRC = eflags;
}
1815 
helper_daa(void)1816 void helper_daa(void)
1817 {
1818     int al, af, cf;
1819     int eflags;
1820 
1821     eflags = helper_cc_compute_all(CC_OP);
1822     cf = eflags & CC_C;
1823     af = eflags & CC_A;
1824     al = EAX & 0xff;
1825 
1826     eflags = 0;
1827     if (((al & 0x0f) > 9 ) || af) {
1828         al = (al + 6) & 0xff;
1829         eflags |= CC_A;
1830     }
1831     if ((al > 0x9f) || cf) {
1832         al = (al + 0x60) & 0xff;
1833         eflags |= CC_C;
1834     }
1835     EAX = (EAX & ~0xff) | al;
1836     /* well, speed is not an issue here, so we compute the flags by hand */
1837     eflags |= (al == 0) << 6; /* zf */
1838     eflags |= parity_table[al]; /* pf */
1839     eflags |= (al & 0x80); /* sf */
1840     CC_SRC = eflags;
1841 }
1842 
/* DAS: decimal adjust AL after subtraction (packed BCD).
   Follows the architectural pseudo-code: the high-nibble test uses the
   original AL value (saved in al1), not the already-adjusted one. */
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al; /* keep pre-adjustment AL for the second test */
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
1872 
helper_into(int next_eip_addend)1873 void helper_into(int next_eip_addend)
1874 {
1875     int eflags;
1876     eflags = helper_cc_compute_all(CC_OP);
1877     if (eflags & CC_O) {
1878         raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
1879     }
1880 }
1881 
/* CMPXCHG8B: compare EDX:EAX with the 64-bit value at a0.  On match,
   store ECX:EBX and set ZF; on mismatch, load the memory value into
   EDX:EAX and clear ZF.  The memory write happens on both paths, which
   matches real hardware's unconditional write cycle. */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1901 
1902 #ifdef TARGET_X86_64
/* CMPXCHG16B: 128-bit compare-exchange of RDX:RAX with the memory pair
   at a0.  On match, store RCX:RBX and set ZF; on mismatch, load the
   memory pair into RDX:RAX and clear ZF.  The operand must be 16-byte
   aligned or #GP is raised; the write happens on both paths as on real
   hardware. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
1927 #endif
1928 
/* Deliver a single-step debug trap (#DB).  In system emulation, first
   update the hardware breakpoint state and record the single-step cause
   in DR6 (BS bit). */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
1937 
helper_cpuid(void)1938 void helper_cpuid(void)
1939 {
1940     uint32_t eax, ebx, ecx, edx;
1941 
1942     helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
1943 
1944     cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
1945     EAX = eax;
1946     EBX = ebx;
1947     ECX = ecx;
1948     EDX = edx;
1949 }
1950 
/* ENTER with a non-zero nesting level: copy level-1 previous frame
   pointers from the old frame down to the new stack, then push the new
   frame pointer t1.  Addresses are formed relative to the SS base and
   wrapped with the stack-size mask.  ESP itself is updated by the
   translated code, not here.
   NOTE(review): the pre-decrement loops assume level >= 1; presumably
   the translator only emits this call for non-zero levels -- confirm. */
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
1982 
1983 #ifdef TARGET_X86_64
/* 64-bit-mode ENTER with a non-zero nesting level: like
   helper_enter_level but with flat addressing (no SS base or mask).
   NOTE(review): the pre-decrement loops assume level >= 1; presumably
   the translator only emits this call for non-zero levels -- confirm. */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
2012 #endif
2013 
/* LLDT: load the local descriptor table register from a GDT selector.
   A null selector simply clears the LDT base/limit.  Otherwise the
   selector must reference the GDT (TI clear), lie within its limit, and
   name a present LDT descriptor (system type 2); violations raise #GP
   or #NP with the selector as error code. */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* system descriptors are 16 bytes in long mode */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* long mode: bits 63..32 of the base come from the third word */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
2060 
/* LTR: load the task register from a GDT selector.  A null selector
   clears TR.  Otherwise the selector must reference the GDT, be within
   its limit, and name a present available TSS (type 1 = 286, 9 = 386);
   the descriptor is marked busy after loading. */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        /* system descriptors are 16 bytes in long mode */
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available 286 or 386 TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            /* the upper half of a 16-byte descriptor must have type 0 */
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the descriptor table */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
2115 
/* only works if protected mode and not VM86. seg_reg must be != R_CS.
   Load a data/stack segment register: validate descriptor type,
   privilege and presence, set the accessed bit, and fill the segment
   cache.  Faults raise #GP (bad descriptor/privilege), #SS (non-present
   stack segment) or #NP (non-present data segment) with the selector as
   error code. */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case: allowed except for SS (outside 64-bit
           CPL<3 code, where a null SS is architecturally legal) */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* system descriptors cannot be loaded into data segment registers */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            /* non-present stack segment faults with #SS, others with #NP */
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
2194 
/* protected mode jump: far jump either directly to a code segment, or
   indirectly through a call gate or a TSS/task gate (task switch). */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        /* the EIP limit check does not apply to long-mode code segments */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* CPL is preserved: the RPL bits of the loaded CS are forced to CPL */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            /* switch_tss reloads EFLAGS; reset the lazy cc state */
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            /* target CS:EIP come from the gate descriptor; a 286 gate
               only carries a 16-bit offset */
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
2284 
2285 /* real mode call */
helper_lcall_real(int new_cs,target_ulong new_eip1,int shift,int next_eip)2286 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2287                        int shift, int next_eip)
2288 {
2289     int new_eip;
2290     uint32_t esp, esp_mask;
2291     target_ulong ssp;
2292 
2293     new_eip = new_eip1;
2294     esp = ESP;
2295     esp_mask = get_sp_mask(env->segs[R_SS].flags);
2296     ssp = env->segs[R_SS].base;
2297     if (shift) {
2298         PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2299         PUSHL(ssp, esp, esp_mask, next_eip);
2300     } else {
2301         PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2302         PUSHW(ssp, esp, esp_mask, next_eip);
2303     }
2304 
2305     SET_ESP(esp, esp_mask);
2306     env->eip = new_eip;
2307     env->segs[R_CS].selector = new_cs;
2308     env->segs[R_CS].base = (new_cs << 4);
2309 }
2310 
2311 /* protected mode call */
helper_lcall_protected(int new_cs,target_ulong new_eip,int shift,int next_eip_addend)2312 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2313                             int shift, int next_eip_addend)
2314 {
2315     int new_stack, i;
2316     uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2317     uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
2318     uint32_t val, limit, old_sp_mask;
2319     target_ulong ssp, old_ssp, next_eip;
2320 
2321     next_eip = env->eip + next_eip_addend;
2322     LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
2323     LOG_PCALL_STATE(env);
2324     if ((new_cs & 0xfffc) == 0)
2325         raise_exception_err(EXCP0D_GPF, 0);
2326     if (load_segment(&e1, &e2, new_cs) != 0)
2327         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2328     cpl = env->hflags & HF_CPL_MASK;
2329     LOG_PCALL("desc=%08x:%08x\n", e1, e2);
2330     if (e2 & DESC_S_MASK) {
2331         if (!(e2 & DESC_CS_MASK))
2332             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2333         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2334         if (e2 & DESC_C_MASK) {
2335             /* conforming code segment */
2336             if (dpl > cpl)
2337                 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2338         } else {
2339             /* non conforming code segment */
2340             rpl = new_cs & 3;
2341             if (rpl > cpl)
2342                 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2343             if (dpl != cpl)
2344                 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2345         }
2346         if (!(e2 & DESC_P_MASK))
2347             raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2348 
2349 #ifdef TARGET_X86_64
2350         /* XXX: check 16/32 bit cases in long mode */
2351         if (shift == 2) {
2352             target_ulong rsp;
2353             /* 64 bit case */
2354             rsp = ESP;
2355             PUSHQ(rsp, env->segs[R_CS].selector);
2356             PUSHQ(rsp, next_eip);
2357             /* from this point, not restartable */
2358             ESP = rsp;
2359             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2360                                    get_seg_base(e1, e2),
2361                                    get_seg_limit(e1, e2), e2);
2362             EIP = new_eip;
2363         } else
2364 #endif
2365         {
2366             sp = ESP;
2367             sp_mask = get_sp_mask(env->segs[R_SS].flags);
2368             ssp = env->segs[R_SS].base;
2369             if (shift) {
2370                 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2371                 PUSHL(ssp, sp, sp_mask, next_eip);
2372             } else {
2373                 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2374                 PUSHW(ssp, sp, sp_mask, next_eip);
2375             }
2376 
2377             limit = get_seg_limit(e1, e2);
2378             if (new_eip > limit)
2379                 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2380             /* from this point, not restartable */
2381             SET_ESP(sp, sp_mask);
2382             cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2383                                    get_seg_base(e1, e2), limit, e2);
2384             EIP = new_eip;
2385         }
2386     } else {
2387         /* check gate type */
2388         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2389         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2390         rpl = new_cs & 3;
2391         switch(type) {
2392         case 1: /* available 286 TSS */
2393         case 9: /* available 386 TSS */
2394         case 5: /* task gate */
2395             if (dpl < cpl || dpl < rpl)
2396                 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2397             switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2398             CC_OP = CC_OP_EFLAGS;
2399             return;
2400         case 4: /* 286 call gate */
2401         case 12: /* 386 call gate */
2402             break;
2403         default:
2404             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2405             break;
2406         }
2407         shift = type >> 3;
2408 
2409         if (dpl < cpl || dpl < rpl)
2410             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2411         /* check valid bit */
2412         if (!(e2 & DESC_P_MASK))
2413             raise_exception_err(EXCP0B_NOSEG,  new_cs & 0xfffc);
2414         selector = e1 >> 16;
2415         offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2416         param_count = e2 & 0x1f;
2417         if ((selector & 0xfffc) == 0)
2418             raise_exception_err(EXCP0D_GPF, 0);
2419 
2420         if (load_segment(&e1, &e2, selector) != 0)
2421             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2422         if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2423             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2424         dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2425         if (dpl > cpl)
2426             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2427         if (!(e2 & DESC_P_MASK))
2428             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2429 
2430         if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2431             /* to inner privilege */
2432             get_ss_esp_from_tss(&ss, &sp, dpl);
2433             LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2434                         ss, sp, param_count, ESP);
2435             if ((ss & 0xfffc) == 0)
2436                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2437             if ((ss & 3) != dpl)
2438                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2439             if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2440                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2441             ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2442             if (ss_dpl != dpl)
2443                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2444             if (!(ss_e2 & DESC_S_MASK) ||
2445                 (ss_e2 & DESC_CS_MASK) ||
2446                 !(ss_e2 & DESC_W_MASK))
2447                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2448             if (!(ss_e2 & DESC_P_MASK))
2449                 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2450 
2451             //            push_size = ((param_count * 2) + 8) << shift;
2452 
2453             old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2454             old_ssp = env->segs[R_SS].base;
2455 
2456             sp_mask = get_sp_mask(ss_e2);
2457             ssp = get_seg_base(ss_e1, ss_e2);
2458             if (shift) {
2459                 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2460                 PUSHL(ssp, sp, sp_mask, ESP);
2461                 for(i = param_count - 1; i >= 0; i--) {
2462                     val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2463                     PUSHL(ssp, sp, sp_mask, val);
2464                 }
2465             } else {
2466                 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2467                 PUSHW(ssp, sp, sp_mask, ESP);
2468                 for(i = param_count - 1; i >= 0; i--) {
2469                     val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2470                     PUSHW(ssp, sp, sp_mask, val);
2471                 }
2472             }
2473             new_stack = 1;
2474         } else {
2475             /* to same privilege */
2476             sp = ESP;
2477             sp_mask = get_sp_mask(env->segs[R_SS].flags);
2478             ssp = env->segs[R_SS].base;
2479             //            push_size = (4 << shift);
2480             new_stack = 0;
2481         }
2482 
2483         if (shift) {
2484             PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2485             PUSHL(ssp, sp, sp_mask, next_eip);
2486         } else {
2487             PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2488             PUSHW(ssp, sp, sp_mask, next_eip);
2489         }
2490 
2491         /* from this point, not restartable */
2492 
2493         if (new_stack) {
2494             ss = (ss & ~3) | dpl;
2495             cpu_x86_load_seg_cache(env, R_SS, ss,
2496                                    ssp,
2497                                    get_seg_limit(ss_e1, ss_e2),
2498                                    ss_e2);
2499         }
2500 
2501         selector = (selector & ~3) | dpl;
2502         cpu_x86_load_seg_cache(env, R_CS, selector,
2503                        get_seg_base(e1, e2),
2504                        get_seg_limit(e1, e2),
2505                        e2);
2506         cpu_x86_set_cpl(env, dpl);
2507         SET_ESP(sp, sp_mask);
2508         EIP = offset;
2509     }
2510 #ifdef CONFIG_KQEMU
2511     if (kqemu_is_ok(env)) {
2512         env->exception_index = -1;
2513         cpu_loop_exit();
2514     }
2515 #endif
2516 }
2517 
2518 /* real and vm86 mode iret */
helper_iret_real(int shift)2519 void helper_iret_real(int shift)
2520 {
2521     uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2522     target_ulong ssp;
2523     int eflags_mask;
2524 
2525     sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2526     sp = ESP;
2527     ssp = env->segs[R_SS].base;
2528     if (shift == 1) {
2529         /* 32 bits */
2530         POPL(ssp, sp, sp_mask, new_eip);
2531         POPL(ssp, sp, sp_mask, new_cs);
2532         new_cs &= 0xffff;
2533         POPL(ssp, sp, sp_mask, new_eflags);
2534     } else {
2535         /* 16 bits */
2536         POPW(ssp, sp, sp_mask, new_eip);
2537         POPW(ssp, sp, sp_mask, new_cs);
2538         POPW(ssp, sp, sp_mask, new_eflags);
2539     }
2540     ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2541     env->segs[R_CS].selector = new_cs;
2542     env->segs[R_CS].base = (new_cs << 4);
2543     env->eip = new_eip;
2544     if (env->eflags & VM_MASK)
2545         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2546     else
2547         eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2548     if (shift == 0)
2549         eflags_mask &= 0xffff;
2550     load_eflags(new_eflags, eflags_mask);
2551     env->hflags2 &= ~HF2_NMI_MASK;
2552 }
2553 
validate_seg(int seg_reg,int cpl)2554 static inline void validate_seg(int seg_reg, int cpl)
2555 {
2556     int dpl;
2557     uint32_t e2;
2558 
2559     /* XXX: on x86_64, we do not want to nullify FS and GS because
2560        they may still contain a valid base. I would be interested to
2561        know how a real x86_64 CPU behaves */
2562     if ((seg_reg == R_FS || seg_reg == R_GS) &&
2563         (env->segs[seg_reg].selector & 0xfffc) == 0)
2564         return;
2565 
2566     e2 = env->segs[seg_reg].flags;
2567     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2568     if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2569         /* data or non conforming code segment */
2570         if (dpl < cpl) {
2571             cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2572         }
2573     }
2574 }
2575 
2576 /* protected mode iret */
/* Protected mode far return / IRET tail.
 *
 * Pops the return EIP and CS (plus EFLAGS for IRET) from the stack,
 * performs the RET/IRET privilege checks, and reloads CS (and SS:ESP,
 * EFLAGS and the data segment registers when the privilege level
 * changes).  An IRET whose popped EFLAGS has VM set re-enters vm86
 * mode via return_to_vm86.
 *
 * shift:   operand size: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit
 * is_iret: non-zero for IRET (pops EFLAGS as well)
 * addend:  extra bytes to release from the stack (RET imm16)
 *
 * Raises #GP/#NP (via raise_exception_err) on any failed check.
 */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1;
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
    /* pop return EIP, CS and (for IRET) EFLAGS at the operand size */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    /* the new CS must be a non-null, present code segment whose RPL is
       not more privileged than the current CPL */
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        /* conforming code segment: DPL must not exceed the return RPL */
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        /* non-conforming: DPL must equal the return RPL */
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* the outer SS:ESP is popped from the current stack */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            /* the new SS must be a present, writable data segment with
               RPL and DPL equal to the new CPL */
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* which EFLAGS bits IRET may modify depends on the old CPL and
           IOPL; 16-bit operand size restricts the update to FLAGS */
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* IRET with VM set in the popped EFLAGS: pop the vm86 frame
       (ESP, SS, ES, DS, FS, GS) and enter vm86 mode at CPL 3 */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
2770 
/* Protected mode IRET.
 *
 * If EFLAGS.NT is set this is a task return: the back-link selector is
 * read from the current TSS and a task switch is performed.  Otherwise
 * the ordinary stack-based return is done by helper_ret_protected().
 * In both cases the NMI shadow is cleared afterwards.
 *
 * shift: operand size (0/1/2); next_eip: address of the following insn,
 * forwarded to switch_tss for the task-return case.
 */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* task returns are not available in long mode */
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        /* word 0 of the TSS holds the back-link selector */
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        /* masking with 0x17 folds the 32-bit busy TSS type (11) onto
           the 16-bit one (3), so both are accepted below */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    /* IRET ends the NMI-blocking window */
    env->hflags2 &= ~HF2_NMI_MASK;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        CC_OP = CC_OP_EFLAGS;
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2804 
/* Protected mode far return (LRET): thin wrapper around
   helper_ret_protected with is_iret=0; addend is the RET imm16
   stack-release count. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2815 
/* SYSENTER: fast transition to CPL 0 using the flat code/stack
   segments derived from the SYSENTER MSRs.  #GP(0) if
   IA32_SYSENTER_CS has not been initialized. */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    /* interrupts are masked and vm86 mode is left */
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* in long mode the target code segment is 64-bit (DESC_L) */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS is the selector immediately following CS in the GDT */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
2848 
/* SYSEXIT: fast return to CPL 3.  #GP(0) unless executed at CPL 0
   with IA32_SYSENTER_CS initialized.  The user CS/SS selectors are at
   fixed offsets from sysenter_cs (legacy: +16/+24, 64-bit: +32/+40);
   the return ESP/EIP come from ECX/EDX.
   dflag: operand size; 2 selects the 64-bit return path. */
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit return: CS at sysenter_cs+32, SS at +40, both RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        /* legacy return: CS at sysenter_cs+16, SS at +24, both RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
#ifdef CONFIG_KQEMU
    if (kqemu_is_ok(env)) {
        env->exception_index = -1;
        cpu_loop_exit();
    }
#endif
}
2893 
2894 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation stub: control registers are not visible. */
target_ulong helper_read_crN(int reg)
{
    return 0;
}
2899 
/* User-mode emulation stub: control register writes are ignored. */
void helper_write_crN(int reg, target_ulong t0)
{
}
2903 
/* User-mode emulation stub: debug register writes are ignored. */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
2907 #else
helper_read_crN(int reg)2908 target_ulong helper_read_crN(int reg)
2909 {
2910     target_ulong val;
2911 
2912     helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
2913     switch(reg) {
2914     default:
2915         val = env->cr[reg];
2916         break;
2917     case 8:
2918         if (!(env->hflags2 & HF2_VINTR_MASK)) {
2919             val = cpu_get_apic_tpr(env);
2920         } else {
2921             val = env->v_tpr;
2922         }
2923         break;
2924     }
2925     return val;
2926 }
2927 
/* Write control register REG, honoring the SVM write intercept.
   CR0/CR3/CR4 go through the cpu_x86_update_* hooks; CR8 updates the
   TPR; other registers are stored directly. */
void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        /* the APIC TPR is only written when no SVM virtual-interrupt
           shadow is active; the shadow copy is always kept */
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
2952 
/* Write debug register REG, keeping the emulated hardware breakpoints
   in sync with DR0-DR3 and the DR7 control register. */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        /* DR0-DR3: re-register the one affected breakpoint */
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        /* DR7 controls all four breakpoints: re-register them all */
        for (i = 0; i < 4; i++) {
            hw_breakpoint_remove(env, i);
        }
        env->dr[7] = t0;
        for (i = 0; i < 4; i++) {
            hw_breakpoint_insert(env, i);
        }
    } else {
        env->dr[reg] = t0;
    }
}
2970 #endif
2971 
/* LMSW: load the machine status word (low 4 bits of CR0). */
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    /* ~0xe keeps the old PE bit (bit 0), so ORing in t0's low bits can
       set PE but never clear it */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
2979 
/* CLTS: clear the CR0.TS flag and its cached copy in hflags. */
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
2985 
/* INVLPG: flush the TLB entry for ADDR (after the SVM intercept). */
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
2991 
helper_rdtsc(void)2992 void helper_rdtsc(void)
2993 {
2994     uint64_t val;
2995 
2996     if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2997         raise_exception(EXCP0D_GPF);
2998     }
2999     helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
3000 
3001     val = cpu_get_tsc(env) + env->tsc_offset;
3002     EAX = (uint32_t)(val);
3003     EDX = (uint32_t)(val >> 32);
3004 }
3005 
/* RDPMC: #GP when CR4.PCE restricts it to CPL 0; the performance
   counters themselves are not emulated, so a valid RDPMC raises #UD. */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    raise_exception_err(EXCP06_ILLOP, 0);
}
3016 
3017 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation stub: MSR writes are ignored. */
void helper_wrmsr(void)
{
}
3021 
/* User-mode emulation stub: MSR reads are ignored. */
void helper_rdmsr(void)
{
}
3025 #else
helper_wrmsr(void)3026 void helper_wrmsr(void)
3027 {
3028     uint64_t val;
3029 
3030     helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
3031 
3032     val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3033 
3034     switch((uint32_t)ECX) {
3035     case MSR_IA32_SYSENTER_CS:
3036         env->sysenter_cs = val & 0xffff;
3037         break;
3038     case MSR_IA32_SYSENTER_ESP:
3039         env->sysenter_esp = val;
3040         break;
3041     case MSR_IA32_SYSENTER_EIP:
3042         env->sysenter_eip = val;
3043         break;
3044     case MSR_IA32_APICBASE:
3045         cpu_set_apic_base(env, val);
3046         break;
3047     case MSR_EFER:
3048         {
3049             uint64_t update_mask;
3050             update_mask = 0;
3051             if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3052                 update_mask |= MSR_EFER_SCE;
3053             if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3054                 update_mask |= MSR_EFER_LME;
3055             if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3056                 update_mask |= MSR_EFER_FFXSR;
3057             if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3058                 update_mask |= MSR_EFER_NXE;
3059             if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
3060                 update_mask |= MSR_EFER_SVME;
3061             if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3062                 update_mask |= MSR_EFER_FFXSR;
3063             cpu_load_efer(env, (env->efer & ~update_mask) |
3064                           (val & update_mask));
3065         }
3066         break;
3067     case MSR_STAR:
3068         env->star = val;
3069         break;
3070     case MSR_PAT:
3071         env->pat = val;
3072         break;
3073     case MSR_VM_HSAVE_PA:
3074         env->vm_hsave = val;
3075         break;
3076 #ifdef TARGET_X86_64
3077     case MSR_LSTAR:
3078         env->lstar = val;
3079         break;
3080     case MSR_CSTAR:
3081         env->cstar = val;
3082         break;
3083     case MSR_FMASK:
3084         env->fmask = val;
3085         break;
3086     case MSR_FSBASE:
3087         env->segs[R_FS].base = val;
3088         break;
3089     case MSR_GSBASE:
3090         env->segs[R_GS].base = val;
3091         break;
3092     case MSR_KERNELGSBASE:
3093         env->kernelgsbase = val;
3094         break;
3095 #endif
3096     case MSR_MTRRphysBase(0):
3097     case MSR_MTRRphysBase(1):
3098     case MSR_MTRRphysBase(2):
3099     case MSR_MTRRphysBase(3):
3100     case MSR_MTRRphysBase(4):
3101     case MSR_MTRRphysBase(5):
3102     case MSR_MTRRphysBase(6):
3103     case MSR_MTRRphysBase(7):
3104         env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
3105         break;
3106     case MSR_MTRRphysMask(0):
3107     case MSR_MTRRphysMask(1):
3108     case MSR_MTRRphysMask(2):
3109     case MSR_MTRRphysMask(3):
3110     case MSR_MTRRphysMask(4):
3111     case MSR_MTRRphysMask(5):
3112     case MSR_MTRRphysMask(6):
3113     case MSR_MTRRphysMask(7):
3114         env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
3115         break;
3116     case MSR_MTRRfix64K_00000:
3117         env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
3118         break;
3119     case MSR_MTRRfix16K_80000:
3120     case MSR_MTRRfix16K_A0000:
3121         env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
3122         break;
3123     case MSR_MTRRfix4K_C0000:
3124     case MSR_MTRRfix4K_C8000:
3125     case MSR_MTRRfix4K_D0000:
3126     case MSR_MTRRfix4K_D8000:
3127     case MSR_MTRRfix4K_E0000:
3128     case MSR_MTRRfix4K_E8000:
3129     case MSR_MTRRfix4K_F0000:
3130     case MSR_MTRRfix4K_F8000:
3131         env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
3132         break;
3133     case MSR_MTRRdefType:
3134         env->mtrr_deftype = val;
3135         break;
3136     case MSR_MCG_STATUS:
3137         env->mcg_status = val;
3138         break;
3139     case MSR_MCG_CTL:
3140         if ((env->mcg_cap & MCG_CTL_P)
3141             && (val == 0 || val == ~(uint64_t)0))
3142             env->mcg_ctl = val;
3143         break;
3144     default:
3145         if ((uint32_t)ECX >= MSR_MC0_CTL
3146             && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3147             uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3148             if ((offset & 0x3) != 0
3149                 || (val == 0 || val == ~(uint64_t)0))
3150                 env->mce_banks[offset] = val;
3151             break;
3152         }
3153         /* XXX: exception ? */
3154         break;
3155     }
3156 }
3157 
helper_rdmsr(void)3158 void helper_rdmsr(void)
3159 {
3160     uint64_t val;
3161 
3162     helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
3163 
3164     switch((uint32_t)ECX) {
3165     case MSR_IA32_SYSENTER_CS:
3166         val = env->sysenter_cs;
3167         break;
3168     case MSR_IA32_SYSENTER_ESP:
3169         val = env->sysenter_esp;
3170         break;
3171     case MSR_IA32_SYSENTER_EIP:
3172         val = env->sysenter_eip;
3173         break;
3174     case MSR_IA32_APICBASE:
3175         val = cpu_get_apic_base(env);
3176         break;
3177     case MSR_EFER:
3178         val = env->efer;
3179         break;
3180     case MSR_STAR:
3181         val = env->star;
3182         break;
3183     case MSR_PAT:
3184         val = env->pat;
3185         break;
3186     case MSR_VM_HSAVE_PA:
3187         val = env->vm_hsave;
3188         break;
3189     case MSR_IA32_PERF_STATUS:
3190         /* tsc_increment_by_tick */
3191         val = 1000ULL;
3192         /* CPU multiplier */
3193         val |= (((uint64_t)4ULL) << 40);
3194         break;
3195 #ifdef TARGET_X86_64
3196     case MSR_LSTAR:
3197         val = env->lstar;
3198         break;
3199     case MSR_CSTAR:
3200         val = env->cstar;
3201         break;
3202     case MSR_FMASK:
3203         val = env->fmask;
3204         break;
3205     case MSR_FSBASE:
3206         val = env->segs[R_FS].base;
3207         break;
3208     case MSR_GSBASE:
3209         val = env->segs[R_GS].base;
3210         break;
3211     case MSR_KERNELGSBASE:
3212         val = env->kernelgsbase;
3213         break;
3214 #endif
3215 #ifdef CONFIG_KQEMU
3216     case MSR_QPI_COMMBASE:
3217         if (env->kqemu_enabled) {
3218             val = kqemu_comm_base;
3219         } else {
3220             val = 0;
3221         }
3222         break;
3223 #endif
3224     case MSR_MTRRphysBase(0):
3225     case MSR_MTRRphysBase(1):
3226     case MSR_MTRRphysBase(2):
3227     case MSR_MTRRphysBase(3):
3228     case MSR_MTRRphysBase(4):
3229     case MSR_MTRRphysBase(5):
3230     case MSR_MTRRphysBase(6):
3231     case MSR_MTRRphysBase(7):
3232         val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
3233         break;
3234     case MSR_MTRRphysMask(0):
3235     case MSR_MTRRphysMask(1):
3236     case MSR_MTRRphysMask(2):
3237     case MSR_MTRRphysMask(3):
3238     case MSR_MTRRphysMask(4):
3239     case MSR_MTRRphysMask(5):
3240     case MSR_MTRRphysMask(6):
3241     case MSR_MTRRphysMask(7):
3242         val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
3243         break;
3244     case MSR_MTRRfix64K_00000:
3245         val = env->mtrr_fixed[0];
3246         break;
3247     case MSR_MTRRfix16K_80000:
3248     case MSR_MTRRfix16K_A0000:
3249         val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
3250         break;
3251     case MSR_MTRRfix4K_C0000:
3252     case MSR_MTRRfix4K_C8000:
3253     case MSR_MTRRfix4K_D0000:
3254     case MSR_MTRRfix4K_D8000:
3255     case MSR_MTRRfix4K_E0000:
3256     case MSR_MTRRfix4K_E8000:
3257     case MSR_MTRRfix4K_F0000:
3258     case MSR_MTRRfix4K_F8000:
3259         val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
3260         break;
3261     case MSR_MTRRdefType:
3262         val = env->mtrr_deftype;
3263         break;
3264     case MSR_MTRRcap:
3265         if (env->cpuid_features & CPUID_MTRR)
3266             val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
3267         else
3268             /* XXX: exception ? */
3269             val = 0;
3270         break;
3271     case MSR_MCG_CAP:
3272         val = env->mcg_cap;
3273         break;
3274     case MSR_MCG_CTL:
3275         if (env->mcg_cap & MCG_CTL_P)
3276             val = env->mcg_ctl;
3277         else
3278             val = 0;
3279         break;
3280     case MSR_MCG_STATUS:
3281         val = env->mcg_status;
3282         break;
3283     default:
3284         if ((uint32_t)ECX >= MSR_MC0_CTL
3285             && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
3286             uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
3287             val = env->mce_banks[offset];
3288             break;
3289         }
3290         /* XXX: exception ? */
3291         val = 0;
3292         break;
3293     }
3294     EAX = (uint32_t)(val);
3295     EDX = (uint32_t)(val >> 32);
3296 }
3297 #endif
3298 
/* LSL: return the (expanded) segment limit of SELECTOR1 and set ZF;
   on any failed check, clear ZF and return 0.  LSL never faults on a
   bad selector — the result is reported solely through ZF. */
target_ulong helper_lsl(target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialize the lazy flags now so CC_SRC can be patched below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* data / non-conforming code: DPL privilege check applies */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptors: only LDT and TSS types have a limit */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
3343 
/* LAR: return the access-rights bytes of SELECTOR1 (masked to
   0x00f0ff00) and set ZF; on any failed check, clear ZF and return 0.
   Like LSL, LAR never faults on a bad selector. */
target_ulong helper_lar(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    /* materialize the lazy flags now so CC_SRC can be patched below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            /* data / non-conforming code: DPL privilege check applies */
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        /* system descriptors accepted by LAR (LDT, TSS, gates) */
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
3389 
/* VERR: set ZF if the segment named by SELECTOR1 is readable at the
   current privilege level, otherwise clear ZF.  Never faults. */
void helper_verr(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    /* materialize the lazy flags now so CC_SRC can be patched below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    /* system descriptors are never readable via VERR */
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segment: must be readable; non-conforming segments also
           require the DPL privilege check */
        if (!(e2 & DESC_R_MASK))
            goto fail;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                goto fail;
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3422 
/* VERW: set ZF if the segment named by SELECTOR1 is writable at the
   current privilege level, otherwise clear ZF.  Never faults. */
void helper_verw(target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    /* materialize the lazy flags now so CC_SRC can be patched below */
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0)
        goto fail;
    if (load_segment(&e1, &e2, selector) != 0)
        goto fail;
    /* system descriptors are never writable via VERW */
    if (!(e2 & DESC_S_MASK))
        goto fail;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        /* code segments are never writable */
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl)
            goto fail;
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
3452 
3453 /* x87 FPU helpers */
3454 
fpu_set_exception(int mask)3455 static void fpu_set_exception(int mask)
3456 {
3457     env->fpus |= mask;
3458     if (env->fpus & (~env->fpuc & FPUC_EM))
3459         env->fpus |= FPUS_SE | FPUS_B;
3460 }
3461 
/* Divide A by B, flagging the zero-divide exception in the FPU status
   word when B is zero (the division itself still proceeds). */
static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0) {
        fpu_set_exception(FPUS_ZE);
    }
    return a / b;
}
3468 
/* Deliver a pending x87 exception: #MF when CR0.NE is set, otherwise
   (system emulation only) via the external FERR line. */
static void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
3480 
helper_flds_FT0(uint32_t val)3481 void helper_flds_FT0(uint32_t val)
3482 {
3483     union {
3484         float32 f;
3485         uint32_t i;
3486     } u;
3487     u.i = val;
3488     FT0 = float32_to_floatx(u.f, &env->fp_status);
3489 }
3490 
helper_fldl_FT0(uint64_t val)3491 void helper_fldl_FT0(uint64_t val)
3492 {
3493     union {
3494         float64 f;
3495         uint64_t i;
3496     } u;
3497     u.i = val;
3498     FT0 = float64_to_floatx(u.f, &env->fp_status);
3499 }
3500 
/* Load a 32-bit signed integer into the FT0 scratch register. */
void helper_fildl_FT0(int32_t val)
{
    FT0 = int32_to_floatx(val, &env->fp_status);
}
3505 
helper_flds_ST0(uint32_t val)3506 void helper_flds_ST0(uint32_t val)
3507 {
3508     int new_fpstt;
3509     union {
3510         float32 f;
3511         uint32_t i;
3512     } u;
3513     new_fpstt = (env->fpstt - 1) & 7;
3514     u.i = val;
3515     env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3516     env->fpstt = new_fpstt;
3517     env->fptags[new_fpstt] = 0; /* validate stack entry */
3518 }
3519 
helper_fldl_ST0(uint64_t val)3520 void helper_fldl_ST0(uint64_t val)
3521 {
3522     int new_fpstt;
3523     union {
3524         float64 f;
3525         uint64_t i;
3526     } u;
3527     new_fpstt = (env->fpstt - 1) & 7;
3528     u.i = val;
3529     env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3530     env->fpstt = new_fpstt;
3531     env->fptags[new_fpstt] = 0; /* validate stack entry */
3532 }
3533 
helper_fildl_ST0(int32_t val)3534 void helper_fildl_ST0(int32_t val)
3535 {
3536     int new_fpstt;
3537     new_fpstt = (env->fpstt - 1) & 7;
3538     env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3539     env->fpstt = new_fpstt;
3540     env->fptags[new_fpstt] = 0; /* validate stack entry */
3541 }
3542 
helper_fildll_ST0(int64_t val)3543 void helper_fildll_ST0(int64_t val)
3544 {
3545     int new_fpstt;
3546     new_fpstt = (env->fpstt - 1) & 7;
3547     env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3548     env->fpstt = new_fpstt;
3549     env->fptags[new_fpstt] = 0; /* validate stack entry */
3550 }
3551 
helper_fsts_ST0(void)3552 uint32_t helper_fsts_ST0(void)
3553 {
3554     union {
3555         float32 f;
3556         uint32_t i;
3557     } u;
3558     u.f = floatx_to_float32(ST0, &env->fp_status);
3559     return u.i;
3560 }
3561 
helper_fstl_ST0(void)3562 uint64_t helper_fstl_ST0(void)
3563 {
3564     union {
3565         float64 f;
3566         uint64_t i;
3567     } u;
3568     u.f = floatx_to_float64(ST0, &env->fp_status);
3569     return u.i;
3570 }
3571 
helper_fist_ST0(void)3572 int32_t helper_fist_ST0(void)
3573 {
3574     int32_t val;
3575     val = floatx_to_int32(ST0, &env->fp_status);
3576     if (val != (int16_t)val)
3577         val = -32768;
3578     return val;
3579 }
3580 
/* FIST (32-bit): convert ST0 to int32 using the current rounding mode. */
int32_t helper_fistl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32(ST0, &env->fp_status);
    return val;
}
3587 
/* FIST (64-bit): convert ST0 to int64 using the current rounding mode. */
int64_t helper_fistll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64(ST0, &env->fp_status);
    return val;
}
3594 
helper_fistt_ST0(void)3595 int32_t helper_fistt_ST0(void)
3596 {
3597     int32_t val;
3598     val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3599     if (val != (int16_t)val)
3600         val = -32768;
3601     return val;
3602 }
3603 
/* FISTT (32-bit): convert ST0 to int32, always truncating toward zero. */
int32_t helper_fisttl_ST0(void)
{
    int32_t val;
    val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
    return val;
}
3610 
/* FISTT (64-bit): convert ST0 to int64, always truncating toward zero. */
int64_t helper_fisttll_ST0(void)
{
    int64_t val;
    val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
    return val;
}
3617 
void helper_fldt_ST0(target_ulong ptr)
{
    /* Push an 80-bit extended-precision value read from guest memory. */
    int top = (env->fpstt - 1) & 7;

    env->fpregs[top].d = helper_fldt(ptr);
    env->fpstt = top;
    env->fptags[top] = 0; /* slot now holds a valid value */
}
3626 
/* Store ST0 as an 80-bit extended-precision value at guest address ptr. */
void helper_fstt_ST0(target_ulong ptr)
{
    helper_fstt(ST0, ptr);
}
3631 
/* TCG-callable wrapper around the inline fpush() stack-push primitive. */
void helper_fpush(void)
{
    fpush();
}
3636 
/* TCG-callable wrapper around the inline fpop() stack-pop primitive. */
void helper_fpop(void)
{
    fpop();
}
3641 
helper_fdecstp(void)3642 void helper_fdecstp(void)
3643 {
3644     env->fpstt = (env->fpstt - 1) & 7;
3645     env->fpus &= (~0x4700);
3646 }
3647 
helper_fincstp(void)3648 void helper_fincstp(void)
3649 {
3650     env->fpstt = (env->fpstt + 1) & 7;
3651     env->fpus &= (~0x4700);
3652 }
3653 
3654 /* FPU move */
3655 
/* FFREE: mark register ST(st_index) as empty in the tag array. */
void helper_ffree_STN(int st_index)
{
    env->fptags[(env->fpstt + st_index) & 7] = 1;
}
3660 
/* Copy the FT0 scratch register into ST0. */
void helper_fmov_ST0_FT0(void)
{
    ST0 = FT0;
}
3665 
/* Copy ST(st_index) into the FT0 scratch register. */
void helper_fmov_FT0_STN(int st_index)
{
    FT0 = ST(st_index);
}
3670 
/* Copy ST(st_index) into ST0. */
void helper_fmov_ST0_STN(int st_index)
{
    ST0 = ST(st_index);
}
3675 
/* Copy ST0 into ST(st_index). */
void helper_fmov_STN_ST0(int st_index)
{
    ST(st_index) = ST0;
}
3680 
helper_fxchg_ST0_STN(int st_index)3681 void helper_fxchg_ST0_STN(int st_index)
3682 {
3683     CPU86_LDouble tmp;
3684     tmp = ST(st_index);
3685     ST(st_index) = ST0;
3686     ST0 = tmp;
3687 }
3688 
3689 /* FPU operations */
3690 
3691 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3692 
helper_fcom_ST0_FT0(void)3693 void helper_fcom_ST0_FT0(void)
3694 {
3695     int ret;
3696 
3697     ret = floatx_compare(ST0, FT0, &env->fp_status);
3698     env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3699 }
3700 
helper_fucom_ST0_FT0(void)3701 void helper_fucom_ST0_FT0(void)
3702 {
3703     int ret;
3704 
3705     ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3706     env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
3707 }
3708 
3709 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3710 
helper_fcomi_ST0_FT0(void)3711 void helper_fcomi_ST0_FT0(void)
3712 {
3713     int eflags;
3714     int ret;
3715 
3716     ret = floatx_compare(ST0, FT0, &env->fp_status);
3717     eflags = helper_cc_compute_all(CC_OP);
3718     eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3719     CC_SRC = eflags;
3720 }
3721 
helper_fucomi_ST0_FT0(void)3722 void helper_fucomi_ST0_FT0(void)
3723 {
3724     int eflags;
3725     int ret;
3726 
3727     ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3728     eflags = helper_cc_compute_all(CC_OP);
3729     eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3730     CC_SRC = eflags;
3731 }
3732 
/* FADD: ST0 = ST0 + FT0. */
void helper_fadd_ST0_FT0(void)
{
    ST0 += FT0;
}
3737 
/* FMUL: ST0 = ST0 * FT0. */
void helper_fmul_ST0_FT0(void)
{
    ST0 *= FT0;
}
3742 
/* FSUB: ST0 = ST0 - FT0. */
void helper_fsub_ST0_FT0(void)
{
    ST0 -= FT0;
}
3747 
/* FSUBR: reverse subtract, ST0 = FT0 - ST0. */
void helper_fsubr_ST0_FT0(void)
{
    ST0 = FT0 - ST0;
}
3752 
/* FDIV: ST0 = ST0 / FT0, via the shared divide helper. */
void helper_fdiv_ST0_FT0(void)
{
    ST0 = helper_fdiv(ST0, FT0);
}
3757 
/* FDIVR: reverse divide, ST0 = FT0 / ST0, via the shared divide helper. */
void helper_fdivr_ST0_FT0(void)
{
    ST0 = helper_fdiv(FT0, ST0);
}
3762 
3763 /* fp operations between STN and ST0 */
3764 
/* FADD to register: ST(st_index) = ST(st_index) + ST0. */
void helper_fadd_STN_ST0(int st_index)
{
    ST(st_index) += ST0;
}
3769 
/* FMUL to register: ST(st_index) = ST(st_index) * ST0. */
void helper_fmul_STN_ST0(int st_index)
{
    ST(st_index) *= ST0;
}
3774 
/* FSUB to register: ST(st_index) = ST(st_index) - ST0. */
void helper_fsub_STN_ST0(int st_index)
{
    ST(st_index) -= ST0;
}
3779 
helper_fsubr_STN_ST0(int st_index)3780 void helper_fsubr_STN_ST0(int st_index)
3781 {
3782     CPU86_LDouble *p;
3783     p = &ST(st_index);
3784     *p = ST0 - *p;
3785 }
3786 
helper_fdiv_STN_ST0(int st_index)3787 void helper_fdiv_STN_ST0(int st_index)
3788 {
3789     CPU86_LDouble *p;
3790     p = &ST(st_index);
3791     *p = helper_fdiv(*p, ST0);
3792 }
3793 
helper_fdivr_STN_ST0(int st_index)3794 void helper_fdivr_STN_ST0(int st_index)
3795 {
3796     CPU86_LDouble *p;
3797     p = &ST(st_index);
3798     *p = helper_fdiv(ST0, *p);
3799 }
3800 
3801 /* misc FPU operations */
/* FCHS: flip the sign of ST0. */
void helper_fchs_ST0(void)
{
    ST0 = floatx_chs(ST0);
}
3806 
/* FABS: clear the sign of ST0. */
void helper_fabs_ST0(void)
{
    ST0 = floatx_abs(ST0);
}
3811 
/* FLD1: load +1.0 into ST0 (constant table f15rk, defined elsewhere). */
void helper_fld1_ST0(void)
{
    ST0 = f15rk[1];
}
3816 
/* FLDL2T: load log2(10) into ST0 (constant table f15rk, defined elsewhere). */
void helper_fldl2t_ST0(void)
{
    ST0 = f15rk[6];
}
3821 
/* FLDL2E: load log2(e) into ST0 (constant table f15rk, defined elsewhere). */
void helper_fldl2e_ST0(void)
{
    ST0 = f15rk[5];
}
3826 
/* FLDPI: load pi into ST0 (constant table f15rk, defined elsewhere). */
void helper_fldpi_ST0(void)
{
    ST0 = f15rk[2];
}
3831 
/* FLDLG2: load log10(2) into ST0 (constant table f15rk, defined elsewhere). */
void helper_fldlg2_ST0(void)
{
    ST0 = f15rk[3];
}
3836 
/* FLDLN2: load ln(2) into ST0 (constant table f15rk, defined elsewhere). */
void helper_fldln2_ST0(void)
{
    ST0 = f15rk[4];
}
3841 
/* FLDZ: load +0.0 into ST0 (constant table f15rk, defined elsewhere). */
void helper_fldz_ST0(void)
{
    ST0 = f15rk[0];
}
3846 
/* Load +0.0 into the FT0 scratch register. */
void helper_fldz_FT0(void)
{
    FT0 = f15rk[0];
}
3851 
/* FNSTSW: return the status word with TOP (fpstt) merged into bits 13..11. */
uint32_t helper_fnstsw(void)
{
    return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
}
3856 
/* FNSTCW: return the current FPU control word. */
uint32_t helper_fnstcw(void)
{
    return env->fpuc;
}
3861 
/* Propagate the x87 control word (env->fpuc) into the softfloat
   fp_status: rounding mode and, when FLOATX80 is available, rounding
   precision. */
static void update_fp_status(void)
{
    int rnd_type;

    /* set rounding mode */
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        rnd_type = float_round_nearest_even;
        break;
    case RC_DOWN:
        rnd_type = float_round_down;
        break;
    case RC_UP:
        rnd_type = float_round_up;
        break;
    case RC_CHOP:
        rnd_type = float_round_to_zero;
        break;
    }
    set_float_rounding_mode(rnd_type, &env->fp_status);
#ifdef FLOATX80
    /* Precision-control field (CW bits 9..8): 0 = single (32),
       2 = double (64), 3 = extended (80); the reserved value 1
       falls into default and is treated as extended. */
    switch((env->fpuc >> 8) & 3) {
    case 0:
        rnd_type = 32;
        break;
    case 2:
        rnd_type = 64;
        break;
    case 3:
    default:
        rnd_type = 80;
        break;
    }
    set_floatx80_rounding_precision(rnd_type, &env->fp_status);
#endif
}
3899 
/* FLDCW: set the control word and refresh the softfloat status. */
void helper_fldcw(uint32_t val)
{
    env->fpuc = val;
    update_fp_status();
}
3905 
/* FNCLEX: clear the exception flags (bits 0..7) and busy bit (15);
   condition codes and TOP (bits 8..14) are preserved. */
void helper_fclex(void)
{
    env->fpus &= 0x7f00;
}
3910 
/* FWAIT: deliver a pending FPU exception if the summary (ES) bit is set. */
void helper_fwait(void)
{
    if (env->fpus & FPUS_SE)
        fpu_raise_exception();
}
3916 
helper_fninit(void)3917 void helper_fninit(void)
3918 {
3919     env->fpus = 0;
3920     env->fpstt = 0;
3921     env->fpuc = 0x37f;
3922     env->fptags[0] = 1;
3923     env->fptags[1] = 1;
3924     env->fptags[2] = 1;
3925     env->fptags[3] = 1;
3926     env->fptags[4] = 1;
3927     env->fptags[5] = 1;
3928     env->fptags[6] = 1;
3929     env->fptags[7] = 1;
3930 }
3931 
3932 /* BCD ops */
3933 
void helper_fbld_ST0(target_ulong ptr)
{
    /* FBLD: load an 18-digit packed-BCD value (9 data bytes plus a
       sign byte at offset 9) from guest memory and push it. */
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int byte;
    int i;

    val = 0;
    for (i = 8; i >= 0; i--) {
        byte = ldub(ptr + i);
        val = val * 100 + (byte >> 4) * 10 + (byte & 0xf);
    }
    tmp = val;
    if (ldub(ptr + 9) & 0x80) {
        tmp = -tmp;
    }
    fpush();
    ST0 = tmp;
}
3952 
/* FBSTP: store ST0 as an 18-digit packed-BCD value (9 data bytes,
   sign byte at offset 9) to guest memory. */
void helper_fbst_ST0(target_ulong ptr)
{
    int v;
    target_ulong mem_ref, mem_end;
    int64_t val;

    val = floatx_to_int64(ST0, &env->fp_status);
    mem_ref = ptr;
    mem_end = mem_ref + 9;
    /* sign byte first, then work with the magnitude */
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    /* emit two decimal digits per byte, least significant first */
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    /* zero-fill the remaining digit bytes */
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
3980 
/* F2XM1: ST0 = 2^ST0 - 1, computed with the host pow(). */
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}
3985 
helper_fyl2x(void)3986 void helper_fyl2x(void)
3987 {
3988     CPU86_LDouble fptemp;
3989 
3990     fptemp = ST0;
3991     if (fptemp>0.0){
3992         fptemp = log(fptemp)/log(2.0);	 /* log2(ST) */
3993         ST1 *= fptemp;
3994         fpop();
3995     } else {
3996         env->fpus &= (~0x4700);
3997         env->fpus |= 0x400;
3998     }
3999 }
4000 
/* FPTAN: replace ST0 with tan(ST0) and push 1.0; arguments outside
   the reducible range only set C2 and leave the stack unchanged. */
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}
4016 
helper_fpatan(void)4017 void helper_fpatan(void)
4018 {
4019     CPU86_LDouble fptemp, fpsrcop;
4020 
4021     fpsrcop = ST1;
4022     fptemp = ST0;
4023     ST1 = atan2(fpsrcop,fptemp);
4024     fpop();
4025 }
4026 
/* FXTRACT: replace ST0 with its unbiased exponent, then push the
   significand with its exponent rebased (via BIASEXPONENT). */
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
4040 
/* FPREM1: IEEE partial remainder ST0 = ST0 REM ST1 (round-to-nearest
   quotient).  Large exponent differences are reduced in steps, with
   C2 signalling an incomplete reduction; the low three quotient bits
   are reported in C0/C3/C1. */
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
        ST0 = 0.0 / 0.0; /* NaN */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        return;
    }

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if (expdif < 53) {
        /* exponents close enough: finish the reduction in one step */
        dblq = fpsrcop / fptemp;
        /* round dblq towards nearest integer */
        dblq = rint(dblq);
        ST0 = fpsrcop - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        /* partial reduction only: peel off a chunk and flag C2 */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4094 
/* FPREM: 8087-style partial remainder ST0 = ST0 % ST1 (truncated
   quotient, unlike FPREM1's round-to-nearest).  C2 set means the
   reduction is incomplete; C0/C3/C1 receive the low quotient bits. */
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    signed long long int q;

    /* invalid operands produce a NaN and clear the condition codes */
    if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
       ST0 = 0.0 / 0.0; /* NaN */
       env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
       return;
    }

    fpsrcop = (CPU86_LDouble)ST0;
    fptemp = (CPU86_LDouble)ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);

    if (expdif < 0) {
        /* optimisation? taken from the AMD docs */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* ST0 is unchanged */
        return;
    }

    if ( expdif < 53 ) {
        /* exponents close enough: finish the reduction in one step */
        dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
        /* round dblq towards zero */
        dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
        ST0 = fpsrcop/*ST0*/ - fptemp * dblq;

        /* convert dblq to q by truncating towards zero */
        if (dblq < 0.0)
           q = (signed long long int)(-dblq);
        else
           q = (signed long long int)dblq;

        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C3,C1) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << (8 - 2);  /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
        env->fpus |= (q & 0x1) << (9 - 0);  /* (C1) <-- q0 */
    } else {
        /* partial reduction only: peel off a chunk and flag C2 */
        int N = 32 + (expdif % 32); /* as per AMD docs */
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, (double)(expdif - N));
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0) ?
                  -(floor(fabs(fpsrcop))) : floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
4149 
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), then pop; if ST0 + 1 is not
   positive, only clear the condition codes and set C2. */
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
4164 
/* FSQRT: ST0 = sqrt(ST0).  A negative input clears the condition
   codes and sets the 0x400 bit before the (NaN-producing) sqrt. */
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
4176 
/* FSINCOS: replace ST0 with sin(ST0) and push cos(ST0); out-of-range
   arguments only set C2 and leave the stack unchanged. */
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
4192 
/* FRNDINT: round ST0 to an integer with the current rounding mode. */
void helper_frndint(void)
{
    ST0 = floatx_round_to_int(ST0, &env->fp_status);
}
4197 
/* FSCALE: ST0 = ST0 * 2^trunc(ST1), via the host ldexp(). */
void helper_fscale(void)
{
    ST0 = ldexp (ST0, (int)(ST1));
}
4202 
/* FSIN: ST0 = sin(ST0); out-of-range arguments only set C2. */
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}
4216 
/* FCOS: ST0 = cos(ST0); out-of-range arguments only set C2. */
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
4230 
/* FXAM: classify ST0 (zero / denormal / normal / infinity / NaN) into
   the condition-code bits; C1 receives the sign. */
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    /* XXX: test fptags too */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        /* all-ones exponent: infinity or NaN, distinguished by the
           mantissa (extended format keeps an explicit integer bit) */
#ifdef USE_X86LDOUBLE
        if (MANTD(temp) == 0x8000000000000000ULL)
#else
        if (MANTD(temp) == 0)
#endif
            env->fpus |=  0x500 /*Infinity*/;
        else
            env->fpus |=  0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |=  0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
4262 
/* FSTENV: store the FPU environment (control/status/tag words plus
   zeroed instruction/operand pointers) in 16- or 32-bit layout.  The
   tag word is recomputed from the register contents. */
void helper_fstenv(target_ulong ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    /* status word with TOP merged into bits 13..11 */
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    /* rebuild the 2-bit-per-register tag word from the stored values */
    for (i=7; i>=0; i--) {
	fptag <<= 2;
	if (env->fptags[i]) {
            fptag |= 3;
	} else {
            tmp.d = env->fpregs[i].d;
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
	        fptag |= 1;
	    } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
4312 
void helper_fldenv(target_ulong ptr, int data32)
{
    /* FLDENV: reload control, status and tag words from a 16- or
       32-bit FPU environment image in guest memory. */
    int i, fpus, fptag;

    env->fpuc = lduw(ptr);
    if (data32) {
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    /* split TOP back out of the stored status word */
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    /* only "empty" (11) is tracked per register; other tag values are
       recomputed on demand */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
4334 
void helper_fsave(target_ulong ptr, int data32)
{
    /* FSAVE: store the environment and all eight 80-bit registers,
       then reset the FPU as FNINIT would. */
    target_ulong addr;
    int i;

    helper_fstenv(ptr, data32);

    /* register image follows the 14- or 28-byte environment */
    addr = ptr + (14 << data32);
    for (i = 0; i < 8; i++) {
        helper_fstt(ST(i), addr);
        addr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1; /* all registers empty */
    }
}
4362 
void helper_frstor(target_ulong ptr, int data32)
{
    /* FRSTOR: reload the environment, then all eight 80-bit registers. */
    target_ulong addr;
    int i;

    helper_fldenv(ptr, data32);
    /* register image follows the 14- or 28-byte environment */
    addr = ptr + (14 << data32);
    for (i = 0; i < 8; i++) {
        ST(i) = helper_fldt(addr);
        addr += 10;
    }
}
4377 
/* FXSAVE: store FPU and (if OSFXSR is enabled) SSE state in the fixed
   512-byte layout.  The tag word is compressed to one bit per register
   and inverted (1 = valid).  "Fast FXSAVE" (FFXSR, CPL0, long mode)
   skips the XMM register area. */
void helper_fxsave(target_ulong ptr, int data64)
{
    int fpus, fptag, i, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= (env->fptags[i] << i);
    }
    stw(ptr, env->fpuc);
    stw(ptr + 2, fpus);
    stw(ptr + 4, fptag ^ 0xff); /* abridged tag: 1 = register valid */
#ifdef TARGET_X86_64
    if (data64) {
        stq(ptr + 0x08, 0); /* rip */
        stq(ptr + 0x10, 0); /* rdp */
    } else
#endif
    {
        stl(ptr + 0x08, 0); /* eip */
        stl(ptr + 0x0c, 0); /* sel  */
        stl(ptr + 0x10, 0); /* dp */
        stl(ptr + 0x14, 0); /* sel  */
    }

    /* x87 registers, 16 bytes apart starting at offset 0x20 */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, addr);
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        stl(ptr + 0x18, env->mxcsr); /* mxcsr */
        stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXSAVE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                stq(addr, env->xmm_regs[i].XMM_Q(0));
                stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
                addr += 16;
            }
        }
    }
}
4433 
/* FXRSTOR: reload FPU and (if OSFXSR is enabled) SSE state from the
   fixed 512-byte FXSAVE layout.  The abridged tag byte is inverted
   back into the per-register empty flags.  "Fast FXRSTOR" (FFXSR,
   CPL0, long mode) skips the XMM register area. */
void helper_fxrstor(target_ulong ptr, int data64)
{
    int i, fpus, fptag, nb_xmm_regs;
    CPU86_LDouble tmp;
    target_ulong addr;

    env->fpuc = lduw(ptr);
    fpus = lduw(ptr + 2);
    fptag = lduw(ptr + 4);
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff; /* stored as 1 = valid; fptags uses 1 = empty */
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag >> i) & 1);
    }

    /* x87 registers, 16 bytes apart starting at offset 0x20 */
    addr = ptr + 0x20;
    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(addr);
        ST(i) = tmp;
        addr += 16;
    }

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        /* XXX: finish it */
        env->mxcsr = ldl(ptr + 0x18);
        //ldl(ptr + 0x1c);
        if (env->hflags & HF_CS64_MASK)
            nb_xmm_regs = 16;
        else
            nb_xmm_regs = 8;
        addr = ptr + 0xa0;
        /* Fast FXRESTORE leaves out the XMM registers */
        if (!(env->efer & MSR_EFER_FFXSR)
          || (env->hflags & HF_CPL_MASK)
          || !(env->hflags & HF_LMA_MASK)) {
            for(i = 0; i < nb_xmm_regs; i++) {
                env->xmm_regs[i].XMM_Q(0) = ldq(addr);
                env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
                addr += 16;
            }
        }
    }
}
4478 
4479 #ifndef USE_X86LDOUBLE
4480 
/* Convert a host double into 80-bit extended-precision parts: 64-bit
   mantissa (explicit integer bit set) and 16-bit sign+exponent word.
   Only used when the build does not use native x86 long double. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;
    int e;

    temp.d = f;
    /* mantissa: shift the 52 stored bits up and set the explicit
       integer bit of the extended format */
    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign: rebase from the double bias to the 80-bit
       bias (16383); SIGND's bit lands in bit 15 after the shift */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}
4494 
/* Rebuild a host double from 80-bit extended-precision parts (mantissa
   plus sign/exponent word), dropping precision that does not fit.
   Only used when the build does not use native x86 long double. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;
    int e;
    uint64_t ll;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    /* keep the top 52 mantissa bits (the explicit integer bit is
       dropped — assumed set for normal numbers) */
    ll = (mant >> 11) & ((1LL << 52) - 1);
#ifdef __arm__
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.l.lower = ll;
#else
    temp.ll = ll | ((uint64_t)e << 52);
#endif
    return temp.d;
}
4513 
4514 #else
4515 
/* Native long-double build: the register already has the 80-bit
   layout, so just split it into mantissa and sign/exponent words. */
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    temp.d = f;
    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}
4524 
/* Native long-double build: reassemble the 80-bit value directly from
   its mantissa and sign/exponent words. */
CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
4533 #endif
4534 
4535 #ifdef TARGET_X86_64
4536 
4537 //#define DEBUG_MULDIV
4538 
/* 128-bit add: (*phigh:*plow) += (b:a), propagating the carry out of
   the low 64 bits into the high word. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    uint64_t lo = *plow + a;

    *phigh += b + (lo < a); /* unsigned wrap means a carry occurred */
    *plow = lo;
}
4547 
/* Two's-complement negate of the 128-bit value (*phigh:*plow): ~x + 1. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~ *plow;
    *phigh = ~ *phigh;
    add128(plow, phigh, 1, 0);
}
4554 
4555 /* return TRUE if overflow */
/* Unsigned 128-by-64 division of (*phigh:*plow) by b.  On success the
   quotient is left in *plow and the remainder in *phigh; returns 1
   (operands untouched) if the quotient would not fit in 64 bits. */
static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
{
    uint64_t rem = *phigh;
    uint64_t quo = *plow;
    int i, top, qbit;

    if (rem == 0) {
        /* high word empty: native 64-bit division suffices */
        *plow = quo / b;
        *phigh = quo % b;
        return 0;
    }
    if (rem >= b)
        return 1; /* overflow: quotient needs more than 64 bits */
    /* XXX: use a better algorithm */
    /* bit-serial restoring division, one quotient bit per step */
    for (i = 0; i < 64; i++) {
        top = rem >> 63;
        rem = (rem << 1) | (quo >> 63);
        if (top || rem >= b) {
            rem -= b;
            qbit = 1;
        } else {
            qbit = 0;
        }
        quo = (quo << 1) | qbit;
    }
#if defined(DEBUG_MULDIV)
    printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
           *phigh, *plow, b, quo, rem);
#endif
    *plow = quo;
    *phigh = rem;
    return 0;
}
4592 
4593 /* return TRUE if overflow */
/* Signed 128-by-64 division built on div64: take magnitudes, divide,
   then restore the signs.  Quotient sign is sa^sb; remainder takes the
   dividend's sign.  Returns 1 on overflow (quotient outside int64
   range — note -2^63 is allowed when the signs differ). */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;
    sa = ((int64_t)*phigh < 0);
    if (sa)
        neg128(plow, phigh);
    sb = (b < 0);
    if (sb)
        b = -b;
    if (div64(plow, phigh, b) != 0)
        return 1;
    if (sa ^ sb) {
        if (*plow > (1ULL << 63))
            return 1;
        *plow = - *plow;
    } else {
        if (*plow >= (1ULL << 63))
            return 1;
    }
    if (sa)
        *phigh = - *phigh;
    return 0;
}
4617 
void helper_mulq_EAX_T0(target_ulong t0)
{
    /* MUL (64-bit): unsigned RDX:RAX = RAX * t0; flag inputs track
       the low and high halves. */
    uint64_t lo, hi;

    mulu64(&lo, &hi, EAX, t0);
    EAX = lo;
    EDX = hi;
    CC_DST = lo;
    CC_SRC = hi;
}
4628 
void helper_imulq_EAX_T0(target_ulong t0)
{
    /* IMUL (64-bit, one operand): signed RDX:RAX = RAX * t0; CC_SRC
       records whether the high half is more than sign extension. */
    uint64_t lo, hi;

    muls64(&lo, &hi, EAX, t0);
    EAX = lo;
    EDX = hi;
    CC_DST = lo;
    CC_SRC = ((int64_t)hi != ((int64_t)lo >> 63));
}
4639 
target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
{
    /* IMUL (64-bit, two operand): return the low half of t0 * t1;
       CC_SRC records whether the high half is more than sign extension. */
    uint64_t lo, hi;

    muls64(&lo, &hi, t0, t1);
    CC_DST = lo;
    CC_SRC = ((int64_t)hi != ((int64_t)lo >> 63));
    return lo;
}
4649 
void helper_divq_EAX(target_ulong t0)
{
    /* DIV (64-bit): unsigned divide RDX:RAX by t0; #DE on a zero
       divisor or a quotient that does not fit in 64 bits. */
    uint64_t lo, hi;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    lo = EAX;
    hi = EDX;
    if (div64(&lo, &hi, t0)) {
        raise_exception(EXCP00_DIVZ);
    }
    EAX = lo;
    EDX = hi;
}
4663 
void helper_idivq_EAX(target_ulong t0)
{
    /* IDIV (64-bit): signed divide RDX:RAX by t0; #DE on a zero
       divisor or a quotient outside the signed 64-bit range. */
    uint64_t lo, hi;

    if (t0 == 0) {
        raise_exception(EXCP00_DIVZ);
    }
    lo = EAX;
    hi = EDX;
    if (idiv64(&lo, &hi, t0)) {
        raise_exception(EXCP00_DIVZ);
    }
    EAX = lo;
    EDX = hi;
}
4677 #endif
4678 
/* Put the virtual CPU into the halted state and leave the TB loop. */
static void do_hlt(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
    env->halted = 1;
    env->exception_index = EXCP_HLT;
    cpu_loop_exit();
}
4686 
/* HLT: check the SVM intercept, advance EIP past the instruction,
   then halt.  Does not return (cpu_loop_exit). */
void helper_hlt(int next_eip_addend)
{
    helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
    EIP += next_eip_addend;

    do_hlt();
}
4694 
/* MONITOR: #GP unless ECX is zero; the monitored address itself is
   not tracked, only the SVM intercept is checked. */
void helper_monitor(target_ulong ptr)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    /* XXX: store address ? */
    helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
}
4702 
/* MWAIT: #GP unless ECX is zero; approximated as HLT on a uniprocessor
   guest, and as a no-op when other CPUs exist (they could wake us). */
void helper_mwait(int next_eip_addend)
{
    if ((uint32_t)ECX != 0)
        raise_exception(EXCP0D_GPF);
    helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
    EIP += next_eip_addend;

    /* XXX: not complete but not completely erroneous */
    if (env->cpu_index != 0 || env->next_cpu != NULL) {
        /* more than one CPU: do not sleep because another CPU may
           wake this one */
    } else {
        do_hlt();
    }
}
4718 
/* Raise a debug exception and return to the main cpu loop.
   Does not return. */
void helper_debug(void)
{
    env->exception_index = EXCP_DEBUG;
    cpu_loop_exit();
}
4724 
/* Clear the resume flag (RF) in EFLAGS. */
void helper_reset_rf(void)
{
    env->eflags &= ~RF_MASK;
}
4729 
/* Raise interrupt 'intno' as a software interrupt (is_int = 1, no
   error code); next_eip_addend advances past the current insn. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
4734 
/* Raise the given CPU exception from generated code. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4739 
/* CLI: clear the interrupt enable flag (IF). */
void helper_cli(void)
{
    env->eflags &= ~IF_MASK;
}
4744 
/* STI: set the interrupt enable flag (IF). */
void helper_sti(void)
{
    env->eflags |= IF_MASK;
}
4749 
4750 #if 0
4751 /* vm86plus instructions */
4752 void helper_cli_vm(void)
4753 {
4754     env->eflags &= ~VIF_MASK;
4755 }
4756 
4757 void helper_sti_vm(void)
4758 {
4759     env->eflags |= VIF_MASK;
4760     if (env->eflags & VIP_MASK) {
4761         raise_exception(EXCP0D_GPF);
4762     }
4763 }
4764 #endif
4765 
/* Enter the one-instruction interrupt shadow (e.g. after STI or a
   move to SS): interrupts stay blocked for the next instruction. */
void helper_set_inhibit_irq(void)
{
    env->hflags |= HF_INHIBIT_IRQ_MASK;
}
4770 
/* Leave the interrupt shadow set by helper_set_inhibit_irq(). */
void helper_reset_inhibit_irq(void)
{
    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
}
4775 
/* BOUND (16-bit operand): check that the signed 16-bit index v lies
   inside the [lower, upper] pair stored at guest address a0;
   raise #BR otherwise. */
void helper_boundw(target_ulong a0, int v)
{
    int lower = ldsw(a0);
    int upper = ldsw(a0 + 2);
    int val = (int16_t)v;

    if (val < lower || val > upper) {
        raise_exception(EXCP05_BOUND);
    }
}
4786 
/* BOUND (32-bit operand): check that the signed 32-bit index v lies
   inside the [lower, upper] pair stored at guest address a0;
   raise #BR otherwise. */
void helper_boundl(target_ulong a0, int v)
{
    int lower = ldl(a0);
    int upper = ldl(a0 + 4);

    if (v < lower || v > upper) {
        raise_exception(EXCP05_BOUND);
    }
}
4796 
approx_rsqrt(float a)4797 static float approx_rsqrt(float a)
4798 {
4799     return 1.0 / sqrt(a);
4800 }
4801 
/* Reciprocal for RCPSS/RCPPS, computed at full double precision
   (the real instruction is only an approximation). */
static float approx_rcp(float a)
{
    double inv = 1.0 / a;

    return (float)inv;
}
4806 
4807 #if !defined(CONFIG_USER_ONLY)
4808 
4809 #define MMUSUFFIX _mmu
4810 
4811 #define SHIFT 0
4812 #include "softmmu_template.h"
4813 
4814 #define SHIFT 1
4815 #include "softmmu_template.h"
4816 
4817 #define SHIFT 2
4818 #include "softmmu_template.h"
4819 
4820 #define SHIFT 3
4821 #include "softmmu_template.h"
4822 
4823 #endif
4824 
4825 #if !defined(CONFIG_USER_ONLY)
4826 /* try to fill the TLB and return an exception if error. If retaddr is
4827    NULL, it means that the function was called in C code (i.e. not
4828    from generated code or from helper.c) */
4829 /* XXX: fix it to restore all registers */
/* Handle a softmmu TLB miss: attempt to fill the TLB entry for 'addr'
   and, on a real guest page fault, raise the corresponding exception.
   When retaddr is non-NULL the miss came from generated code, so the
   CPU state is first rolled back to the faulting instruction. */
void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* raise_exception_err() longjmps out, so env is NOT restored
           on this path -- only on success below. */
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
4858 #endif
4859 
4860 /* Secure Virtual Machine helpers */
4861 
4862 #if defined(CONFIG_USER_ONLY)
4863 
/* User-mode emulation has no SVM support: every SVM helper below is a
   no-op stub so that common translated code can call them
   unconditionally. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}

void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
4899 #else
4900 
/* Store a segment cache entry into the vmcb_seg structure at physical
   address addr.  The hidden descriptor flags are repacked into the
   12-bit SVM attribute format (low byte = type/S/DPL/P, high nibble =
   AVL/L/DB/G). */
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
4913 
/* Load a segment cache entry from the vmcb_seg structure at physical
   address addr, expanding the 12-bit SVM attribute format back into
   the descriptor flag layout (inverse of svm_save_seg). */
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
4924 
/* Load a segment from the VMCB at physical address addr and install it
   into the CPU's segment cache for register seg_reg (which also
   refreshes the derived hflags). */
static inline void svm_load_seg_cache(target_phys_addr_t addr,
                                      CPUState *env, int seg_reg)
{
    SegmentCache seg;

    svm_load_seg(addr, &seg);
    cpu_x86_load_seg_cache(env, seg_reg, seg.selector,
                           seg.base, seg.limit, seg.flags);
}
4933 
/* VMRUN: world-switch into the guest.  Saves the host state into the
   hsave page, loads the guest state and intercept bitmaps from the
   VMCB addressed by (r/e)AX, and optionally injects a pending event.
   Ordering matters throughout: the host state must be fully saved
   before any guest state is loaded. */
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    /* VMCB physical address comes from rAX; zero-extended from EAX
       unless the address size is 64-bit (aflag == 2). */
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                  &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* host resumes at the instruction after VMRUN */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept            = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read    = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write   = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    /* load the guest state from the VMCB */
    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: remember the host IF state */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
        case TLB_CONTROL_DO_NOTHING:
            break;
        case TLB_CONTROL_FLUSH_ALL_ASID:
            /* FIXME: this is not 100% correct but should work for now */
            tlb_flush(env, 1);
        break;
    }

    /* VMRUN sets the global interrupt flag */
    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
                /* XXX: is it always correct ? */
                do_interrupt(vector, 0, 0, 0, 1);
                break;
        case SVM_EVTINJ_TYPE_NMI:
                env->exception_index = EXCP02_NMI;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_EXEPT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 0;
                env->exception_next_eip = -1;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
                cpu_loop_exit();
                break;
        case SVM_EVTINJ_TYPE_SOFT:
                env->exception_index = vector;
                env->error_code = event_inj_err;
                env->exception_is_int = 1;
                env->exception_next_eip = EIP;
                qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
                cpu_loop_exit();
                break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
5106 
/* VMMCALL: report the intercept, then raise #UD -- no hypervisor
   call interface is implemented here. */
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
5112 
/* VMLOAD: load the extra guest state (FS/GS/TR/LDTR plus the
   syscall/sysenter MSRs) from the VMCB at the address in (r/e)AX. */
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    /* zero-extend EAX unless the address size is 64-bit */
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
5147 
/* VMSAVE: store the extra guest state (FS/GS/TR/LDTR plus the
   syscall/sysenter MSRs) into the VMCB at the address in (r/e)AX.
   Mirror image of helper_vmload(). */
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    /* zero-extend EAX unless the address size is 64-bit */
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
5182 
/* STGI: set the global interrupt flag. */
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
5188 
/* CLGI: clear the global interrupt flag. */
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
5194 
/* SKINIT: secure init is not implemented; raise #UD after reporting
   the intercept. */
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
5201 
helper_invlpga(int aflag)5202 void helper_invlpga(int aflag)
5203 {
5204     target_ulong addr;
5205     helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
5206 
5207     if (aflag == 2)
5208         addr = EAX;
5209     else
5210         addr = (uint32_t)EAX;
5211 
5212     /* XXX: could use the ASID to see if it is needed to do the
5213        flush */
5214     tlb_flush_page(env, addr);
5215 }
5216 
/* Check whether the given SVM intercept 'type' is enabled by the
   hypervisor and, if so, perform a #VMEXIT with exit code 'type' and
   exit_info_1 'param' (helper_vmexit() does not return).  No-op when
   the CPU is not running a guest (HF_SVMI_MASK clear). */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            /* The MSR permission map holds two bits per MSR (read
               intercept then write intercept): t1 is the byte offset
               inside the map, t0 the bit offset of the read bit. */
            uint32_t t0, t1;
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                /* bug fix: the byte offset must account for the two
                   bits per MSR, like the two ranges below; the old
                   code used ECX / 8 */
                t0 = ECX * 2;
                t1 = t0 / 8;
                t0 %= 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param selects read (0) vs write (1) -- presumably set by
               the rdmsr/wrmsr callers; TODO confirm */
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
5284 
/* Check the I/O permission map (IOPM) for an intercepted port access.
   'param' is the SVM IOIO exit_info_1 payload; bits 4-6 presumably
   encode the access size used to build the multi-bit mask -- TODO
   confirm against the translator.  On intercept, the address of the
   next instruction is stored in exit_info_2 before the #VMEXIT. */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
5300 
5301 /* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: world-switch back to the hypervisor.  Saves the guest state
   into the VMCB, reloads the host state from the hsave page, records
   the exit code/info for the hypervisor, and leaves the cpu loop.
   Does not return. */
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* report (and consume) a pending interrupt shadow */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* reflect the current virtual interrupt state back into int_ctl */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit();
}
5437 
5438 #endif
5439 
5440 /* MMX/SSE */
5441 /* XXX: optimize by storing fptt and fptags in the static cpu state */
/* Prepare the FPU for MMX use: reset the stack top and mark all eight
   tag entries valid (0), writing the byte array four tags at a time. */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
5448 
/* EMMS: mark all eight FP tag entries empty (1), four at a time. */
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
5455 
5456 /* XXX: suppress */
/* Copy one 64-bit value between MMX/XMM register storage; d and s are
   host pointers into the register file. */
/* XXX: suppress */
void helper_movq(void *d, void *s)
{
    *(uint64_t *)d = *(uint64_t *)s;
}
5461 
5462 #define SHIFT 0
5463 #include "ops_sse.h"
5464 
5465 #define SHIFT 1
5466 #include "ops_sse.h"
5467 
5468 #define SHIFT 0
5469 #include "helper_template.h"
5470 #undef SHIFT
5471 
5472 #define SHIFT 1
5473 #include "helper_template.h"
5474 #undef SHIFT
5475 
5476 #define SHIFT 2
5477 #include "helper_template.h"
5478 #undef SHIFT
5479 
5480 #ifdef TARGET_X86_64
5481 
5482 #define SHIFT 3
5483 #include "helper_template.h"
5484 #undef SHIFT
5485 
5486 #endif
5487 
5488 /* bit operations */
/* Bit Scan Forward: return the index of the least-significant set bit
   of t0.  Architecturally the destination is undefined when the source
   is zero (the translator tests for zero before calling this helper),
   but guard explicitly so a zero operand cannot spin the scan loop
   forever. */
target_ulong helper_bsf(target_ulong t0)
{
    int count;
    target_ulong res;

    if (t0 == 0) {
        /* Result is architecturally undefined here; any value is valid. */
        return 0;
    }
    res = t0;
    count = 0;
    while ((res & 1) == 0) {
        count++;
        res >>= 1;
    }
    return count;
}
5502 
/* Bit Scan Reverse: return the index of the most-significant set bit
   of t0.  As with BSF, a zero source leaves the destination undefined
   on real hardware and the translator never passes zero; the explicit
   guard keeps the scan loop from running forever regardless. */
target_ulong helper_bsr(target_ulong t0)
{
    int count;
    target_ulong res, mask;

    if (t0 == 0) {
        /* Result is architecturally undefined here; any value is valid. */
        return 0;
    }
    res = t0;
    count = TARGET_LONG_BITS - 1;
    mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
    while ((res & mask) == 0) {
        count--;
        res <<= 1;
    }
    return count;
}
5517 
5518 
compute_all_eflags(void)5519 static int compute_all_eflags(void)
5520 {
5521     return CC_SRC;
5522 }
5523 
compute_c_eflags(void)5524 static int compute_c_eflags(void)
5525 {
5526     return CC_SRC & CC_C;
5527 }
5528 
/* Materialize the full set of condition-code bits (CF/PF/AF/ZF/SF/OF)
   from the lazy-flags state.  'op' is the CC_OP_* value recording which
   kind of instruction last wrote the flags; each case dispatches to the
   compute_all_* helper instantiated from helper_template.h for that
   operation at the matching operand width (B/W/L, plus Q under
   TARGET_X86_64). */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    /* Flags were already computed eagerly and stored in CC_SRC. */
    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    /* 64-bit (Q) widths only exist when the target is x86_64. */
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
5599 
/* Materialize only the carry flag (CF) from the lazy-flags state; this
   is the fast path for instructions (ADC/SBB, conditional jumps on CF,
   etc.) that need just CF.  Several cases share a single-width helper:
   for MUL, INC/DEC and SAR the carry value does not depend on the
   operand width, so the 'l' variant serves all widths. */
uint32_t helper_cc_compute_c(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    /* Flags were already computed eagerly and stored in CC_SRC. */
    case CC_OP_EFLAGS: return compute_c_eflags();

    /* MUL: CF is set iff the high half of the result is nonzero,
       independent of operand width. */
    case CC_OP_MULB: return compute_c_mull();
    case CC_OP_MULW: return compute_c_mull();
    case CC_OP_MULL: return compute_c_mull();

    case CC_OP_ADDB: return compute_c_addb();
    case CC_OP_ADDW: return compute_c_addw();
    case CC_OP_ADDL: return compute_c_addl();

    case CC_OP_ADCB: return compute_c_adcb();
    case CC_OP_ADCW: return compute_c_adcw();
    case CC_OP_ADCL: return compute_c_adcl();

    case CC_OP_SUBB: return compute_c_subb();
    case CC_OP_SUBW: return compute_c_subw();
    case CC_OP_SUBL: return compute_c_subl();

    case CC_OP_SBBB: return compute_c_sbbb();
    case CC_OP_SBBW: return compute_c_sbbw();
    case CC_OP_SBBL: return compute_c_sbbl();

    case CC_OP_LOGICB: return compute_c_logicb();
    case CC_OP_LOGICW: return compute_c_logicw();
    case CC_OP_LOGICL: return compute_c_logicl();

    /* INC/DEC leave CF untouched, so CF comes straight from the saved
       state and one helper covers every width of both operations. */
    case CC_OP_INCB: return compute_c_incl();
    case CC_OP_INCW: return compute_c_incl();
    case CC_OP_INCL: return compute_c_incl();

    case CC_OP_DECB: return compute_c_incl();
    case CC_OP_DECW: return compute_c_incl();
    case CC_OP_DECL: return compute_c_incl();

    case CC_OP_SHLB: return compute_c_shlb();
    case CC_OP_SHLW: return compute_c_shlw();
    case CC_OP_SHLL: return compute_c_shll();

    /* SAR: CF is the last bit shifted out, width-independent here. */
    case CC_OP_SARB: return compute_c_sarl();
    case CC_OP_SARW: return compute_c_sarl();
    case CC_OP_SARL: return compute_c_sarl();

#ifdef TARGET_X86_64
    /* 64-bit (Q) widths only exist when the target is x86_64. */
    case CC_OP_MULQ: return compute_c_mull();

    case CC_OP_ADDQ: return compute_c_addq();

    case CC_OP_ADCQ: return compute_c_adcq();

    case CC_OP_SUBQ: return compute_c_subq();

    case CC_OP_SBBQ: return compute_c_sbbq();

    case CC_OP_LOGICQ: return compute_c_logicq();

    case CC_OP_INCQ: return compute_c_incl();

    case CC_OP_DECQ: return compute_c_incl();

    case CC_OP_SHLQ: return compute_c_shlq();

    case CC_OP_SARQ: return compute_c_sarl();
#endif
    }
}
5670