1 #include "hw/hw.h"
2 #include "hw/boards.h"
3 #include "hw/pc.h"
4 #include "hw/isa.h"
5
6 #include "exec-all.h"
7 #include "kvm.h"
8
/* Write one segment register / descriptor-table cache entry to the
   savevm stream: selector (32 bit), base (target_ulong via betl),
   limit and flags (32 bit each).  The field order is the wire format
   and must stay symmetric with cpu_get_seg().  */
static void cpu_put_seg(QEMUFile *f, SegmentCache *dt)
{
    qemu_put_be32(f, dt->selector);
    qemu_put_betl(f, dt->base);
    qemu_put_be32(f, dt->limit);
    qemu_put_be32(f, dt->flags);
}
16
/* Read one segment register / descriptor-table cache entry from the
   savevm stream, in exactly the order cpu_put_seg() wrote it.  */
static void cpu_get_seg(QEMUFile *f, SegmentCache *dt)
{
    dt->selector = qemu_get_be32(f);
    dt->base = qemu_get_betl(f);
    dt->limit = qemu_get_be32(f);
    dt->flags = qemu_get_be32(f);
}
24
/*
 * cpu_save() - serialize the full x86 CPUState to a savevm/migration
 * stream.
 *
 * The sequence of qemu_put_* calls below IS the wire format: cpu_load()
 * must read the fields back in exactly this order, and new fields may
 * only be appended together with a CPU_SAVE_VERSION bump plus a
 * version_id check on the load side.
 */
void cpu_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;
    uint16_t fptag, fpus, fpuc, fpregs_format;
    uint32_t hflags;
    int32_t a20_mask;
    int i;

    /* Sync register state from the accelerator (KVM) into env before
       serializing it -- presumably a no-op under TCG; see kvm.h.  */
    cpu_synchronize_state(env, 0);

    /* General-purpose registers, EIP and EFLAGS as target_ulongs.  */
    for(i = 0; i < CPU_NB_REGS; i++)
        qemu_put_betls(f, &env->regs[i]);
    qemu_put_betls(f, &env->eip);
    qemu_put_betls(f, &env->eflags);
    hflags = env->hflags; /* XXX: suppress most of the redundant hflags */
    qemu_put_be32s(f, &hflags);

    /* FPU control/status: pack the top-of-stack index (fpstt) into bits
       11-13 of the saved status word, and rebuild the packed tag word
       from fptags[] -- note the inversion: the saved bit is 1 when the
       register is valid.  */
    fpuc = env->fpuc;
    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for(i = 0; i < 8; i++) {
        fptag |= ((!env->fptags[i]) << i);
    }

    qemu_put_be16s(f, &fpuc);
    qemu_put_be16s(f, &fpus);
    qemu_put_be16s(f, &fptag);

    /* fpregs_format records how the 8 FP registers are encoded below:
       0 = 80-bit (64-bit mantissa + 16-bit exponent), 1 = 64-bit double.  */
#ifdef USE_X86LDOUBLE
    fpregs_format = 0;
#else
    fpregs_format = 1;
#endif
    qemu_put_be16s(f, &fpregs_format);

    for(i = 0; i < 8; i++) {
#ifdef USE_X86LDOUBLE
        {
            uint64_t mant;
            uint16_t exp;
            /* we save the real CPU data (in case of MMX usage only 'mant'
               contains the MMX register */
            cpu_get_fp80(&mant, &exp, env->fpregs[i].d);
            qemu_put_be64(f, mant);
            qemu_put_be16(f, exp);
        }
#else
        /* if we use doubles for float emulation, we save the doubles to
           avoid losing information in case of MMX usage. It can give
           problems if the image is restored on a CPU where long
           doubles are used instead. */
        qemu_put_be64(f, env->fpregs[i].mmx.MMX_Q(0));
#endif
    }

    /* Segment registers, then LDT/TR and the GDT/IDT bases.  */
    for(i = 0; i < 6; i++)
        cpu_put_seg(f, &env->segs[i]);
    cpu_put_seg(f, &env->ldt);
    cpu_put_seg(f, &env->tr);
    cpu_put_seg(f, &env->gdt);
    cpu_put_seg(f, &env->idt);

    qemu_put_be32s(f, &env->sysenter_cs);
    qemu_put_betls(f, &env->sysenter_esp);
    qemu_put_betls(f, &env->sysenter_eip);

    /* Control registers (CR1 does not exist, hence the gap).  */
    qemu_put_betls(f, &env->cr[0]);
    qemu_put_betls(f, &env->cr[2]);
    qemu_put_betls(f, &env->cr[3]);
    qemu_put_betls(f, &env->cr[4]);

    /* Debug registers DR0-DR7.  */
    for(i = 0; i < 8; i++)
        qemu_put_betls(f, &env->dr[i]);

    /* MMU */
    a20_mask = (int32_t) env->a20_mask;
    qemu_put_sbe32s(f, &a20_mask);

    /* XMM */
    qemu_put_be32s(f, &env->mxcsr);
    for(i = 0; i < CPU_NB_REGS; i++) {
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_put_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

    /* 64-bit-only MSRs (syscall/sysret and kernel GS base).  */
#ifdef TARGET_X86_64
    qemu_put_be64s(f, &env->efer);
    qemu_put_be64s(f, &env->star);
    qemu_put_be64s(f, &env->lstar);
    qemu_put_be64s(f, &env->cstar);
    qemu_put_be64s(f, &env->fmask);
    qemu_put_be64s(f, &env->kernelgsbase);
#endif
    qemu_put_be32s(f, &env->smbase);

    qemu_put_be64s(f, &env->pat);
    qemu_put_be32s(f, &env->hflags2);

    /* SVM (hardware virtualization) fields.  */
    qemu_put_be64s(f, &env->vm_hsave);
    qemu_put_be64s(f, &env->vm_vmcb);
    qemu_put_be64s(f, &env->tsc_offset);
    qemu_put_be64s(f, &env->intercept);
    qemu_put_be16s(f, &env->intercept_cr_read);
    qemu_put_be16s(f, &env->intercept_cr_write);
    qemu_put_be16s(f, &env->intercept_dr_read);
    qemu_put_be16s(f, &env->intercept_dr_write);
    qemu_put_be32s(f, &env->intercept_exceptions);
    qemu_put_8s(f, &env->v_tpr);

    /* MTRRs: 11 fixed-range registers, the default type, and 8
       variable-range base/mask pairs.  */
    for(i = 0; i < 11; i++)
        qemu_put_be64s(f, &env->mtrr_fixed[i]);
    qemu_put_be64s(f, &env->mtrr_deftype);
    for(i = 0; i < 8; i++) {
        qemu_put_be64s(f, &env->mtrr_var[i].base);
        qemu_put_be64s(f, &env->mtrr_var[i].mask);
    }

    /* Pending-interrupt bitmap (saved as 64-bit words), TSC and
       MP state.  */
    for (i = 0; i < sizeof(env->interrupt_bitmap)/8; i++) {
        qemu_put_be64s(f, &env->interrupt_bitmap[i]);
    }
    qemu_put_be64s(f, &env->tsc);
    qemu_put_be32s(f, &env->mp_state);

    /* MCE: bank MSRs are only written when mcg_cap advertises a
       non-zero bank count in its low 8 bits; each bank is 4 MSRs.  */
    qemu_put_be64s(f, &env->mcg_cap);
    if (env->mcg_cap) {
        qemu_put_be64s(f, &env->mcg_status);
        qemu_put_be64s(f, &env->mcg_ctl);
        for (i = 0; i < (env->mcg_cap & 0xff); i++) {
            qemu_put_be64s(f, &env->mce_banks[4*i]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 1]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 2]);
            qemu_put_be64s(f, &env->mce_banks[4*i + 3]);
        }
    }
}
163
#ifdef USE_X86LDOUBLE
/* XXX: add that in a FPU generic layer */
/* Overlay describing the in-memory layout of an x86 80-bit
 * extended-precision value: the 64-bit mantissa at offset 0 followed by
 * the 16-bit sign+exponent word at offset 8.
 *
 * The two fields must occupy consecutive storage, so they live in an
 * anonymous struct; the outer union only preserves the original
 * 'union x86_longdouble' type name that cpu_load() declares.  The
 * previous declaration made 'mant' and 'exp' direct union members,
 * which alias at offset 0 -- storing 'exp' clobbered the low bits of
 * 'mant' and the overlay never produced a valid fp80 image.  */
union x86_longdouble {
    struct {
        uint64_t mant;
        uint16_t exp;
    };
};

/* Field extractors for an IEEE-754 double passed around as raw bits.  */
#define MANTD1(fp) (fp & ((1LL << 52) - 1))
#define EXPBIAS1 1023
#define EXPD1(fp) ((fp >> 52) & 0x7FF)
#define SIGND1(fp) ((fp >> 32) & 0x80000000)

/* Convert the raw bits of a 64-bit double into the 80-bit extended
 * layout: shift the 52-bit mantissa up to fill 63 bits, make the
 * integer bit explicit, and rebias the exponent from 1023 to 16383,
 * folding the sign into bit 15 of the exponent word.
 * NOTE(review): zero, denormals, infinities and NaNs get no special
 * treatment -- the integer bit is set unconditionally -- so non-finite
 * and zero inputs convert inexactly (pre-existing limitation).  */
static void fp64_to_fp80(union x86_longdouble *p, uint64_t temp)
{
    int e;
    /* mantissa */
    p->mant = (MANTD1(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD1(temp) - EXPBIAS1 + 16383;
    e |= SIGND1(temp) >> 16;
    p->exp = e;
}
#endif
187
/*
 * cpu_load() - restore the x86 CPUState from a savevm/migration stream
 * written by cpu_save().
 *
 * Fields must be read in exactly the order they were written; fields
 * introduced by later format revisions are guarded by version_id checks
 * so images from older QEMUs still load.
 *
 * Returns 0 on success, -EINVAL for an unsupported version_id or an
 * unknown FP register encoding.
 */
int cpu_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;
    int i, guess_mmx;
    uint32_t hflags;
    uint16_t fpus, fpuc, fptag, fpregs_format;
    int32_t a20_mask;

    /* Only format versions 3..CPU_SAVE_VERSION are supported.  */
    if (version_id < 3 || version_id > CPU_SAVE_VERSION)
        return -EINVAL;
    /* General-purpose registers, EIP, EFLAGS, hflags.  */
    for(i = 0; i < CPU_NB_REGS; i++)
        qemu_get_betls(f, &env->regs[i]);
    qemu_get_betls(f, &env->eip);
    qemu_get_betls(f, &env->eflags);
    qemu_get_be32s(f, &hflags);

    qemu_get_be16s(f, &fpuc);
    qemu_get_be16s(f, &fpus);
    qemu_get_be16s(f, &fptag);
    qemu_get_be16s(f, &fpregs_format);

    /* NOTE: we cannot always restore the FPU state if the image come
       from a host with a different 'USE_X86LDOUBLE' define. We guess
       if we are in an MMX state to restore correctly in that case. */
    guess_mmx = ((fptag == 0xff) && (fpus & 0x3800) == 0);
    for(i = 0; i < 8; i++) {
        uint64_t mant;
        uint16_t exp;

        switch(fpregs_format) {
        case 0:
            /* 80-bit encoding: 64-bit mantissa + 16-bit exponent word.  */
            mant = qemu_get_be64(f);
            exp = qemu_get_be16(f);
#ifdef USE_X86LDOUBLE
            env->fpregs[i].d = cpu_set_fp80(mant, exp);
#else
            /* difficult case */
            if (guess_mmx)
                env->fpregs[i].mmx.MMX_Q(0) = mant;
            else
                env->fpregs[i].d = cpu_set_fp80(mant, exp);
#endif
            break;
        case 1:
            /* 64-bit double encoding.  */
            mant = qemu_get_be64(f);
#ifdef USE_X86LDOUBLE
            {
                union x86_longdouble *p;
                /* difficult case */
                p = (void *)&env->fpregs[i];
                if (guess_mmx) {
                    /* MMX payload: raw 64-bit value, exponent all-ones.  */
                    p->mant = mant;
                    p->exp = 0xffff;
                } else {
                    fp64_to_fp80(p, mant);
                }
            }
#else
            env->fpregs[i].mmx.MMX_Q(0) = mant;
#endif
            break;
        default:
            return -EINVAL;
        }
    }

    env->fpuc = fpuc;
    /* XXX: restore FPU round state */
    /* Unpack the top-of-stack index from bits 11-13 of the saved status
       word, and re-expand the packed tag word (inverted on save).  */
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    fptag ^= 0xff;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = (fptag >> i) & 1;
    }

    /* Segment registers, LDT/TR, GDT/IDT.  */
    for(i = 0; i < 6; i++)
        cpu_get_seg(f, &env->segs[i]);
    cpu_get_seg(f, &env->ldt);
    cpu_get_seg(f, &env->tr);
    cpu_get_seg(f, &env->gdt);
    cpu_get_seg(f, &env->idt);

    qemu_get_be32s(f, &env->sysenter_cs);
    if (version_id >= 7) {
        qemu_get_betls(f, &env->sysenter_esp);
        qemu_get_betls(f, &env->sysenter_eip);
    } else {
        /* Pre-v7 images stored these as fixed 32-bit values.  */
        env->sysenter_esp = qemu_get_be32(f);
        env->sysenter_eip = qemu_get_be32(f);
    }

    qemu_get_betls(f, &env->cr[0]);
    qemu_get_betls(f, &env->cr[2]);
    qemu_get_betls(f, &env->cr[3]);
    qemu_get_betls(f, &env->cr[4]);

    /* Debug registers: reload DR0-DR7, then rebuild the internal
       breakpoint/watchpoint objects from the restored values.  */
    for(i = 0; i < 8; i++)
        qemu_get_betls(f, &env->dr[i]);
    cpu_breakpoint_remove_all(env, BP_CPU);
    cpu_watchpoint_remove_all(env, BP_CPU);
    for (i = 0; i < 4; i++)
        hw_breakpoint_insert(env, i);

    /* MMU */
    qemu_get_sbe32s(f, &a20_mask);
    env->a20_mask = a20_mask;

    /* XMM state.  */
    qemu_get_be32s(f, &env->mxcsr);
    for(i = 0; i < CPU_NB_REGS; i++) {
        qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(0));
        qemu_get_be64s(f, &env->xmm_regs[i].XMM_Q(1));
    }

    /* 64-bit-only MSRs.  */
#ifdef TARGET_X86_64
    qemu_get_be64s(f, &env->efer);
    qemu_get_be64s(f, &env->star);
    qemu_get_be64s(f, &env->lstar);
    qemu_get_be64s(f, &env->cstar);
    qemu_get_be64s(f, &env->fmask);
    qemu_get_be64s(f, &env->kernelgsbase);
#endif
    if (version_id >= 4) {
        qemu_get_be32s(f, &env->smbase);
    }
    if (version_id >= 5) {
        qemu_get_be64s(f, &env->pat);
        qemu_get_be32s(f, &env->hflags2);
        /* Versions 5 only: the halted flag was saved at this point.  */
        if (version_id < 6)
            qemu_get_be32s(f, &env->halted);

        /* SVM (hardware virtualization) fields.  */
        qemu_get_be64s(f, &env->vm_hsave);
        qemu_get_be64s(f, &env->vm_vmcb);
        qemu_get_be64s(f, &env->tsc_offset);
        qemu_get_be64s(f, &env->intercept);
        qemu_get_be16s(f, &env->intercept_cr_read);
        qemu_get_be16s(f, &env->intercept_cr_write);
        qemu_get_be16s(f, &env->intercept_dr_read);
        qemu_get_be16s(f, &env->intercept_dr_write);
        qemu_get_be32s(f, &env->intercept_exceptions);
        qemu_get_8s(f, &env->v_tpr);
    }

    if (version_id >= 8) {
        /* MTRRs */
        for(i = 0; i < 11; i++)
            qemu_get_be64s(f, &env->mtrr_fixed[i]);
        qemu_get_be64s(f, &env->mtrr_deftype);
        for(i = 0; i < 8; i++) {
            qemu_get_be64s(f, &env->mtrr_var[i].base);
            qemu_get_be64s(f, &env->mtrr_var[i].mask);
        }
    }
    if (version_id >= 9) {
        /* Pending-interrupt bitmap, TSC and MP state.  */
        for (i = 0; i < sizeof(env->interrupt_bitmap)/8; i++) {
            qemu_get_be64s(f, &env->interrupt_bitmap[i]);
        }
        qemu_get_be64s(f, &env->tsc);
        qemu_get_be32s(f, &env->mp_state);
    }

    if (version_id >= 10) {
        /* MCE: bank MSRs are present only when mcg_cap reports a
           non-zero bank count in its low 8 bits; 4 MSRs per bank.  */
        qemu_get_be64s(f, &env->mcg_cap);
        if (env->mcg_cap) {
            qemu_get_be64s(f, &env->mcg_status);
            qemu_get_be64s(f, &env->mcg_ctl);
            for (i = 0; i < (env->mcg_cap & 0xff); i++) {
                qemu_get_be64s(f, &env->mce_banks[4*i]);
                qemu_get_be64s(f, &env->mce_banks[4*i + 1]);
                qemu_get_be64s(f, &env->mce_banks[4*i + 2]);
                qemu_get_be64s(f, &env->mce_banks[4*i + 3]);
            }
        }
    }


    /* XXX: ensure compatiblity for halted bit ? */
    /* XXX: compute redundant hflags bits */
    env->hflags = hflags;
    /* The restored CR/paging state invalidates all cached translations;
       then push the new register state back to the accelerator.  */
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);
    return 0;
}
370