1 /*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24 #include "config-host.h"
25
26 #include "monitor.h"
27 #include "sysemu.h"
28 #include "gdbstub.h"
29 #include "dma.h"
30 #include "kvm.h"
31 #include "hax.h"
32
33 #include "cpus.h"
34
/* Round-robin state for the TCG scheduler (see tcg_cpu_exec()). */
static CPUState *cur_cpu;   /* cpu currently / most recently executed */
static CPUState *next_cpu;  /* cpu the next scheduling round starts from */
37
38 /***********************************************************/
hw_error(const char * fmt,...)39 void hw_error(const char *fmt, ...)
40 {
41 va_list ap;
42 CPUState *env;
43
44 va_start(ap, fmt);
45 fprintf(stderr, "qemu: hardware error: ");
46 vfprintf(stderr, fmt, ap);
47 fprintf(stderr, "\n");
48 for(env = first_cpu; env != NULL; env = env->next_cpu) {
49 fprintf(stderr, "CPU #%d:\n", env->cpu_index);
50 #ifdef TARGET_I386
51 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU);
52 #else
53 cpu_dump_state(env, stderr, fprintf, 0);
54 #endif
55 }
56 va_end(ap);
57 abort();
58 }
59
do_vm_stop(int reason)60 static void do_vm_stop(int reason)
61 {
62 if (vm_running) {
63 cpu_disable_ticks();
64 vm_running = 0;
65 pause_all_vcpus();
66 vm_state_notify(0, reason);
67 }
68 }
69
/*
 * A vcpu may enter the execution loop only when no stop has been
 * requested (env->stop) and it is not already parked (env->stopped).
 * Returns 1 if runnable, 0 otherwise.
 */
static int cpu_can_run(CPUState *env)
{
    return !(env->stop || env->stopped);
}
78
/*
 * Does this vcpu have something to do?  A pending stop request always
 * counts as work; a parked cpu never has work; otherwise a cpu has work
 * when it is not halted or when the target reports pending work.
 * Returns 1 or 0.
 */
static int cpu_has_work(CPUState *env)
{
    if (env->stop) {
        return 1;
    }
    if (env->stopped) {
        return 0;
    }
    /* '||' already yields exactly 0 or 1 in C. */
    return !env->halted || qemu_cpu_has_work(env);
}
91
tcg_has_work(void)92 int tcg_has_work(void)
93 {
94 CPUState *env;
95
96 for (env = first_cpu; env != NULL; env = env->next_cpu)
97 if (cpu_has_work(env))
98 return 1;
99 return 0;
100 }
101
#ifndef _WIN32
/* POSIX host: the main loop is woken through a self-pipe. */
static int io_thread_fd = -1;   /* write end of the notify pipe; -1 until init */

#if 0
/* Disabled: wake the main loop by writing one byte into the notify pipe. */
static void qemu_event_increment(void)
{
    static const char byte = 0;

    if (io_thread_fd == -1)
        return;

    write(io_thread_fd, &byte, sizeof(byte));
}
#endif
116
/*
 * fd handler for the read end of the notify pipe: discard all pending
 * wakeup bytes so the pipe never fills up.  The fd is smuggled in as
 * the opaque pointer (see qemu_event_init()).
 */
static void qemu_event_read(void *opaque)
{
    int fd = (unsigned long)opaque;
    char scratch[512];
    ssize_t n;

    /* Keep reading until the (non-blocking) pipe is empty. */
    for (;;) {
        n = read(fd, scratch, sizeof(scratch));
        if (n > 0) {
            continue;       /* drained some bytes; there may be more */
        }
        if (n == -1 && errno == EINTR) {
            continue;       /* interrupted by a signal; retry */
        }
        break;              /* empty (EAGAIN), EOF, or hard error */
    }
}
128
qemu_event_init(void)129 static int qemu_event_init(void)
130 {
131 int err;
132 int fds[2];
133
134 err = pipe(fds);
135 if (err == -1)
136 return -errno;
137
138 err = fcntl_setfl(fds[0], O_NONBLOCK);
139 if (err < 0)
140 goto fail;
141
142 err = fcntl_setfl(fds[1], O_NONBLOCK);
143 if (err < 0)
144 goto fail;
145
146 qemu_set_fd_handler2(fds[0], NULL, qemu_event_read, NULL,
147 (void *)(unsigned long)fds[0]);
148
149 io_thread_fd = fds[1];
150 return 0;
151
152 fail:
153 close(fds[0]);
154 close(fds[1]);
155 return err;
156 }
#else
/* Win32 host: the main loop is woken through an auto-reset event object. */
HANDLE qemu_event_handle;

/* The wait-object API requires a callback; waking the loop is all we need. */
static void dummy_event_handler(void *opaque)
{
}

/* Create the wakeup event and register it with the main loop.
 * Returns 0 on success, -1 on failure. */
static int qemu_event_init(void)
{
    qemu_event_handle = CreateEvent(NULL, FALSE, FALSE, NULL);
    if (!qemu_event_handle) {
        /* NOTE(review): CreateEvent reports errors via GetLastError(), not
         * errno, so perror() likely prints an unrelated message -- confirm. */
        perror("Failed CreateEvent");
        return -1;
    }
    qemu_add_wait_object(qemu_event_handle, dummy_event_handler, NULL);
    return 0;
}

#if 0
/* Disabled: wake the main loop by signalling the event object. */
static void qemu_event_increment(void)
{
    SetEvent(qemu_event_handle);
}
#endif
#endif
182
#ifndef CONFIG_IOTHREAD
/* Single-threaded build: vcpus run inside the main-loop thread, so the
 * vcpu control entry points below are mostly trivial. */

/* Main-loop setup only needs the wakeup mechanism here. */
int qemu_init_main_loop(void)
{
    return qemu_event_init();
}
188
qemu_init_vcpu(void * _env)189 void qemu_init_vcpu(void *_env)
190 {
191 CPUState *env = _env;
192
193 if (kvm_enabled())
194 kvm_init_vcpu(env);
195 #ifdef CONFIG_HAX
196 if (hax_enabled())
197 hax_init_vcpu(env);
198 #endif
199 return;
200 }
201
/* Without an io thread all code runs in one thread, so every caller is
 * by definition "in" the vcpu context. */
int qemu_cpu_self(void *env)
{
    return 1;
}

/* vcpus are driven synchronously from the main loop in this build, so
 * the pause/resume protocol degenerates to no-ops. */
void resume_all_vcpus(void)
{
}

void pause_all_vcpus(void)
{
}

/* Nothing to kick: the vcpu runs in this same thread. */
void qemu_cpu_kick(void *env)
{
    return;
}
219
/*
 * Wake the (single) execution thread so pending work is noticed.
 * NOTE(review): the brace structure below is preprocessor-dependent --
 * the `else` branch only exists when CONFIG_HAX is defined; without it
 * the function is just the cpu_exit/kqemu part.  Handle with care.
 */
void qemu_notify_event(void)
{
    CPUState *env = cpu_single_env;

    if (env) {
        /* Force the cpu out of its execution loop. */
        cpu_exit(env);
#ifdef USE_KQEMU
        if (env->kqemu_enabled)
            kqemu_cpu_interrupt(env);
#endif
    /*
     * This is mainly for the Windows host, where the timer may be in
     * a different thread with vcpu. Thus the timer function needs to
     * notify the vcpu thread of more than simply cpu_exit. If env is
     * not NULL, it means that the vcpu is in execute state, we need
     * only to set the flags. If the guest is in execute state, the
     * HAX kernel module will exit to qemu. If env is NULL, vcpu is
     * in main_loop_wait, and we need a event to notify it.
     */
#ifdef CONFIG_HAX
        if (hax_enabled())
            hax_raise_event(env);
    } else {
#ifdef _WIN32
        if(hax_enabled())
            SetEvent(qemu_event_handle);
#endif
    }
#else
    }
#endif
}
252
/* Single-threaded build: there is no io thread, so the global iothread
 * lock degenerates to a no-op pair. */
void qemu_mutex_lock_iothread(void)
{
}

void qemu_mutex_unlock_iothread(void)
{
}
260
/* Stop the VM; synchronous here since everything runs in one thread. */
void vm_stop(int reason)
{
    do_vm_stop(reason);
}
265
266 #else /* CONFIG_IOTHREAD */
267
#include "qemu-thread.h"

/* Big lock serializing device / main-loop state against vcpu threads. */
QemuMutex qemu_global_mutex;
/* Hand-off lock so lockers of qemu_global_mutex are not starved. */
static QemuMutex qemu_fair_mutex;

/* Identity of the io (main-loop) thread, used by vm_stop() routing. */
static QemuThread io_thread;

/* All TCG vcpus share a single thread and halt condition. */
static QemuThread *tcg_cpu_thread;
static QemuCond *tcg_halt_cond;

/* Set once machine initialization is done; vcpu threads wait on it. */
static int qemu_system_ready;
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_system_cond;
static QemuCond qemu_pause_cond;

static void block_io_signals(void);
static void unblock_io_signals(void);
/* NOTE(review): tcg_has_work() is defined non-static earlier in this file;
 * this static redeclaration gives it conflicting linkage -- confirm. */
static int tcg_has_work(void);
288
qemu_init_main_loop(void)289 int qemu_init_main_loop(void)
290 {
291 int ret;
292
293 ret = qemu_event_init();
294 if (ret)
295 return ret;
296
297 qemu_cond_init(&qemu_pause_cond);
298 qemu_mutex_init(&qemu_fair_mutex);
299 qemu_mutex_init(&qemu_global_mutex);
300 qemu_mutex_lock(&qemu_global_mutex);
301
302 unblock_io_signals();
303 qemu_thread_self(&io_thread);
304
305 return 0;
306 }
307
/*
 * Park a vcpu thread until there is work again.
 *
 * Called with qemu_global_mutex held.  Waits (1s timeout) on the cpu's
 * halt condition while no vcpu has work, then briefly drops the global
 * lock -- bouncing through qemu_fair_mutex -- so the io thread gets a
 * chance to grab it.  Finally acknowledges any pending stop request by
 * flipping stop -> stopped and signalling pause_all_vcpus()'s waiter.
 */
static void qemu_wait_io_event(CPUState *env)
{
    while (!tcg_has_work())
        qemu_cond_timedwait(env->halt_cond, &qemu_global_mutex, 1000);

    qemu_mutex_unlock(&qemu_global_mutex);

    /*
     * Users of qemu_global_mutex can be starved, having no chance
     * to acquire it since this path will get to it first.
     * So use another lock to provide fairness.
     */
    qemu_mutex_lock(&qemu_fair_mutex);
    qemu_mutex_unlock(&qemu_fair_mutex);

    qemu_mutex_lock(&qemu_global_mutex);
    if (env->stop) {
        env->stop = 0;
        env->stopped = 1;
        qemu_cond_signal(&qemu_pause_cond);
    }
}
330
331 static int qemu_cpu_exec(CPUState *env);
332
/*
 * Entry point of a per-vcpu KVM thread: set up signal routing, announce
 * creation to the waiter in the init path, wait for machine init to
 * finish, then loop forever running the vcpu when it is runnable and
 * parking in qemu_wait_io_event() otherwise.
 * Runs with qemu_global_mutex held except inside qemu_wait_io_event().
 */
static void *kvm_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        if (cpu_can_run(env))
            qemu_cpu_exec(env);
        qemu_wait_io_event(env);
    }

    /* not reached */
    return NULL;
}
357
358 static void tcg_cpu_exec(void);
359
/*
 * Entry point of the single thread shared by all TCG vcpus: set up
 * signal routing, mark EVERY cpu as created (one thread serves them
 * all), wait for machine init, then alternate between running the TCG
 * round-robin scheduler and parking in qemu_wait_io_event().
 * Runs with qemu_global_mutex held except inside qemu_wait_io_event().
 */
static void *tcg_cpu_thread_fn(void *arg)
{
    CPUState *env = arg;

    block_io_signals();
    qemu_thread_self(env->thread);

    /* signal CPU creation */
    qemu_mutex_lock(&qemu_global_mutex);
    for (env = first_cpu; env != NULL; env = env->next_cpu)
        env->created = 1;
    qemu_cond_signal(&qemu_cpu_cond);

    /* and wait for machine initialization */
    while (!qemu_system_ready)
        qemu_cond_timedwait(&qemu_system_cond, &qemu_global_mutex, 100);

    while (1) {
        tcg_cpu_exec();
        qemu_wait_io_event(cur_cpu);
    }

    /* not reached */
    return NULL;
}
384
qemu_cpu_kick(void * _env)385 void qemu_cpu_kick(void *_env)
386 {
387 CPUState *env = _env;
388 qemu_cond_broadcast(env->halt_cond);
389 if (kvm_enabled() || hax_enabled())
390 qemu_thread_signal(env->thread, SIGUSR1);
391 }
392
qemu_cpu_self(void * env)393 int qemu_cpu_self(void *env)
394 {
395 return (cpu_single_env != NULL);
396 }
397
cpu_signal(int sig)398 static void cpu_signal(int sig)
399 {
400 if (cpu_single_env)
401 cpu_exit(cpu_single_env);
402 }
403
/*
 * Signal setup for vcpu threads: block the io-related signals (SIGUSR2,
 * SIGIO, SIGALRM) so the io thread handles them, unblock SIGUSR1 and
 * hook it to cpu_signal() so the thread can be kicked out of the
 * execution loop.
 */
static void block_io_signals(void)
{
    sigset_t set;
    struct sigaction sigact;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = cpu_signal;
    sigaction(SIGUSR1, &sigact, NULL);
}
423
/*
 * Mirror image of block_io_signals() for the io thread: it takes the
 * io-related signals (SIGUSR2, SIGIO, SIGALRM) and blocks SIGUSR1,
 * which is reserved for kicking vcpu threads.
 */
static void unblock_io_signals(void)
{
    sigset_t set;

    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL);
}
438
/*
 * Acquire qemu_global_mutex from the io thread while TCG is running:
 * hold the fairness lock, then repeatedly prod the TCG thread with
 * SIGUSR1 (forcing it out of the execution loop, where it releases the
 * global lock) until the lock is obtained via trylock or a timed lock
 * of `msecs` milliseconds.
 */
static void qemu_signal_lock(unsigned int msecs)
{
    qemu_mutex_lock(&qemu_fair_mutex);

    while (qemu_mutex_trylock(&qemu_global_mutex)) {
        qemu_thread_signal(tcg_cpu_thread, SIGUSR1);
        if (!qemu_mutex_timedlock(&qemu_global_mutex, msecs))
            break;
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}
450
/*
 * Take the big lock from the io thread.  KVM/HAX vcpus drop the lock
 * on their own while in the kernel, so a plain lock (bracketed by the
 * fairness mutex) suffices; with TCG the vcpu thread must be actively
 * signalled to give the lock up (see qemu_signal_lock()).
 */
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled() || hax_enabled()) {
        qemu_mutex_lock(&qemu_fair_mutex);
        qemu_mutex_lock(&qemu_global_mutex);
        qemu_mutex_unlock(&qemu_fair_mutex);
    } else
        qemu_signal_lock(100);
}
460
/* Release the big lock taken by qemu_mutex_lock_iothread(). */
void qemu_mutex_unlock_iothread(void)
{
    qemu_mutex_unlock(&qemu_global_mutex);
}
465
all_vcpus_paused(void)466 static int all_vcpus_paused(void)
467 {
468 CPUState *penv = first_cpu;
469
470 while (penv) {
471 if (!penv->stopped)
472 return 0;
473 penv = (CPUState *)penv->next_cpu;
474 }
475
476 return 1;
477 }
478
pause_all_vcpus(void)479 void pause_all_vcpus(void)
480 {
481 CPUState *penv = first_cpu;
482
483 while (penv) {
484 penv->stop = 1;
485 qemu_thread_signal(penv->thread, SIGUSR1);
486 qemu_cpu_kick(penv);
487 penv = (CPUState *)penv->next_cpu;
488 }
489
490 while (!all_vcpus_paused()) {
491 qemu_cond_timedwait(&qemu_pause_cond, &qemu_global_mutex, 100);
492 penv = first_cpu;
493 while (penv) {
494 qemu_thread_signal(penv->thread, SIGUSR1);
495 penv = (CPUState *)penv->next_cpu;
496 }
497 }
498 }
499
resume_all_vcpus(void)500 void resume_all_vcpus(void)
501 {
502 CPUState *penv = first_cpu;
503
504 while (penv) {
505 penv->stop = 0;
506 penv->stopped = 0;
507 qemu_thread_signal(penv->thread, SIGUSR1);
508 qemu_cpu_kick(penv);
509 penv = (CPUState *)penv->next_cpu;
510 }
511 }
512
/*
 * Attach a cpu to the single shared TCG thread.  The first cpu creates
 * the thread and its halt condition and waits (under qemu_global_mutex)
 * for the thread to announce creation; later cpus simply reuse them.
 * Called with qemu_global_mutex held.
 */
static void tcg_init_vcpu(void *_env)
{
    CPUState *env = _env;
    /* share a single thread for all cpus with TCG */
    if (!tcg_cpu_thread) {
        env->thread = qemu_mallocz(sizeof(QemuThread));
        env->halt_cond = qemu_mallocz(sizeof(QemuCond));
        qemu_cond_init(env->halt_cond);
        qemu_thread_create(env->thread, tcg_cpu_thread_fn, env);
        while (env->created == 0)
            qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
        tcg_cpu_thread = env->thread;
        tcg_halt_cond = env->halt_cond;
    } else {
        env->thread = tcg_cpu_thread;
        env->halt_cond = tcg_halt_cond;
    }
}
531
/*
 * Start a dedicated thread for a KVM vcpu.
 * NOTE(review): the entire body is compiled out (#if 0), so in this
 * tree qemu_init_vcpu() effectively does nothing for KVM -- confirm
 * this is intentional.
 */
static void kvm_start_vcpu(CPUState *env)
{
#if 0
    kvm_init_vcpu(env);
    env->thread = qemu_mallocz(sizeof(QemuThread));
    env->halt_cond = qemu_mallocz(sizeof(QemuCond));
    qemu_cond_init(env->halt_cond);
    qemu_thread_create(env->thread, kvm_cpu_thread_fn, env);
    while (env->created == 0)
        qemu_cond_timedwait(&qemu_cpu_cond, &qemu_global_mutex, 100);
#endif
}
544
qemu_init_vcpu(void * _env)545 void qemu_init_vcpu(void *_env)
546 {
547 CPUState *env = _env;
548
549 if (kvm_enabled())
550 kvm_start_vcpu(env);
551 else
552 tcg_init_vcpu(env);
553 }
554
/* Wake the main loop from another thread.
 * NOTE(review): qemu_event_increment() is compiled out (#if 0) earlier
 * in this file -- confirm where the linked definition comes from. */
void qemu_notify_event(void)
{
    qemu_event_increment();
}
559
/*
 * Request a VM stop.  When called from any thread other than the io
 * thread the stop is only queued: the io thread is asked to perform it
 * and the currently-executing cpu (if any) is forced out of its loop
 * and flagged to stop.  On the io thread the stop runs synchronously.
 */
void vm_stop(int reason)
{
    QemuThread me;
    qemu_thread_self(&me);

    if (!qemu_thread_equal(&me, &io_thread)) {
        qemu_system_vmstop_request(reason);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        if (cpu_single_env) {
            cpu_exit(cpu_single_env);
            cpu_single_env->stop = 1;
        }
        return;
    }
    do_vm_stop(reason);
}
579
580 #endif
581
/*
 * Run one cpu through cpu_exec(), bracketed by icount bookkeeping.
 *
 * With -icount enabled, an instruction budget up to the next timer
 * deadline is loaded into icount_decr/icount_extra before execution,
 * and any unexecuted remainder is folded back into qemu_icount after.
 * Returns the cpu_exec() exit reason (an EXCP_* value).
 */
static int qemu_cpu_exec(CPUState *env)
{
    int ret;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    ti = profile_getclock();
#endif
    if (use_icount) {
        int64_t count;
        int decr;
        /* Retire what was actually executed since the last top-up. */
        qemu_icount -= (env->icount_decr.u16.low + env->icount_extra);
        env->icount_decr.u16.low = 0;
        env->icount_extra = 0;
        count = qemu_next_icount_deadline();
        /* Convert the deadline to an instruction count, rounding up. */
        count = (count + (1 << icount_time_shift) - 1)
                >> icount_time_shift;
        qemu_icount += count;
        /* u16.low holds at most 0xffff; the excess goes to icount_extra. */
        decr = (count > 0xffff) ? 0xffff : count;
        count -= decr;
        env->icount_decr.u16.low = decr;
        env->icount_extra = count;
    }
#ifdef CONFIG_TRACE
    /* A pending trace-driven TB flush takes priority over executing. */
    if (tbflush_requested) {
        tbflush_requested = 0;
        tb_flush(env);
        return EXCP_INTERRUPT;
    }
#endif

    ret = cpu_exec(env);
#ifdef CONFIG_PROFILER
    qemu_time += profile_getclock() - ti;
#endif
    if (use_icount) {
        /* Fold pending instructions back into the
           instruction counter, and clear the interrupt flag. */
        qemu_icount -= (env->icount_decr.u16.low
                        + env->icount_extra);
        env->icount_decr.u32 = 0;
        env->icount_extra = 0;
    }
    return ret;
}
630
tcg_cpu_exec(void)631 void tcg_cpu_exec(void)
632 {
633 int ret = 0;
634
635 if (next_cpu == NULL)
636 next_cpu = first_cpu;
637 for (; next_cpu != NULL; next_cpu = next_cpu->next_cpu) {
638 CPUState *env = cur_cpu = next_cpu;
639
640 if (!vm_running)
641 break;
642 if (qemu_timer_alarm_pending()) {
643 break;
644 }
645 if (cpu_can_run(env))
646 ret = qemu_cpu_exec(env);
647 if (ret == EXCP_DEBUG) {
648 gdb_set_stop_cpu(env);
649 debug_requested = 1;
650 break;
651 }
652 }
653 }
654
655