/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(HOST_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

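/* NB: set (elsewhere, e.g. by tb_gen_code() after a TB cache flush) when
   translated code may have been invalidated while a new block was being
   generated; cpu_exec() checks it and discards stale chaining state. */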
int tb_invalidated_flag;

//#define DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

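/* Unwind back to the setjmp() point in cpu_exec(); this is how generated
   code and helpers abort execution of the current TB. */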
void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here, because
       longjmp() restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

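/* Slow-path TB lookup in the physically indexed hash table; checks both
   physical pages a block may span and translates the code on a miss.
   The result is also entered into the virtual-PC jump cache. */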
static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

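/* Fast-path TB lookup: a single direct-mapped probe of the virtual-PC
   jump cache, falling back to tb_find_slow() when the entry is missing
   or the recorded cs_base/flags state does not match. */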
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        TAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

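/* Run translated guest code on env1 until something forces an exit, and
   return the exception index (e.g. EXCP_INTERRUPT, EXCP_DEBUG).  The
   setjmp()/longjmp() pair below is the non-local exit path used by
   cpu_loop_exit() and cpu_resume_from_signal(). */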
int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
#endif
                }
                env->exception_index = -1;
            }
#ifdef CONFIG_KQEMU
            if (kqemu_is_ok(env) && env->interrupt_request == 0 && env->exit_request == 0) {
                int ret;
                env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                ret = kqemu_cpu_exec(env);
                /* put eflags in CPU temporary format */
                CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                DF = 1 - (2 * ((env->eflags >> 10) & 1));
                CC_OP = CC_OP_EFLAGS;
                env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
                if (ret == 1) {
                    /* exception */
                    longjmp(env->jmp_env, 1);
                } else if (ret == 2) {
                    /* softmmu execution needed */
                } else {
                    if (env->interrupt_request != 0 || env->exit_request != 0) {
                        /* hardware interrupt will be executed just after */
                    } else {
                        /* otherwise, we restart */
                        longjmp(env->jmp_env, 1);
                    }
                }
            }
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
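            /* next_tb is the return value of tcg_qemu_tb_exec(): the
               address of the last executed TB with the taken jump slot
               encoded in the low two bits (consumed by tb_add_jump()
               below); the special low-bit value 2 signals an
               instruction-count (icount) expiry. */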
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_ppc_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                    (env->kqemu_enabled != 2) &&
#endif
                    tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely (env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(HOST_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
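                        /* The 16-bit decrementer cannot hold the whole
                           instruction budget, so icount_extra carries the
                           excess; refill below in chunks of at most 0xffff. */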
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state? */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it leads to an MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and 0 otherwise. 'old_set' is the
   signal set which should be restored */
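/* Returns 1 when the fault was resolved (protection fault fixed up, MMU
   fault handled, or a guest exception raised) and 0 when the signal is
   not a guest MMU fault and must be handled by the host. */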
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

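/* Host-side handlers: each variant below decodes the host-specific
   signal context to recover the faulting PC, a write/read flag and the
   saved signal mask before calling handle_cpu_signal(). */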
#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

1317 #elif defined(_ARCH_PPC)
1318 
1319 /***********************************************************************
1320  * signal context platform-specific definitions
1321  * From Wine
1322  */
1323 #ifdef linux
1324 /* All Registers access - only for local access */
1325 # define REG_sig(reg_name, context)		((context)->uc_mcontext.regs->reg_name)
1326 /* Gpr Registers access  */
1327 # define GPR_sig(reg_num, context)		REG_sig(gpr[reg_num], context)
1328 # define IAR_sig(context)			REG_sig(nip, context)	/* Program counter */
1329 # define MSR_sig(context)			REG_sig(msr, context)   /* Machine State Register (Supervisor) */
1330 # define CTR_sig(context)			REG_sig(ctr, context)   /* Count register */
1331 # define XER_sig(context)			REG_sig(xer, context) /* User's integer exception register */
1332 # define LR_sig(context)			REG_sig(link, context) /* Link register */
1333 # define CR_sig(context)			REG_sig(ccr, context) /* Condition register */
1334 /* Float Registers access  */
1335 # define FLOAT_sig(reg_num, context)		(((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
1336 # define FPSCR_sig(context)			(*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
1337 /* Exception Registers access */
1338 # define DAR_sig(context)			REG_sig(dar, context)
1339 # define DSISR_sig(context)			REG_sig(dsisr, context)
1340 # define TRAP_sig(context)			REG_sig(trap, context)
1341 #endif /* linux */
1342 
#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All registers - for local access only */
# define REG_sig(reg_name, context)		((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context)	((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context)	((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)		((context)->uc_mcontext->vs.reg_name)
/* GPR access */
# define GPR_sig(reg_num, context)		REG_sig(r##reg_num, context)
# define IAR_sig(context)			REG_sig(srr0, context)	/* Program counter */
# define MSR_sig(context)			REG_sig(srr1, context)	/* Machine State Register (supervisor) */
# define CTR_sig(context)			REG_sig(ctr, context)	/* Count register */
# define XER_sig(context)			REG_sig(xer, context)	/* User's integer exception register */
# define LR_sig(context)			REG_sig(lr, context)	/* Link register */
# define CR_sig(context)			REG_sig(cr, context)	/* Condition register */
/* Float register access */
# define FLOAT_sig(reg_num, context)		FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)			((double)FLOATREG_sig(fpscr, context))
/* Exception register access */
# define DAR_sig(context)			EXCEPREG_sig(dar, context)	/* Fault registers for coredump */
# define DSISR_sig(context)			EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)			EXCEPREG_sig(exception, context)	/* number of PowerPC exception taken */
#endif /* __APPLE__ */

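/* On classic PowerPC, trap 0x400 is the instruction storage interrupt,
   for which DSISR carries no access information; on a data storage
   interrupt, DSISR bit 0x02000000 is set when the faulting access was
   a store.  The embedded 4xx cores use a different DSISR layout (see
   the disabled branch below), which is why that path is kept under
   #if 0. */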
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

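/* Alpha instructions are 32 bits wide with the major opcode in the top
   six bits, so the handler fetches the faulting instruction from the
   saved PC and classifies it with (insn >> 26); the cases below list
   the integer and floating-point store opcodes, all of which count as
   writes. */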
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)(unsigned long)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
        break;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

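/* SPARC memory instructions use format 3: the op field (top two bits)
   is 3 and the op3 field sits in bits 24..19, so the handler matches
   (insn >> 19) & 0x3f against the store opcodes, plus the atomic
   compare-and-swap forms (casa/casxa), which also write memory.  The
   32-bit sigcontext layout has no portable glibc accessors, hence the
   hand-computed offsets into the area following the siginfo_t. */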
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(HOST_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

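/* glibc 2.3 and earlier exposed the ARM program counter as
   gregs[R15]; later versions name the field arm_pc directly in the
   mcontext, hence the version check below.  The faulting access type
   is not recovered here, so is_write stays 0. */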
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

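/* On Linux/m68k the gregset stores the eight data and eight address
   registers first, so gregs[16] below is understood to be the saved
   PC slot that follows them. */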
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID	1
#endif

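/* On IA-64 the interruption status register (ISR) is delivered in
   siginfo when __ISR_VALID is set in si_segvflags; ISR.W (bit 33)
   indicates that the faulting access was a write, which is what the
   handler below extracts. */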
int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

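/* On PA-RISC the saved instruction address offset queue (IAOQ) holds
   the addresses of the current and next instructions; sc_iaoq[0], the
   queue front, is taken below as the PC of the faulting instruction. */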
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */