/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL

int qemu_cpu_has_work(CPUState *env)
{
    return cpu_has_work(env);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    regs_to_env();
    longjmp(env->jmp_env, 1);
}

/* exit the current TB from a signal handler. The host registers are
   restored in a state compatible with the CPU emulator
 */
void cpu_resume_from_signal(CPUState *env1, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
#ifdef __linux__
    struct ucontext *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#endif
#endif

    env = env1;

    /* XXX: restore cpu registers saved in host registers */

#if !defined(CONFIG_SOFTMMU)
    if (puc) {
        /* XXX: use siglongjmp ? */
#ifdef __linux__
        sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
#elif defined(__OpenBSD__)
        sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
    }
#endif
    env->exception_index = -1;
    longjmp(env->jmp_env, 1);
}

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);

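    /* The low two bits of next_tb encode how the TB exited: 0 and 1 name a
       jump slot of the TB that just ran (used for direct block chaining via
       tb_add_jump() in cpu_exec()), while 2 means the instruction counter
       expired. */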
    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    regs_to_env(); /* XXX: do it just before cpu_gen_code() */

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
   /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

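/* First-level lookup: tb_jmp_cache is a direct-mapped cache indexed by a
   hash of the virtual PC.  On a miss or a stale entry we fall back to
   tb_find_slow(), which walks the physical hash chain and refills the
   cache entry. */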
static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit)
        QTAILQ_FOREACH(wp, &env->watchpoints, entry)
            wp->flags &= ~BP_WATCHPOINT_HIT;

    if (debug_excp_handler)
        debug_excp_handler(env);
}

/* main execution loop */

int cpu_exec(CPUState *env1)
{
#define DECLARE_HOST_REGS 1
#include "hostregs_helper.h"
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (cpu_halted(env1) == EXCP_HALTED)
        return EXCP_HALTED;

    cpu_single_env = env1;

    /* first we save global registers */
#define SAVE_HOST_REGS 1
#include "hostregs_helper.h"
    env = env1;

    env_to_regs();
#if defined(TARGET_I386)
    /* put eflags in CPU temporary format */
    CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    DF = 1 - (2 * ((env->eflags >> 10) & 1));
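    /* eflags bit 10 is the direction flag, so 1 - 2*bit maps it to the
       +1/-1 string-operation increment kept in DF. */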
    CC_OP = CC_OP_EFLAGS;
    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_PPC)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
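            /* On sparc hosts env lives in a fixed global register that
               glibc's setjmp/longjmp can clobber (see the workaround at the
               top of this file), so reload it from cpu_single_env. */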
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            env->current_tb = NULL;
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG)
                        cpu_handle_debug_exception(env);
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#endif
                    env->exception_index = -1;
#endif
                }
            }

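            /* With KVM the guest runs under hardware virtualization, so the
               TCG translation loop below is never entered. */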
            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(interrupt_request)) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                               CPU_INTERRUPT_FIQ |
                                               CPU_INTERRUPT_SMI |
                                               CPU_INTERRUPT_NMI);
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                            svm_check_intercept(SVM_EXIT_INIT);
                            do_cpu_init(env);
                            env->exception_index = EXCP_HALTED;
                            cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                            do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
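                            /* Taken when GIF is set and either virtual
                               interrupt masking is active with the host IF
                               (HF2_HIF) set, or plain IF is set and
                               interrupts are not inhibited by a MOV SS/STI
                               shadow. */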
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask) &&
                        (env->CP0_Status & (1 << CP0St_IE)) &&
                        !(env->CP0_Status & (1 << CP0St_EXL)) &&
                        !(env->CP0_Status & (1 << CP0St_ERL)) &&
                        !(env->hflags & MIPS_HFLAG_DM)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psret != 0)) {
                        int pil = env->interrupt_index & 15;
                        int type = env->interrupt_index & 0xf0;

                        if (((type == TT_EXTINT) &&
                             (pil == 15 || pil > env->psrpil)) ||
                            type != TT_EXTINT) {
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                            env->exception_index = env->interrupt_index;
                            do_interrupt(env);
                            env->interrupt_index = 0;
#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
                            cpu_check_irqs(env);
#endif
                            next_tb = 0;
                        }
                    } else if (interrupt_request & CPU_INTERRUPT_TIMER) {
                        //do_interrupt(0, 0, 0, 0, 0);
                        env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#endif
                /* Don't use the cached interrupt_request value,
                   do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#ifdef CONFIG_DEBUG_EXEC
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
                    regs_to_env();
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_ARM)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SPARC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_PPC)
                    log_cpu_state(env, 0);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#elif defined(TARGET_MICROBLAZE)
                    log_cpu_state(env, 0);
#elif defined(TARGET_MIPS)
                    log_cpu_state(env, 0);
#elif defined(TARGET_SH4)
                    log_cpu_state(env, 0);
#elif defined(TARGET_ALPHA)
                    log_cpu_state(env, 0);
#elif defined(TARGET_CRIS)
                    log_cpu_state(env, 0);
#else
#error unsupported target CPU
#endif
                }
#endif
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                {
                    if (next_tb != 0 &&
#ifdef CONFIG_KQEMU
                        (env->kqemu_enabled != 2) &&
#endif
                        tb->page_addr[1] == -1) {
                        tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                    }
                }
                spin_unlock(&tb_lock);
                env->current_tb = tb;

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                if (unlikely(env->exit_request))
                    env->current_tb = NULL;

                while (env->current_tb) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    env->current_tb = NULL;
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
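                        /* Translated code decrements icount_decr.u16.low as
                           it runs; a negative value here means the budget
                           ran out inside the TB.  icount_extra holds the
                           instructions that did not fit into the 16-bit
                           in-TB decrementer. */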
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
                }
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
#if defined(CONFIG_KQEMU)
#define MIN_CYCLE_BEFORE_SWITCH (100 * 1000)
                if (kqemu_is_ok(env) &&
                    (cpu_get_time_fast() - env->last_io_time) >= MIN_CYCLE_BEFORE_SWITCH) {
                    cpu_loop_exit();
                }
#endif
            } /* for(;;) */
        } else {
            env_to_regs();
        }
    } /* for(;;) */


#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?.  */
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
#include "hostregs_helper.h"

    /* fail safe : never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet because it yields to MMU exception
       where NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

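/* In real mode or VM86 mode a selector directly supplies the segment base
   (selector << 4); in protected mode the descriptor tables must be
   consulted, which helper_load_seg() does. */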
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception and otherwise 0. 'old_set' is the
   signal set which should be restored */
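/* Returns 1 if the fault was handled (the page was unprotected or a guest
   exception was raised), 0 if it was not a guest MMU fault and must be
   handled by the host. */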
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_x86_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: EIP=0x%08x CR2=0x%08x error=0x%x\n",
               env->eip, env->cr[2], env->error_code);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        raise_exception_err(env->exception_index, env->error_code);
    } else {
        /* activate soft MMU for this block */
        env->hflags |= HF_SOFTMMU_MASK;
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_ARM)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_arm_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined(TARGET_SPARC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_sparc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_PPC)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_ppc_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: NIP=0x%08x error=0x%x %p\n",
               env->nip, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined(TARGET_M68K)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(address, pc, puc)) {
        return 1;
    }
    /* see if it is an MMU fault */
    ret = cpu_m68k_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MIPS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mips_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_MICROBLAZE)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_mb_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    if (ret == 1) {
#if 0
        printf("PF exception: PC=0x" TARGET_FMT_lx " error=0x%x %p\n",
               env->PC, env->error_code, tb);
#endif
        /* we restore the process signal mask as the sigreturn should
           do it (XXX: use sigsetjmp) */
        sigprocmask(SIG_SETMASK, old_set, NULL);
        cpu_loop_exit();
    } else {
        /* activate soft MMU for this block */
        cpu_resume_from_signal(env, puc);
    }
    /* never comes here */
    return 1;
}

#elif defined (TARGET_SH4)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_sh4_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#elif defined (TARGET_ALPHA)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_alpha_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
#if 0
    printf("PF exception: NIP=0x%08x error=0x%x %p\n",
           env->nip, env->error_code, tb);
#endif
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}
#elif defined (TARGET_CRIS)
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
           pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_cris_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */

    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc, puc);
    }
    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    cpu_loop_exit();
    /* never comes here */
    return 1;
}

#else
#error unsupported target CPU
#endif

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
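    /* Trap 0xe is the x86 page-fault vector; bit 1 of the page-fault error
       code is set when the faulting access was a write. */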
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#ifdef __NetBSD__
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)  ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)      REG_sig(r##reg_num, context)
# define IAR_sig(context)               REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)               REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)    FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)             ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)               EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)             EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)              EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
1364 #if 0
1365     /* ppc 4xx case */
1366     if (DSISR_sig(uc) & 0x00800000)
1367         is_write = 1;
1368 #else
1369     if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
1370         is_write = 1;
1371 #endif
1372     return handle_cpu_signal(pc, (unsigned long)info->si_addr,
1373                              is_write, &uc->uc_sigmask, puc);
1374 }

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_pc;
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    /* The major opcode is in bits 31:26; the cases below are the
       Alpha store instructions. */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define ? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch ((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}
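
/* Illustrative sketch only, not part of the original file: sparc loads
   and stores are "format 3" instructions, identified by
   insn<31:30> == 3, with the opcode proper in the op3 field,
   insn<24:19>.  The code above tests exactly those two fields;
   written as a predicate (function name invented), the same decode
   reads: */
#if 0
static int sparc_insn_is_store(uint32_t insn)
{
    if ((insn >> 30) != 3)             /* not a format-3 (memory) insn */
        return 0;
    switch ((insn >> 19) & 0x3f) {     /* op3 opcode field */
    case 0x04: case 0x05: case 0x06: case 0x07: /* st, stb, sth, std */
    case 0x14: case 0x15: case 0x16: case 0x17: /* sta, stba, stha, stda */
    case 0x0e: case 0x1e:                       /* stx, stxa */
    case 0x24: case 0x25: case 0x26: case 0x27: /* stf, stfsr, stqf, stdf */
    case 0x34: case 0x36: case 0x37:            /* stfa, stqfa, stdfa */
    case 0x3c: case 0x3e:                       /* casa, casxa */
        return 1;
    }
    return 0;
}
#endif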

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];    /* gregs[16] holds the PC on m68k */
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID 1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID)) {
            /* ISR.W (write-access) is bit 33: */
            is_write = (info->si_isr >> 33) & 1;
        }
        break;
    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.psw.addr;
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    /* Front of the instruction-address-offset queue, i.e. the
       faulting PC. */
    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */
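
/* Illustrative sketch only, not part of the original file: every
   variant of cpu_signal_handler() above expects the (siginfo_t *,
   ucontext *) pair the kernel passes to an SA_SIGINFO handler, and
   returns nonzero when the fault belonged to translated guest code
   and was fixed up (it may also never return, when a guest exception
   is raised instead).  The real registration lives elsewhere in QEMU;
   a minimal stand-alone version could look like this (function names
   other than cpu_signal_handler are invented): */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <string.h>

static void host_fault_handler(int signum, siginfo_t *info, void *uc)
{
    if (!cpu_signal_handler(signum, info, uc)) {
        abort();    /* not a guest fault: a real bug in QEMU itself */
    }
}

static void install_host_fault_handler(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    sigfillset(&act.sa_mask);      /* block other signals while handling */
    act.sa_flags = SA_SIGINFO;     /* deliver siginfo_t and ucontext */
    act.sa_sigaction = host_fault_handler;
    sigaction(SIGSEGV, &act, NULL);
    sigaction(SIGBUS, &act, NULL);
}
#endif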