1 /*
2  *  Host code generation
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #ifdef _WIN32
20 #include <windows.h>
21 #else
22 #include <sys/types.h>
23 #include <sys/mman.h>
24 #endif
25 #include <stdarg.h>
26 #include <stdlib.h>
27 #include <stdio.h>
28 #include <string.h>
29 #include <inttypes.h>
30 
31 #include "config.h"
32 
33 #include "qemu-common.h"
34 #define NO_CPU_IO_DEFS
35 #include "cpu.h"
36 #include "exec/exec-all.h"
37 #include "disas/disas.h"
38 #include "tcg.h"
39 #include "exec/cputlb.h"
40 #include "translate-all.h"
41 #include "qemu/timer.h"
42 
43 //#define DEBUG_TB_INVALIDATE
44 //#define DEBUG_FLUSH
45 /* make various TB consistency checks */
46 //#define DEBUG_TB_CHECK
47 
48 #if !defined(CONFIG_USER_ONLY)
49 /* TB consistency checks only implemented for usermode emulation.  */
50 #undef DEBUG_TB_CHECK
51 #endif
52 
53 #define SMC_BITMAP_USE_THRESHOLD 10
54 
55 typedef struct PageDesc {
56     /* list of TBs intersecting this ram page */
57     TranslationBlock *first_tb;
58     /* in order to optimize self-modifying code, we count the code
59        write accesses to a given page; past a threshold, a bitmap is used */
60     unsigned int code_write_count;
61     uint8_t *code_bitmap;
62 #if defined(CONFIG_USER_ONLY)
63     unsigned long flags;
64 #endif
65 } PageDesc;
66 
67 /* In system mode we want L1_MAP to be based on ram offsets,
68    while in user mode we want it to be based on virtual addresses.  */
69 #if !defined(CONFIG_USER_ONLY)
70 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
71 # define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
72 #else
73 # define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
74 #endif
75 #else
76 # define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
77 #endif
78 
79 /* The bits remaining after N lower levels of page tables.  */
80 #define V_L1_BITS_REM \
81     ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
82 
83 #if V_L1_BITS_REM < 4
84 #define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
85 #else
86 #define V_L1_BITS  V_L1_BITS_REM
87 #endif
88 
89 #define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
90 
91 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
92 
93 uintptr_t qemu_real_host_page_size;
94 uintptr_t qemu_host_page_size;
95 uintptr_t qemu_host_page_mask;
96 
97 /* This is a multi-level map on the virtual address space.
98    The bottom level has pointers to PageDesc.  */
99 static void *l1_map[V_L1_SIZE];
100 static void *l1_phys_map[V_L1_SIZE];
101 
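/* Illustrative sketch (not from the original source): how the macros
   above split a page index across the radix tree.  The top V_L1_BITS
   select a slot in the fixed level-1 array, and each lower level
   consumes another L2_BITS.  example_split_index() is a hypothetical
   helper shown only for exposition.  */
#if 0
static void example_split_index(tb_page_addr_t index)
{
    size_t l1_slot = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);
    /* the loop in page_find_alloc() peels off L2_BITS per level: */
    size_t next_slot = (index >> ((V_L1_SHIFT / L2_BITS - 1) * L2_BITS))
                       & (L2_SIZE - 1);
    printf("level-1 slot %zu, next-level slot %zu\n", l1_slot, next_slot);
}
#endif
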
102 /* code generation context */
103 TCGContext tcg_ctx;
104 
105 /* XXX: remove this */
106 unsigned long code_gen_max_block_size(void)
107 {
108     static unsigned long max;
109 
110     if (max == 0) {
111         max = TCG_MAX_OP_SIZE;
112 #define DEF(name, iarg, oarg, carg, flags) DEF2((iarg) + (oarg) + (carg))
113 #define DEF2(copy_size) max = (copy_size > max) ? copy_size : max;
114 #include "tcg-opc.h"
115 #undef DEF
116 #undef DEF2
117         max *= OPC_MAX_SIZE;
118     }
119 
120     return max;
121 }
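
/* A self-contained sketch (not from the original source) of the
   DEF/DEF2 X-macro trick used above: tcg-opc.h expands one DEF(...)
   per TCG opcode, so redefining DEF before including it turns the
   opcode list into a max computation.  The three opcodes below are
   hypothetical stand-ins for that list.  */
#if 0
#define EXAMPLE_OPC_LIST(DEF) \
    DEF(mov, 1, 1, 0)         \
    DEF(add, 2, 1, 0)         \
    DEF(call, 3, 1, 2)
static unsigned long example_max_args(void)
{
    unsigned long max = 0;
#define DEF(name, iarg, oarg, carg) \
    max = ((iarg) + (oarg) + (carg) > max) ? (iarg) + (oarg) + (carg) : max;
    EXAMPLE_OPC_LIST(DEF)
#undef DEF
    return max;               /* 6 for the hypothetical list above */
}
#endif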
122 
123 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
124                          tb_page_addr_t phys_page2);
125 
126 void cpu_gen_init(void)
127 {
128     tcg_context_init(&tcg_ctx);
129 }
130 
131 /* return non-zero if the very first instruction is invalid so that
132    the virtual CPU can trigger an exception.
133 
134    '*gen_code_size_ptr' contains the size of the generated code (host
135    code).
136 */
137 int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
138 {
139     TCGContext *s = &tcg_ctx;
140     uint8_t *gen_code_buf;
141     int gen_code_size;
142 #ifdef CONFIG_PROFILER
143     int64_t ti;
144 #endif
145 
146 #ifdef CONFIG_PROFILER
147     s->tb_count1++; /* includes aborted translations because of
148                        exceptions */
149     ti = profile_getclock();
150 #endif
151     tcg_func_start(s);
152 
153     gen_intermediate_code(env, tb);
154 
155     /* generate machine code */
156     gen_code_buf = tb->tc_ptr;
157     tb->tb_next_offset[0] = 0xffff;
158     tb->tb_next_offset[1] = 0xffff;
159     s->tb_next_offset = tb->tb_next_offset;
160 #ifdef USE_DIRECT_JUMP
161     s->tb_jmp_offset = tb->tb_jmp_offset;
162     s->tb_next = NULL;
163     /* the following two entries are optional (only used for string ops) */
164     /* XXX: not used ? */
165     tb->tb_jmp_offset[2] = 0xffff;
166     tb->tb_jmp_offset[3] = 0xffff;
167 #else
168     s->tb_jmp_offset = NULL;
169     s->tb_next = tb->tb_next;
170 #endif
171 
172 #ifdef CONFIG_PROFILER
173     s->tb_count++;
174     s->interm_time += profile_getclock() - ti;
175     s->code_time -= profile_getclock();
176 #endif
177     gen_code_size = tcg_gen_code(s, gen_code_buf);
178     *gen_code_size_ptr = gen_code_size;
179 #ifdef CONFIG_PROFILER
180     s->code_time += profile_getclock();
181     s->code_in_len += tb->size;
182     s->code_out_len += gen_code_size;
183 #endif
184 
185 #ifdef DEBUG_DISAS
186     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
187         qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
188         log_disas(tb->tc_ptr, *gen_code_size_ptr);
189         qemu_log("\n");
190         qemu_log_flush();
191     }
192 #endif
193     return 0;
194 }
195 
196 /* The cpu state corresponding to 'searched_pc' is restored.
197  */
198 static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
199                                      uintptr_t searched_pc)
200 {
201     TCGContext *s = &tcg_ctx;
202     int j;
203     uintptr_t tc_ptr;
204 #ifdef CONFIG_PROFILER
205     int64_t ti;
206 #endif
207 
208 #ifdef CONFIG_PROFILER
209     ti = profile_getclock();
210 #endif
211     tcg_func_start(s);
212 
213     gen_intermediate_code_pc(env, tb);
214 
215     if (use_icount) {
216         /* Reset the cycle counter to the start of the block.  */
217         env->icount_decr.u16.low += tb->icount;
218         /* Clear the IO flag.  */
219         env->can_do_io = 0;
220     }
221 
222     /* find opc index corresponding to search_pc */
223     tc_ptr = (uintptr_t)tb->tc_ptr;
224     if (searched_pc < tc_ptr)
225         return -1;
226 
227     s->tb_next_offset = tb->tb_next_offset;
228 #ifdef USE_DIRECT_JUMP
229     s->tb_jmp_offset = tb->tb_jmp_offset;
230     s->tb_next = NULL;
231 #else
232     s->tb_jmp_offset = NULL;
233     s->tb_next = tb->tb_next;
234 #endif
235     j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
236     if (j < 0)
237         return -1;
238     /* now walk back to the start of the guest instruction */
239     while (s->gen_opc_instr_start[j] == 0) {
240         j--;
241     }
242     env->icount_decr.u16.low -= s->gen_opc_icount[j];
243 
244     restore_state_to_opc(env, tb, j);
245 
246 #ifdef CONFIG_PROFILER
247     s->restore_time += profile_getclock() - ti;
248     s->restore_count++;
249 #endif
250     return 0;
251 }
252 
253 bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
254 {
255     TranslationBlock *tb;
256 
257     tb = tb_find_pc(retaddr);
258     if (tb) {
259         cpu_restore_state_from_tb(tb, env, retaddr);
260         return true;
261     }
262     return false;
263 }
264 
265 #ifdef _WIN32
266 static inline void map_exec(void *addr, long size)
267 {
268     DWORD old_protect;
269     VirtualProtect(addr, size,
270                    PAGE_EXECUTE_READWRITE, &old_protect);
271 }
272 #else
273 static inline void map_exec(void *addr, long size)
274 {
275     unsigned long start, end, page_size;
276 
277     page_size = getpagesize();
278     start = (unsigned long)addr;
279     start &= ~(page_size - 1);
280 
281     end = (unsigned long)addr + size;
282     end += page_size - 1;
283     end &= ~(page_size - 1);
284 
285     mprotect((void *)start, end - start,
286              PROT_READ | PROT_WRITE | PROT_EXEC);
287 }
288 #endif
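
/* Usage sketch (assumption: a heap buffer that must become executable).
   map_exec() widens the range to whole host pages because mprotect()
   and VirtualProtect() operate at page granularity.  */
#if 0
static void example_make_executable(void)
{
    uint8_t *buf = malloc(4096);
    /* ... emit host machine code into buf ... */
    map_exec(buf, 4096);   /* every page touched by buf becomes RWX */
}
#endif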
289 
290 static void page_init(void)
291 {
292     /* NOTE: we can always suppose that qemu_host_page_size >=
293        TARGET_PAGE_SIZE */
294 #ifdef _WIN32
295     {
296         SYSTEM_INFO system_info;
297 
298         GetSystemInfo(&system_info);
299         qemu_real_host_page_size = system_info.dwPageSize;
300     }
301 #else
302     qemu_real_host_page_size = getpagesize();
303 #endif
304     if (qemu_host_page_size == 0) {
305         qemu_host_page_size = qemu_real_host_page_size;
306     }
307     if (qemu_host_page_size < TARGET_PAGE_SIZE) {
308         qemu_host_page_size = TARGET_PAGE_SIZE;
309     }
310     qemu_host_page_mask = ~(qemu_host_page_size - 1);
311 
312 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
313     {
314 #ifdef HAVE_KINFO_GETVMMAP
315         struct kinfo_vmentry *freep;
316         int i, cnt;
317 
318         freep = kinfo_getvmmap(getpid(), &cnt);
319         if (freep) {
320             mmap_lock();
321             for (i = 0; i < cnt; i++) {
322                 unsigned long startaddr, endaddr;
323 
324                 startaddr = freep[i].kve_start;
325                 endaddr = freep[i].kve_end;
326                 if (h2g_valid(startaddr)) {
327                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
328 
329                     if (h2g_valid(endaddr)) {
330                         endaddr = h2g(endaddr);
331                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
332                     } else {
333 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
334                         endaddr = ~0ul;
335                         page_set_flags(startaddr, endaddr, PAGE_RESERVED);
336 #endif
337                     }
338                 }
339             }
340             free(freep);
341             mmap_unlock();
342         }
343 #else
344         FILE *f;
345 
346         last_brk = (unsigned long)sbrk(0);
347 
348         f = fopen("/compat/linux/proc/self/maps", "r");
349         if (f) {
350             mmap_lock();
351 
352             do {
353                 unsigned long startaddr, endaddr;
354                 int n;
355 
356                 n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
357 
358                 if (n == 2 && h2g_valid(startaddr)) {
359                     startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
360 
361                     if (h2g_valid(endaddr)) {
362                         endaddr = h2g(endaddr);
363                     } else {
364                         endaddr = ~0ul;
365                     }
366                     page_set_flags(startaddr, endaddr, PAGE_RESERVED);
367                 }
368             } while (!feof(f));
369 
370             fclose(f);
371             mmap_unlock();
372         }
373 #endif
374     }
375 #endif
376 }
377 
378 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
379 {
380     PageDesc *pd;
381     void **lp;
382     int i;
383 
384 #if defined(CONFIG_USER_ONLY)
385     /* We can't use g_malloc because it may recurse into a locked mutex. */
386 # define ALLOC(P, SIZE)                                 \
387     do {                                                \
388         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
389                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
390     } while (0)
391 #else
392 # define ALLOC(P, SIZE) \
393     do { P = g_malloc0(SIZE); } while (0)
394 #endif
395 
396     /* Level 1.  Always allocated.  */
397     lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
398 
399     /* Level 2..N-1.  */
400     for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
401         void **p = *lp;
402 
403         if (p == NULL) {
404             if (!alloc) {
405                 return NULL;
406             }
407             ALLOC(p, sizeof(void *) * L2_SIZE);
408             *lp = p;
409         }
410 
411         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
412     }
413 
414     pd = *lp;
415     if (pd == NULL) {
416         if (!alloc) {
417             return NULL;
418         }
419         ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
420         *lp = pd;
421     }
422 
423 #undef ALLOC
424 
425     return pd + (index & (L2_SIZE - 1));
426 }
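
/* Illustrative call sequence (not from the original source) for the
   radix tree above: a first non-allocating probe distinguishes "page
   never seen" from "page tracked", and a second call with alloc=1
   builds the missing path on demand.  */
#if 0
static void example_track_page(tb_page_addr_t addr)
{
    tb_page_addr_t index = addr >> TARGET_PAGE_BITS;
    PageDesc *pd = page_find_alloc(index, 0);   /* NULL if never seen */
    if (pd == NULL) {
        pd = page_find_alloc(index, 1);         /* allocates the path */
    }
    pd->code_write_count++;                     /* descriptor now valid */
}
#endif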
427 
428 static inline PageDesc *page_find(tb_page_addr_t index)
429 {
430     return page_find_alloc(index, 0);
431 }
432 
433 PhysPageDesc *phys_page_find_alloc(hwaddr index, int alloc)
434 {
435     void **lp;
436     PhysPageDesc *pd;
437     int i;
438 
439     /* Level 1. Always allocated. */
440     lp = l1_phys_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
441 
442     /* Level 2..N-1 */
443     for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
444         void **p = *lp;
445 
446         if (p == NULL) {
447             if (!alloc) {
448                 return NULL;
449             }
450             p = g_malloc0(sizeof(void *) * L2_SIZE);
451             *lp = p;
452         }
453 
454         lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
455     }
456 
457     pd = *lp;
458     if (pd == NULL) {
459         if (!alloc) {
460             return NULL;
461         }
462         pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
463         *lp = pd;
464         for (i = 0; i < L2_SIZE; i++) {
465             pd[i].phys_offset = IO_MEM_UNASSIGNED;
466             pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
467         }
468     }
469     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
470 }
471 
472 PhysPageDesc *phys_page_find(hwaddr index)
473 {
474     return phys_page_find_alloc(index, 0);
475 }
476 
477 #if !defined(CONFIG_USER_ONLY)
478 #define mmap_lock() do { } while (0)
479 #define mmap_unlock() do { } while (0)
480 #endif
481 
482 #if defined(CONFIG_USER_ONLY)
483 /* Currently it is not recommended to allocate big chunks of data in
484    user mode. This will change when a dedicated libc is used.  */
485 /* ??? 64-bit hosts ought to have no problem mmaping data outside the
486    region in which the guest needs to run.  Revisit this.  */
487 #define USE_STATIC_CODE_GEN_BUFFER
488 #endif
489 
490 /* ??? Should configure for this, not list operating systems here.  */
491 #if (defined(__linux__) \
492     || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
493     || defined(__DragonFly__) || defined(__OpenBSD__) \
494     || defined(__NetBSD__))
495 # define USE_MMAP
496 #endif
497 
498 /* Minimum size of the code gen buffer.  This number is randomly chosen,
499    but not so small that we can't have a fair number of TB's live.  */
500 #define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)
501 
502 /* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
503    indicated, this is constrained by the range of direct branches on the
504    host cpu, as used by the TCG implementation of goto_tb.  */
505 #if defined(__x86_64__)
506 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
507 #elif defined(__sparc__)
508 # define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
509 #elif defined(__aarch64__)
510 # define MAX_CODE_GEN_BUFFER_SIZE  (128ul * 1024 * 1024)
511 #elif defined(__arm__)
512 # define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
513 #elif defined(__s390x__)
514   /* We have a +- 4GB range on the branches; leave some slop.  */
515 # define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
516 #else
517 # define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
518 #endif
519 
520 #define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)
521 
522 #define DEFAULT_CODE_GEN_BUFFER_SIZE \
523   (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
524    ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
525 
526 static inline size_t size_code_gen_buffer(size_t tb_size)
527 {
528     /* Size the buffer.  */
529     if (tb_size == 0) {
530 #ifdef USE_STATIC_CODE_GEN_BUFFER
531         tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
532 #else
533         /* ??? Needs adjustments.  */
534         /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
535            static buffer, we could size this on RESERVED_VA, on the text
536            segment size of the executable, or continue to use the default.  */
537         tb_size = (unsigned long)(ram_size / 4);
538 #endif
539     }
540     if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
541         tb_size = MIN_CODE_GEN_BUFFER_SIZE;
542     }
543     if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
544         tb_size = MAX_CODE_GEN_BUFFER_SIZE;
545     }
546     tcg_ctx.code_gen_buffer_size = tb_size;
547     return tb_size;
548 }
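
/* Worked example of the clamping above, assuming the x86-64 limits
   defined earlier (1 MiB minimum, 2 GiB maximum): */
#if 0
static void example_sizing(void)
{
    size_t dflt = size_code_gen_buffer(0);               /* default size    */
    size_t tiny = size_code_gen_buffer(64 * 1024);       /* raised to 1 MiB */
    size_t huge = size_code_gen_buffer((size_t)8 << 30); /* capped at 2 GiB */
    printf("%zu %zu %zu\n", dflt, tiny, huge);
}
#endif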
549 
550 #ifdef USE_STATIC_CODE_GEN_BUFFER
551 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
552     __attribute__((aligned(CODE_GEN_ALIGN)));
553 
554 static inline void *alloc_code_gen_buffer(void)
555 {
556     map_exec(static_code_gen_buffer, tcg_ctx.code_gen_buffer_size);
557     return static_code_gen_buffer;
558 }
559 #elif defined(USE_MMAP)
560 static inline void *alloc_code_gen_buffer(void)
561 {
562     int flags = MAP_PRIVATE | MAP_ANONYMOUS;
563     uintptr_t start = 0;
564     void *buf;
565 
566     /* Constrain the position of the buffer based on the host cpu.
567        Note that these addresses are chosen in concert with the
568        addresses assigned in the relevant linker script file.  */
569 # if defined(__PIE__) || defined(__PIC__)
570     /* Don't bother setting a preferred location if we're building
571        a position-independent executable.  We're more likely to get
572        an address near the main executable if we let the kernel
573        choose the address.  */
574 # elif defined(__x86_64__) && defined(MAP_32BIT)
575     /* Force the memory down into low memory with the executable.
576        Leave the choice of exact location with the kernel.  */
577     flags |= MAP_32BIT;
578     /* Cannot expect to map more than 800MB in low memory.  */
579     if (tcg_ctx.code_gen_buffer_size > 800u * 1024 * 1024) {
580         tcg_ctx.code_gen_buffer_size = 800u * 1024 * 1024;
581     }
582 # elif defined(__sparc__)
583     start = 0x40000000ul;
584 # elif defined(__s390x__)
585     start = 0x90000000ul;
586 # endif
587 
588     buf = mmap((void *)start, tcg_ctx.code_gen_buffer_size,
589                PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
590     return buf == MAP_FAILED ? NULL : buf;
591 }
592 #else
593 static inline void *alloc_code_gen_buffer(void)
594 {
595     void *buf = g_malloc(tcg_ctx.code_gen_buffer_size);
596 
597     if (buf) {
598         map_exec(buf, tcg_ctx.code_gen_buffer_size);
599     }
600     return buf;
601 }
602 #endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
603 
604 static inline void code_gen_alloc(size_t tb_size)
605 {
606     tcg_ctx.code_gen_buffer_size = size_code_gen_buffer(tb_size);
607     tcg_ctx.code_gen_buffer = alloc_code_gen_buffer();
608     if (tcg_ctx.code_gen_buffer == NULL) {
609         fprintf(stderr, "Could not allocate dynamic translator buffer\n");
610         exit(1);
611     }
612 
613     qemu_madvise(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size,
614             QEMU_MADV_HUGEPAGE);
615 
616     /* Steal room for the prologue at the end of the buffer.  This ensures
617        (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
618        from TB's to the prologue are going to be in range.  It also means
619        that we don't need to mark (additional) portions of the data segment
620        as executable.  */
621     tcg_ctx.code_gen_prologue = tcg_ctx.code_gen_buffer +
622             tcg_ctx.code_gen_buffer_size - 1024;
623     tcg_ctx.code_gen_buffer_size -= 1024;
624 
625     tcg_ctx.code_gen_buffer_max_size = tcg_ctx.code_gen_buffer_size -
626         (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
627     tcg_ctx.code_gen_max_blocks = tcg_ctx.code_gen_buffer_size /
628             CODE_GEN_AVG_BLOCK_SIZE;
629     tcg_ctx.tb_ctx.tbs =
630             g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
631 }
632 
633 /* Must be called before using the QEMU cpus. 'tb_size' is the size
634    (in bytes) allocated to the translation buffer. Zero means default
635    size. */
636 void tcg_exec_init(unsigned long tb_size)
637 {
638     cpu_gen_init();
639     code_gen_alloc(tb_size);
640     tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
641     page_init();
642 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
643     /* There's no guest base to take into account, so go ahead and
644        initialize the prologue now.  */
645     tcg_prologue_init(&tcg_ctx);
646 #endif
647 }
648 
649 bool tcg_enabled(void)
650 {
651     return tcg_ctx.code_gen_buffer != NULL;
652 }
653 
654 /* Allocate a new translation block. Flush the translation buffer if
655    too many translation blocks or too much generated code. */
656 static TranslationBlock *tb_alloc(target_ulong pc)
657 {
658     TranslationBlock *tb;
659 
660     if (tcg_ctx.tb_ctx.nb_tbs >= tcg_ctx.code_gen_max_blocks ||
661         (tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer) >=
662          tcg_ctx.code_gen_buffer_max_size) {
663         return NULL;
664     }
665     tb = &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs++];
666     tb->pc = pc;
667     tb->cflags = 0;
668     return tb;
669 }
670 
671 void tb_free(TranslationBlock *tb)
672 {
673     /* In practice this is mostly used for single-use temporary TBs.
674        Ignore the hard cases and just back up if this TB happens to
675        be the last one generated.  */
676     if (tcg_ctx.tb_ctx.nb_tbs > 0 &&
677             tb == &tcg_ctx.tb_ctx.tbs[tcg_ctx.tb_ctx.nb_tbs - 1]) {
678         tcg_ctx.code_gen_ptr = tb->tc_ptr;
679         tcg_ctx.tb_ctx.nb_tbs--;
680     }
681 }
682 
683 static inline void invalidate_page_bitmap(PageDesc *p)
684 {
685     if (p->code_bitmap) {
686         g_free(p->code_bitmap);
687         p->code_bitmap = NULL;
688     }
689     p->code_write_count = 0;
690 }
691 
692 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
693 static void page_flush_tb_1(int level, void **lp)
694 {
695     int i;
696 
697     if (*lp == NULL) {
698         return;
699     }
700     if (level == 0) {
701         PageDesc *pd = *lp;
702 
703         for (i = 0; i < L2_SIZE; ++i) {
704             pd[i].first_tb = NULL;
705             invalidate_page_bitmap(pd + i);
706         }
707     } else {
708         void **pp = *lp;
709 
710         for (i = 0; i < L2_SIZE; ++i) {
711             page_flush_tb_1(level - 1, pp + i);
712         }
713     }
714 }
715 
716 static void page_flush_tb(void)
717 {
718     int i;
719 
720     for (i = 0; i < V_L1_SIZE; i++) {
721         page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
722     }
723 }
724 
725 /* flush all the translation blocks */
726 /* XXX: tb_flush is currently not thread safe */
727 void tb_flush(CPUArchState *env1)
728 {
729     CPUState *cpu;
730 #if defined(DEBUG_FLUSH)
731     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
732            (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
733            tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.tb_ctx.nb_tbs > 0 ?
734            ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)) /
735            tcg_ctx.tb_ctx.nb_tbs : 0);
736 #endif
737     if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
738         > tcg_ctx.code_gen_buffer_size) {
739         cpu_abort(env1, "Internal error: code buffer overflow\n");
740     }
741     tcg_ctx.tb_ctx.nb_tbs = 0;
742 
743     CPU_FOREACH(cpu) {
744         CPUArchState *env = cpu->env_ptr;
745         memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
746     }
747 
748     memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
749             CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
750     page_flush_tb();
751 
752     tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
753     /* XXX: flush processor icache at this point if cache flush is
754        expensive */
755     tcg_ctx.tb_ctx.tb_flush_count++;
756 }
757 
758 #ifdef DEBUG_TB_CHECK
759 
760 static void tb_invalidate_check(target_ulong address)
761 {
762     TranslationBlock *tb;
763     int i;
764 
765     address &= TARGET_PAGE_MASK;
766     for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
767         for (tb = tb_ctx.tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
768             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
769                   address >= tb->pc + tb->size)) {
770                 printf("ERROR invalidate: address=" TARGET_FMT_lx
771                        " PC=%08lx size=%04x\n",
772                        address, (long)tb->pc, tb->size);
773             }
774         }
775     }
776 }
777 
778 /* verify that all the pages have correct rights for code */
779 static void tb_page_check(void)
780 {
781     TranslationBlock *tb;
782     int i, flags1, flags2;
783 
784     for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
785         for (tb = tcg_ctx.tb_ctx.tb_phys_hash[i]; tb != NULL;
786                 tb = tb->phys_hash_next) {
787             flags1 = page_get_flags(tb->pc);
788             flags2 = page_get_flags(tb->pc + tb->size - 1);
789             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
790                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
791                        (long)tb->pc, tb->size, flags1, flags2);
792             }
793         }
794     }
795 }
796 
797 #endif
798 
799 static inline void tb_hash_remove(TranslationBlock **ptb, TranslationBlock *tb)
800 {
801     TranslationBlock *tb1;
802 
803     for (;;) {
804         tb1 = *ptb;
805         if (tb1 == tb) {
806             *ptb = tb1->phys_hash_next;
807             break;
808         }
809         ptb = &tb1->phys_hash_next;
810     }
811 }
812 
813 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
814 {
815     TranslationBlock *tb1;
816     unsigned int n1;
817 
818     for (;;) {
819         tb1 = *ptb;
820         n1 = (uintptr_t)tb1 & 3;
821         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
822         if (tb1 == tb) {
823             *ptb = tb1->page_next[n1];
824             break;
825         }
826         ptb = &tb1->page_next[n1];
827     }
828 }
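
/* Sketch (not from the original source) of the pointer-tagging
   convention used by these list walkers: the low two bits of a
   TranslationBlock pointer encode which page slot (0 or 1) the link
   belongs to, and tag value 2 marks the head sentinel in the jump
   lists.  Real pointers are sufficiently aligned for the low bits to
   be free.  */
#if 0
static TranslationBlock *example_tag(TranslationBlock *tb, unsigned slot)
{
    return (TranslationBlock *)((uintptr_t)tb | slot);      /* pack tag    */
}
static unsigned example_untag(TranslationBlock **tbp)
{
    unsigned slot = (uintptr_t)*tbp & 3;                    /* extract tag */
    *tbp = (TranslationBlock *)((uintptr_t)*tbp & ~3);      /* real ptr    */
    return slot;
}
#endif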
829 
830 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
831 {
832     TranslationBlock *tb1, **ptb;
833     unsigned int n1;
834 
835     ptb = &tb->jmp_next[n];
836     tb1 = *ptb;
837     if (tb1) {
838         /* find tb(n) in circular list */
839         for (;;) {
840             tb1 = *ptb;
841             n1 = (uintptr_t)tb1 & 3;
842             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
843             if (n1 == n && tb1 == tb) {
844                 break;
845             }
846             if (n1 == 2) {
847                 ptb = &tb1->jmp_first;
848             } else {
849                 ptb = &tb1->jmp_next[n1];
850             }
851         }
852         /* now we can remove tb(n) from the list */
853         *ptb = tb->jmp_next[n];
854 
855         tb->jmp_next[n] = NULL;
856     }
857 }
858 
859 /* reset the jump entry 'n' of a TB so that it is not chained to
860    another TB */
861 static inline void tb_reset_jump(TranslationBlock *tb, int n)
862 {
863     tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
864 }
865 
866 /* invalidate one TB */
867 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
868 {
869     CPUState *cpu;
870     PageDesc *p;
871     unsigned int h, n1;
872     tb_page_addr_t phys_pc;
873     TranslationBlock *tb1, *tb2;
874 
875     /* remove the TB from the hash list */
876     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
877     h = tb_phys_hash_func(phys_pc);
878     tb_hash_remove(&tcg_ctx.tb_ctx.tb_phys_hash[h], tb);
879 
880     /* remove the TB from the page list */
881     if (tb->page_addr[0] != page_addr) {
882         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
883         tb_page_remove(&p->first_tb, tb);
884         invalidate_page_bitmap(p);
885     }
886     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
887         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
888         tb_page_remove(&p->first_tb, tb);
889         invalidate_page_bitmap(p);
890     }
891 
892     tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
893 
894     /* remove the TB from the hash list */
895     h = tb_jmp_cache_hash_func(tb->pc);
896     CPU_FOREACH(cpu) {
897         CPUArchState *env = cpu->env_ptr;
898         if (env->tb_jmp_cache[h] == tb) {
899             env->tb_jmp_cache[h] = NULL;
900         }
901     }
902 
903     /* remove this TB from the two jump lists */
904     tb_jmp_remove(tb, 0);
905     tb_jmp_remove(tb, 1);
906 
907     /* remove any remaining jumps to this TB */
908     tb1 = tb->jmp_first;
909     for (;;) {
910         n1 = (uintptr_t)tb1 & 3;
911         if (n1 == 2) {
912             break;
913         }
914         tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
915         tb2 = tb1->jmp_next[n1];
916         tb_reset_jump(tb1, n1);
917         tb1->jmp_next[n1] = NULL;
918         tb1 = tb2;
919     }
920     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
921 
922     tcg_ctx.tb_ctx.tb_phys_invalidate_count++;
923 }
924 
925 static inline void set_bits(uint8_t *tab, int start, int len)
926 {
927     int end, mask, end1;
928 
929     end = start + len;
930     tab += start >> 3;
931     mask = 0xff << (start & 7);
932     if ((start & ~7) == (end & ~7)) {
933         if (start < end) {
934             mask &= ~(0xff << (end & 7));
935             *tab |= mask;
936         }
937     } else {
938         *tab++ |= mask;
939         start = (start + 8) & ~7;
940         end1 = end & ~7;
941         while (start < end1) {
942             *tab++ = 0xff;
943             start += 8;
944         }
945         if (start < end) {
946             mask = ~(0xff << (end & 7));
947             *tab |= mask;
948         }
949     }
950 }
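
/* Quick demonstration of set_bits() semantics: marking the 9 bits
   [3, 12) of a fresh bitmap sets the top five bits of byte 0 and the
   bottom four bits of byte 1.  */
#if 0
static void example_set_bits(void)
{
    uint8_t tab[2] = { 0, 0 };
    set_bits(tab, 3, 9);
    /* now tab[0] == 0xf8 and tab[1] == 0x0f */
}
#endif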
951 
952 static void build_page_bitmap(PageDesc *p)
953 {
954     int n, tb_start, tb_end;
955     TranslationBlock *tb;
956 
957     p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
958 
959     tb = p->first_tb;
960     while (tb != NULL) {
961         n = (uintptr_t)tb & 3;
962         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
963         /* NOTE: this is subtle as a TB may span two physical pages */
964         if (n == 0) {
965             /* NOTE: tb_end may be after the end of the page, but
966                it is not a problem */
967             tb_start = tb->pc & ~TARGET_PAGE_MASK;
968             tb_end = tb_start + tb->size;
969             if (tb_end > TARGET_PAGE_SIZE) {
970                 tb_end = TARGET_PAGE_SIZE;
971             }
972         } else {
973             tb_start = 0;
974             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
975         }
976         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
977         tb = tb->page_next[n];
978     }
979 }
980 
981 TranslationBlock *tb_gen_code(CPUArchState *env,
982                               target_ulong pc, target_ulong cs_base,
983                               int flags, int cflags)
984 {
985     TranslationBlock *tb;
986     uint8_t *tc_ptr;
987     tb_page_addr_t phys_pc, phys_page2;
988     target_ulong virt_page2;
989     int code_gen_size;
990 
991     phys_pc = get_page_addr_code(env, pc);
992     tb = tb_alloc(pc);
993     if (!tb) {
994         /* flush must be done */
995         tb_flush(env);
996         /* cannot fail at this point */
997         tb = tb_alloc(pc);
998         /* Don't forget to invalidate previous TB info.  */
999         tcg_ctx.tb_ctx.tb_invalidated_flag = 1;
1000     }
1001     tc_ptr = tcg_ctx.code_gen_ptr;
1002     tb->tc_ptr = tc_ptr;
1003     tb->cs_base = cs_base;
1004     tb->flags = flags;
1005     tb->cflags = cflags;
1006     cpu_gen_code(env, tb, &code_gen_size);
1007     tcg_ctx.code_gen_ptr = (void *)(((uintptr_t)tcg_ctx.code_gen_ptr +
1008             code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
1009 
1010     /* check next page if needed */
1011     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
1012     phys_page2 = -1;
1013     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
1014         phys_page2 = get_page_addr_code(env, virt_page2);
1015     }
1016     tb_link_page(tb, phys_pc, phys_page2);
1017     return tb;
1018 }
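
/* Worked example of the cross-page test above, assuming 4 KiB target
   pages: a 10-byte TB starting at pc 0x1ffa ends at 0x2003, so
   virt_page2 is 0x2000 while pc's page is 0x1000, and the second
   physical page must be resolved as well.  */
#if 0
static void example_cross_page(void)
{
    target_ulong pc = 0x1ffa, size = 10;
    target_ulong virt_page2 = (pc + size - 1) & TARGET_PAGE_MASK;
    /* (pc & TARGET_PAGE_MASK) != virt_page2, i.e. the TB spans pages */
}
#endif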
1019 
1020 /*
1021  * Invalidate all TBs which intersect with the target physical address range
1022  * [start;end[. NOTE: start and end may refer to *different* physical pages.
1023  * 'is_cpu_write_access' should be true if called from a real cpu write
1024  * access: the virtual CPU will exit the current TB if code is modified inside
1025  * this TB.
1026  */
1027 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1028                               int is_cpu_write_access)
1029 {
1030     while (start < end) {
1031         tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1032         start &= TARGET_PAGE_MASK;
1033         start += TARGET_PAGE_SIZE;
1034     }
1035 }
1036 
1037 /*
1038  * Invalidate all TBs which intersect with the target physical address range
1039  * [start;end[. NOTE: start and end must refer to the *same* physical page.
1040  * 'is_cpu_write_access' should be true if called from a real cpu write
1041  * access: the virtual CPU will exit the current TB if code is modified inside
1042  * this TB.
1043  */
1044 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1045                                    int is_cpu_write_access)
1046 {
1047     TranslationBlock *tb, *tb_next, *saved_tb;
1048     CPUState *cpu = current_cpu;
1049     CPUArchState *env = cpu ? cpu->env_ptr : NULL;
1050     tb_page_addr_t tb_start, tb_end;
1051     PageDesc *p;
1052     int n;
1053 #ifdef TARGET_HAS_PRECISE_SMC
1054     int current_tb_not_found = is_cpu_write_access;
1055     TranslationBlock *current_tb = NULL;
1056     int current_tb_modified = 0;
1057     target_ulong current_pc = 0;
1058     target_ulong current_cs_base = 0;
1059     int current_flags = 0;
1060 #endif /* TARGET_HAS_PRECISE_SMC */
1061 
1062     p = page_find(start >> TARGET_PAGE_BITS);
1063     if (!p) {
1064         return;
1065     }
1066     if (!p->code_bitmap &&
1067         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1068         is_cpu_write_access) {
1069         /* build code bitmap */
1070         build_page_bitmap(p);
1071     }
1072 
1073     /* we remove all the TBs in the range [start, end[ */
1074     /* XXX: see if in some cases it could be faster to invalidate all
1075        the code */
1076     tb = p->first_tb;
1077     while (tb != NULL) {
1078         n = (uintptr_t)tb & 3;
1079         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1080         tb_next = tb->page_next[n];
1081         /* NOTE: this is subtle as a TB may span two physical pages */
1082         if (n == 0) {
1083             /* NOTE: tb_end may be after the end of the page, but
1084                it is not a problem */
1085             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1086             tb_end = tb_start + tb->size;
1087         } else {
1088             tb_start = tb->page_addr[1];
1089             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1090         }
1091         if (!(tb_end <= start || tb_start >= end)) {
1092 #ifdef TARGET_HAS_PRECISE_SMC
1093             if (current_tb_not_found) {
1094                 current_tb_not_found = 0;
1095                 current_tb = NULL;
1096                 if (env->mem_io_pc) {
1097                     /* now we have a real cpu fault */
1098                     current_tb = tb_find_pc(env->mem_io_pc);
1099                 }
1100             }
1101             if (current_tb == tb &&
1102                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1103                 /* If we are modifying the current TB, we must stop
1104                 its execution. We could be more precise by checking
1105                 that the modification is after the current PC, but it
1106                 would require a specialized function to partially
1107                 restore the CPU state */
1108 
1109                 current_tb_modified = 1;
1110                 cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
1111                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1112                                      &current_flags);
1113             }
1114 #endif /* TARGET_HAS_PRECISE_SMC */
1115             /* we need to do that to handle the case where a signal
1116                occurs while doing tb_phys_invalidate() */
1117             saved_tb = NULL;
1118             if (env) {
1119                 saved_tb = env->current_tb;
1120                 env->current_tb = NULL;
1121             }
1122             tb_phys_invalidate(tb, -1);
1123             if (env) {
1124                 env->current_tb = saved_tb;
1125                 if (cpu->interrupt_request && env->current_tb) {
1126                     cpu_interrupt(cpu, cpu->interrupt_request);
1127                 }
1128             }
1129         }
1130         tb = tb_next;
1131     }
1132 #if !defined(CONFIG_USER_ONLY)
1133     /* if no code remaining, no need to continue to use slow writes */
1134     if (!p->first_tb) {
1135         invalidate_page_bitmap(p);
1136         if (is_cpu_write_access) {
1137             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1138         }
1139     }
1140 #endif
1141 #ifdef TARGET_HAS_PRECISE_SMC
1142     if (current_tb_modified) {
1143         /* we generate a block containing just the instruction
1144            modifying the memory. It will ensure that it cannot modify
1145            itself */
1146         env->current_tb = NULL;
1147         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1148         cpu_resume_from_signal(env, NULL);
1149     }
1150 #endif
1151 }
1152 
1153 /* len must be <= 8 and start must be a multiple of len */
1154 void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1155 {
1156     PageDesc *p;
1157     int offset, b;
1158 
1159 #if 0
1160     if (1) {
1161         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1162                   cpu_single_env->mem_io_vaddr, len,
1163                   cpu_single_env->eip,
1164                   cpu_single_env->eip +
1165                   (intptr_t)cpu_single_env->segs[R_CS].base);
1166     }
1167 #endif
1168     p = page_find(start >> TARGET_PAGE_BITS);
1169     if (!p) {
1170         return;
1171     }
1172     if (p->code_bitmap) {
1173         offset = start & ~TARGET_PAGE_MASK;
1174         b = p->code_bitmap[offset >> 3] >> (offset & 7);
1175         if (b & ((1 << len) - 1)) {
1176             goto do_invalidate;
1177         }
1178     } else {
1179     do_invalidate:
1180         tb_invalidate_phys_page_range(start, start + len, 1);
1181     }
1182 }
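
/* Sketch of the bitmap fast path above (values made up): once
   build_page_bitmap() has run, a small write only needs 'len' bits
   starting at its page offset checked before falling back to the full
   invalidate.  */
#if 0
static bool example_write_hits_code(PageDesc *p, int offset, int len)
{
    int b = p->code_bitmap[offset >> 3] >> (offset & 7);
    return (b & ((1 << len) - 1)) != 0;
}
#endif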
1183 
1184 void tb_invalidate_phys_page_fast0(hwaddr start, int len) {
1185     tb_invalidate_phys_page_fast(start, len);
1186 }
1187 
1188 #if !defined(CONFIG_SOFTMMU)
1189 static void tb_invalidate_phys_page(tb_page_addr_t addr,
1190                                     uintptr_t pc, void *puc,
1191                                     bool locked)
1192 {
1193     TranslationBlock *tb;
1194     PageDesc *p;
1195     int n;
1196 #ifdef TARGET_HAS_PRECISE_SMC
1197     TranslationBlock *current_tb = NULL;
1198     CPUArchState *env = cpu_single_env;
1199     int current_tb_modified = 0;
1200     target_ulong current_pc = 0;
1201     target_ulong current_cs_base = 0;
1202     int current_flags = 0;
1203 #endif
1204 
1205     addr &= TARGET_PAGE_MASK;
1206     p = page_find(addr >> TARGET_PAGE_BITS);
1207     if (!p) {
1208         return;
1209     }
1210     tb = p->first_tb;
1211 #ifdef TARGET_HAS_PRECISE_SMC
1212     if (tb && pc != 0) {
1213         current_tb = tb_find_pc(pc);
1214     }
1215 #endif
1216     while (tb != NULL) {
1217         n = (uintptr_t)tb & 3;
1218         tb = (TranslationBlock *)((uintptr_t)tb & ~3);
1219 #ifdef TARGET_HAS_PRECISE_SMC
1220         if (current_tb == tb &&
1221             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1222                 /* If we are modifying the current TB, we must stop
1223                    its execution. We could be more precise by checking
1224                    that the modification is after the current PC, but it
1225                    would require a specialized function to partially
1226                    restore the CPU state */
1227 
1228             current_tb_modified = 1;
1229             cpu_restore_state_from_tb(current_tb, env, pc);
1230             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1231                                  &current_flags);
1232         }
1233 #endif /* TARGET_HAS_PRECISE_SMC */
1234         tb_phys_invalidate(tb, addr);
1235         tb = tb->page_next[n];
1236     }
1237     p->first_tb = NULL;
1238 #ifdef TARGET_HAS_PRECISE_SMC
1239     if (current_tb_modified) {
1240         /* we generate a block containing just the instruction
1241            modifying the memory. It will ensure that it cannot modify
1242            itself */
1243         env->current_tb = NULL;
1244         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1245         if (locked) {
1246             mmap_unlock();
1247         }
1248         cpu_resume_from_signal(env, puc);
1249     }
1250 #endif
1251 }
1252 #endif
1253 
1254 /* add the tb in the target page and protect it if necessary */
1255 static inline void tb_alloc_page(TranslationBlock *tb,
1256                                  unsigned int n, tb_page_addr_t page_addr)
1257 {
1258     PageDesc *p;
1259 #ifndef CONFIG_USER_ONLY
1260     bool page_already_protected;
1261 #endif
1262 
1263     tb->page_addr[n] = page_addr;
1264     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1265     tb->page_next[n] = p->first_tb;
1266 #ifndef CONFIG_USER_ONLY
1267     page_already_protected = p->first_tb != NULL;
1268 #endif
1269     p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
1270     invalidate_page_bitmap(p);
1271 
1272 #if defined(TARGET_HAS_SMC) || 1
1273 
1274 #if defined(CONFIG_USER_ONLY)
1275     if (p->flags & PAGE_WRITE) {
1276         target_ulong addr;
1277         PageDesc *p2;
1278         int prot;
1279 
1280         /* force the host page as non writable (writes will have a
1281            page fault + mprotect overhead) */
1282         page_addr &= qemu_host_page_mask;
1283         prot = 0;
1284         for (addr = page_addr; addr < page_addr + qemu_host_page_size;
1285             addr += TARGET_PAGE_SIZE) {
1286 
1287             p2 = page_find(addr >> TARGET_PAGE_BITS);
1288             if (!p2) {
1289                 continue;
1290             }
1291             prot |= p2->flags;
1292             p2->flags &= ~PAGE_WRITE;
1293         }
1294         mprotect(g2h(page_addr), qemu_host_page_size,
1295                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1296 #ifdef DEBUG_TB_INVALIDATE
1297         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1298                page_addr);
1299 #endif
1300     }
1301 #else
1302     /* if some code is already present, then the pages are already
1303        protected. So we handle the case where only the first TB is
1304        allocated in a physical page */
1305     if (!page_already_protected) {
1306         tlb_protect_code(page_addr);
1307     }
1308 #endif
1309 
1310 #endif /* TARGET_HAS_SMC */
1311 }
1312 
1313 /* add a new TB and link it to the physical page tables. phys_page2 is
1314    (-1) to indicate that only one page contains the TB. */
1315 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
1316                          tb_page_addr_t phys_page2)
1317 {
1318     unsigned int h;
1319     TranslationBlock **ptb;
1320 
1321     /* Grab the mmap lock to stop another thread invalidating this TB
1322        before we are done.  */
1323     mmap_lock();
1324     /* add in the physical hash table */
1325     h = tb_phys_hash_func(phys_pc);
1326     ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
1327     tb->phys_hash_next = *ptb;
1328     *ptb = tb;
1329 
1330     /* add in the page list */
1331     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1332     if (phys_page2 != -1) {
1333         tb_alloc_page(tb, 1, phys_page2);
1334     } else {
1335         tb->page_addr[1] = -1;
1336     }
1337 
1338     tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
1339     tb->jmp_next[0] = NULL;
1340     tb->jmp_next[1] = NULL;
1341 
1342     /* init original jump addresses */
1343     if (tb->tb_next_offset[0] != 0xffff) {
1344         tb_reset_jump(tb, 0);
1345     }
1346     if (tb->tb_next_offset[1] != 0xffff) {
1347         tb_reset_jump(tb, 1);
1348     }
1349 
1350 #ifdef DEBUG_TB_CHECK
1351     tb_page_check();
1352 #endif
1353     mmap_unlock();
1354 }
1355 
1356 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1357    tb[1].tc_ptr. Return NULL if not found */
1358 TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
1359 {
1360     int m_min, m_max, m;
1361     uintptr_t v;
1362     TranslationBlock *tb;
1363 
1364     if (tcg_ctx.tb_ctx.nb_tbs <= 0) {
1365         return NULL;
1366     }
1367     if (tc_ptr < (uintptr_t)tcg_ctx.code_gen_buffer ||
1368         tc_ptr >= (uintptr_t)tcg_ctx.code_gen_ptr) {
1369         return NULL;
1370     }
1371     /* binary search (cf Knuth) */
1372     m_min = 0;
1373     m_max = tcg_ctx.tb_ctx.nb_tbs - 1;
1374     while (m_min <= m_max) {
1375         m = (m_min + m_max) >> 1;
1376         tb = &tcg_ctx.tb_ctx.tbs[m];
1377         v = (uintptr_t)tb->tc_ptr;
1378         if (v == tc_ptr) {
1379             return tb;
1380         } else if (tc_ptr < v) {
1381             m_max = m - 1;
1382         } else {
1383             m_min = m + 1;
1384         }
1385     }
1386     return &tcg_ctx.tb_ctx.tbs[m_max];
1387 }
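
/* Typical use of tb_find_pc() (sketch, mirroring cpu_restore_state()
   above): a host PC taken from a signal or call return address is
   mapped back to its TB so the guest CPU state can be rebuilt at the
   corresponding guest instruction.  */
#if 0
static void example_recover_state(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb != NULL) {
        cpu_restore_state_from_tb(tb, env, host_pc);
    }
}
#endif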
1388 
1389 #ifndef CONFIG_ANDROID
1390 #if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
1391 void tb_invalidate_phys_addr(hwaddr addr)
1392 {
1393     ram_addr_t ram_addr;
1394     MemoryRegion *mr;
1395     hwaddr l = 1;
1396 
1397     mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
1398     if (!(memory_region_is_ram(mr)
1399           || memory_region_is_romd(mr))) {
1400         return;
1401     }
1402     ram_addr = (memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK)
1403         + addr;
1404     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1405 }
1406 #endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
1407 
1408 void tb_check_watchpoint(CPUArchState *env)
1409 {
1410     TranslationBlock *tb;
1411 
1412     tb = tb_find_pc(env->mem_io_pc);
1413     if (!tb) {
1414         cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
1415                   (void *)env->mem_io_pc);
1416     }
1417     cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
1418     tb_phys_invalidate(tb, -1);
1419 }
1420 #endif  // !CONFIG_ANDROID
1421 
1422 #ifndef CONFIG_USER_ONLY
1423 /* mask must never be zero, except for A20 change call */
1424 void cpu_interrupt(CPUState *cpu, int mask)
1425 {
1426     CPUArchState *env = cpu->env_ptr;
1427     int old_mask;
1428 
1429     old_mask = cpu->interrupt_request;
1430     cpu->interrupt_request |= mask;
1431 
1432     /*
1433      * If called from iothread context, wake the target cpu in
1434      * case it's halted.
1435      */
1436     if (!qemu_cpu_is_self(cpu)) {
1437         qemu_cpu_kick(cpu);
1438         return;
1439     }
1440 
1441     if (use_icount) {
1442         env->icount_decr.u16.high = 0xffff;
1443         if (!can_do_io(env)
1444             && (mask & ~old_mask) != 0) {
1445             cpu_abort(env, "Raised interrupt while not in I/O function");
1446         }
1447     } else {
1448         // cpu->tcg_exit_req = 1;
1449         cpu_unlink_tb(env);
1450     }
1451 }
1452 
1453 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1454 {
1455     TranslationBlock *tb1, *tb_next, **ptb;
1456     unsigned int n1;
1457 
1458     tb1 = tb->jmp_next[n];
1459     if (tb1 != NULL) {
1460         /* find head of list */
1461         for (;;) {
1462             n1 = (uintptr_t)tb1 & 3;
1463             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1464             if (n1 == 2)
1465                 break;
1466             tb1 = tb1->jmp_next[n1];
1467         }
1468         /* we are now sure that tb jumps to tb1 */
1469         tb_next = tb1;
1470 
1471         /* remove tb from the jmp_first list */
1472         ptb = &tb_next->jmp_first;
1473         for (;;) {
1474             tb1 = *ptb;
1475             n1 = (uintptr_t)tb1 & 3;
1476             tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
1477             if (n1 == n && tb1 == tb)
1478                 break;
1479             ptb = &tb1->jmp_next[n1];
1480         }
1481         *ptb = tb->jmp_next[n];
1482         tb->jmp_next[n] = NULL;
1483 
1484         /* remove the jump to the next tb from the generated code */
1485         tb_reset_jump(tb, n);
1486 
1487         /* recursively reset jumps in the tb we used to jump to */
1488         tb_reset_jump_recursive(tb_next);
1489     }
1490 }
1491 
1492 void tb_reset_jump_recursive(TranslationBlock *tb)
1493 {
1494     tb_reset_jump_recursive2(tb, 0);
1495     tb_reset_jump_recursive2(tb, 1);
1496 }
1497 
1498 /* in deterministic execution mode, instructions doing device I/O
1499    must be at the end of the TB */
1500 void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
1501 {
1502     TranslationBlock *tb;
1503     uint32_t n, cflags;
1504     target_ulong pc, cs_base;
1505     uint64_t flags;
1506 
1507     tb = tb_find_pc(retaddr);
1508     if (!tb) {
1509         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
1510                   (void *)retaddr);
1511     }
1512     n = env->icount_decr.u16.low + tb->icount;
1513     cpu_restore_state_from_tb(tb, env, retaddr);
1514     /* Calculate how many instructions had been executed before the fault
1515        occurred.  */
1516     n = n - env->icount_decr.u16.low;
1517     /* Generate a new TB ending on the I/O insn.  */
1518     n++;
1519     /* On MIPS and SH, delay slot instructions can only be restarted if
1520        they were already the first instruction in the TB.  If this is not
1521        the first instruction in a TB then re-execute the preceding
1522        branch.  */
1523 #if defined(TARGET_MIPS)
1524     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
1525         env->active_tc.PC -= 4;
1526         env->icount_decr.u16.low++;
1527         env->hflags &= ~MIPS_HFLAG_BMASK;
1528     }
1529 #elif defined(TARGET_SH4)
1530     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
1531             && n > 1) {
1532         env->pc -= 2;
1533         env->icount_decr.u16.low++;
1534         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
1535     }
1536 #endif
1537     /* This should never happen.  */
1538     if (n > CF_COUNT_MASK) {
1539         cpu_abort(env, "TB too big during recompile");
1540     }
1541 
1542     cflags = n | CF_LAST_IO;
1543     pc = tb->pc;
1544     cs_base = tb->cs_base;
1545     flags = tb->flags;
1546     tb_phys_invalidate(tb, -1);
1547     /* FIXME: In theory this could raise an exception.  In practice
1548        we have already translated the block once so it's probably ok.  */
1549     tb_gen_code(env, pc, cs_base, flags, cflags);
1550     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
1551        the first in the TB) then we end up generating a whole new TB and
1552        repeating the fault, which is horribly inefficient.
1553        Better would be to execute just this insn uncached, or generate a
1554        second new TB.  */
1555     cpu_resume_from_signal(env, NULL);
1556 }
1557 
1558 void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
1559 {
1560     unsigned int i;
1561 
1562     /* Discard jump cache entries for any tb which might potentially
1563        overlap the flushed page.  */
1564     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1565     memset(&env->tb_jmp_cache[i], 0,
1566            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1567 
1568     i = tb_jmp_cache_hash_page(addr);
1569     memset(&env->tb_jmp_cache[i], 0,
1570            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1571 }
1572 
1573 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
1574 {
1575     int i, target_code_size, max_target_code_size;
1576     int direct_jmp_count, direct_jmp2_count, cross_page;
1577     TranslationBlock *tb;
1578 
1579     target_code_size = 0;
1580     max_target_code_size = 0;
1581     cross_page = 0;
1582     direct_jmp_count = 0;
1583     direct_jmp2_count = 0;
1584     for (i = 0; i < tcg_ctx.tb_ctx.nb_tbs; i++) {
1585         tb = &tcg_ctx.tb_ctx.tbs[i];
1586         target_code_size += tb->size;
1587         if (tb->size > max_target_code_size) {
1588             max_target_code_size = tb->size;
1589         }
1590         if (tb->page_addr[1] != -1) {
1591             cross_page++;
1592         }
1593         if (tb->tb_next_offset[0] != 0xffff) {
1594             direct_jmp_count++;
1595             if (tb->tb_next_offset[1] != 0xffff) {
1596                 direct_jmp2_count++;
1597             }
1598         }
1599     }
1600     /* XXX: avoid using doubles ? */
1601     cpu_fprintf(f, "Translation buffer state:\n");
1602     cpu_fprintf(f, "gen code size       %td/%zd\n",
1603                 tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer,
1604                 tcg_ctx.code_gen_buffer_max_size);
1605     cpu_fprintf(f, "TB count            %d/%d\n",
1606             tcg_ctx.tb_ctx.nb_tbs, tcg_ctx.code_gen_max_blocks);
1607     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
1608             tcg_ctx.tb_ctx.nb_tbs ? target_code_size /
1609                     tcg_ctx.tb_ctx.nb_tbs : 0,
1610             max_target_code_size);
1611     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
1612             tcg_ctx.tb_ctx.nb_tbs ? (tcg_ctx.code_gen_ptr -
1613                                      tcg_ctx.code_gen_buffer) /
1614                                      tcg_ctx.tb_ctx.nb_tbs : 0,
1615                 target_code_size ? (double) (tcg_ctx.code_gen_ptr -
1616                                              tcg_ctx.code_gen_buffer) /
1617                                              target_code_size : 0);
1618     cpu_fprintf(f, "cross page TB count %d (%d%%)\n", cross_page,
1619             tcg_ctx.tb_ctx.nb_tbs ? (cross_page * 100) /
1620                                     tcg_ctx.tb_ctx.nb_tbs : 0);
1621     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
1622                 direct_jmp_count,
1623                 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp_count * 100) /
1624                         tcg_ctx.tb_ctx.nb_tbs : 0,
1625                 direct_jmp2_count,
1626                 tcg_ctx.tb_ctx.nb_tbs ? (direct_jmp2_count * 100) /
1627                         tcg_ctx.tb_ctx.nb_tbs : 0);
1628     cpu_fprintf(f, "\nStatistics:\n");
1629     cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
1630     cpu_fprintf(f, "TB invalidate count %d\n",
1631             tcg_ctx.tb_ctx.tb_phys_invalidate_count);
1632     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
1633     tcg_dump_info(f, cpu_fprintf);
1634 }
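
/* For illustration: these statistics are normally requested
 * interactively (a monitor "info jit"-style command); a hypothetical
 * direct caller could dump them to stdout, since fprintf() matches the
 * fprintf_function signature: */
#if 0
dump_exec_info(stdout, fprintf);
#endif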

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    cpu->tcg_exit_req = 1;
}
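
/* Note: raising tcg_exit_req makes the execution loop leave generated
 * code at the next TB boundary, so the pending interrupt_request bits
 * are serviced promptly even without a periodic timer in user mode. */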

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
struct walk_memory_regions_data {
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};
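
/* Invariant during the walk: 'start' is the base of the region being
 * accumulated, or -1ul when no region is open, and 'prot' holds that
 * region's protection bits.  Runs of pages with identical protection
 * are coalesced into a single callback invocation. */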
1654 
walk_memory_regions_end(struct walk_memory_regions_data * data,abi_ulong end,int new_prot)1655 static int walk_memory_regions_end(struct walk_memory_regions_data *data,
1656                                    abi_ulong end, int new_prot)
1657 {
1658     if (data->start != -1ul) {
1659         int rc = data->fn(data->priv, data->start, end, data->prot);
1660         if (rc != 0) {
1661             return rc;
1662         }
1663     }
1664 
1665     data->start = (new_prot ? end : -1ul);
1666     data->prot = new_prot;
1667 
1668     return 0;
1669 }

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;

        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);

        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
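
/* Illustrative use (hypothetical helper, in the same spirit as
 * dump_region() below): sum the sizes of all executable regions. */
#if 0
static int count_exec_bytes(void *priv, abi_ulong start,
                            abi_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(abi_ulong *)priv += end - start;
    }
    return 0;                   /* a non-zero return aborts the walk */
}

abi_ulong exec_bytes = 0;
walk_memory_regions(&exec_bytes, count_exec_bytes);
#endif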
1732 
dump_region(void * priv,abi_ulong start,abi_ulong end,unsigned long prot)1733 static int dump_region(void *priv, abi_ulong start,
1734     abi_ulong end, unsigned long prot)
1735 {
1736     FILE *f = (FILE *)priv;
1737 
1738     (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
1739         " "TARGET_ABI_FMT_lx" %c%c%c\n",
1740         start, end, end - start,
1741         ((prot & PAGE_READ) ? 'r' : '-'),
1742         ((prot & PAGE_WRITE) ? 'w' : '-'),
1743         ((prot & PAGE_EXEC) ? 'x' : '-'));
1744 
1745     return 0;
1746 }

/* dump memory mappings */
void page_dump(FILE *f)
{
    const int length = sizeof(abi_ulong) * 2;
    (void) fprintf(f, "%-*s %-*s %-*s %s\n",
            length, "start", length, "end", length, "size", "prot");
    walk_memory_regions(f, dump_region);
}
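
/* Example of the resulting layout (values purely illustrative):
 *
 *     start    end      size     prot
 *     00010000-00012000 00002000 r-x
 */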

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        return 0;
    }
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is set automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL, false);
        }
        p->flags = flags;
    }
}
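
/* Illustrative call (sketch): user-mode mmap emulation would register a
 * freshly created guest mapping roughly like this, marking it valid: */
#if 0
page_set_flags(start, start + len, prot | PAGE_VALID);
#endif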

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code; keep scanning the remaining
               pages rather than returning success after the first one */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
        }
    }
    return 0;
}
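
/* Illustrative use (sketch, with a hypothetical error path): verify a
 * guest buffer is readable before copying data out of it. */
#if 0
if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
    return -TARGET_EFAULT;      /* guest pointer not readable */
}
#endif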

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc, true);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
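
/* Overall self-modifying-code flow served by page_unprotect() above:
 * pages holding translated code are kept read-only, a guest write then
 * raises a synchronous SEGV, the host signal handler calls
 * page_unprotect() to invalidate the affected TBs and restore write
 * access, and the faulting write is re-executed. */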
#endif /* CONFIG_USER_ONLY */