1 /*
2  *  virtual page mapping and translated block handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "config.h"
20 #ifdef _WIN32
21 #define WIN32_LEAN_AND_MEAN
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34 
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
38 #include "tcg.h"
39 #include "hw/hw.h"
40 #include "osdep.h"
41 #include "kvm.h"
42 #include "hax.h"
43 #include "qemu-timer.h"
44 #if defined(CONFIG_USER_ONLY)
45 #include <qemu.h>
46 #endif
47 #ifdef CONFIG_MEMCHECK
48 #include "memcheck/memcheck_api.h"
49 #endif  // CONFIG_MEMCHECK
50 
51 //#define DEBUG_TB_INVALIDATE
52 //#define DEBUG_FLUSH
53 //#define DEBUG_TLB
54 //#define DEBUG_UNASSIGNED
55 
56 /* make various TB consistency checks */
57 //#define DEBUG_TB_CHECK
58 //#define DEBUG_TLB_CHECK
59 
60 //#define DEBUG_IOPORT
61 //#define DEBUG_SUBPAGE
62 
63 #if !defined(CONFIG_USER_ONLY)
64 /* TB consistency checks only implemented for usermode emulation.  */
65 #undef DEBUG_TB_CHECK
66 #endif
67 
68 #define SMC_BITMAP_USE_THRESHOLD 10
69 
70 #if defined(TARGET_SPARC64)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 41
72 #elif defined(TARGET_SPARC)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 36
74 #elif defined(TARGET_ALPHA)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #define TARGET_VIRT_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_PPC64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_X86_64)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 42
81 #elif defined(TARGET_I386)
82 #define TARGET_PHYS_ADDR_SPACE_BITS 36
83 #else
84 #define TARGET_PHYS_ADDR_SPACE_BITS 32
85 #endif
86 
87 static TranslationBlock *tbs;
88 int code_gen_max_blocks;
89 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90 static int nb_tbs;
91 /* any access to the tbs or the page table must use this lock */
92 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93 
94 #if defined(__arm__) || defined(__sparc_v9__)
95 /* The prologue must be reachable with a direct jump. ARM and Sparc64
96  have limited branch ranges (possibly also PPC), so place it in a
97  section close to the code segment. */
98 #define code_gen_section                                \
99     __attribute__((__section__(".gen_code")))           \
100     __attribute__((aligned (32)))
101 #elif defined(_WIN32)
102 /* Maximum alignment for Win32 is 16. */
103 #define code_gen_section                                \
104     __attribute__((aligned (16)))
105 #else
106 #define code_gen_section                                \
107     __attribute__((aligned (32)))
108 #endif
109 
110 uint8_t code_gen_prologue[1024] code_gen_section;
111 static uint8_t *code_gen_buffer;
112 static unsigned long code_gen_buffer_size;
113 /* threshold to flush the translated code buffer */
114 static unsigned long code_gen_buffer_max_size;
115 uint8_t *code_gen_ptr;
116 
117 #if !defined(CONFIG_USER_ONLY)
118 int phys_ram_fd;
119 static int in_migration;
120 
121 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
122 #endif
123 
124 CPUState *first_cpu;
125 /* current CPU in the current thread. It is only valid inside
126    cpu_exec() */
127 CPUState *cpu_single_env;
128 /* 0 = Do not count executed instructions.
129    1 = Precise instruction counting.
130    2 = Adaptive rate instruction counting.  */
131 int use_icount = 0;
132 /* Current instruction counter.  While executing translated code this may
133    include some instructions that have not yet been executed.  */
134 int64_t qemu_icount;
135 
136 typedef struct PageDesc {
137     /* list of TBs intersecting this ram page */
138     TranslationBlock *first_tb;
139     /* in order to optimize self modifying code, we count the number
140        of lookups we do to a given page to use a bitmap */
141     unsigned int code_write_count;
142     uint8_t *code_bitmap;
143 #if defined(CONFIG_USER_ONLY)
144     unsigned long flags;
145 #endif
146 } PageDesc;
147 
148 typedef struct PhysPageDesc {
149     /* offset in host memory of the page + io_index in the low bits */
150     ram_addr_t phys_offset;
151     ram_addr_t region_offset;
152 } PhysPageDesc;
153 
154 #define L2_BITS 10
155 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
156 /* XXX: this is a temporary hack for alpha target.
157  *      In the future, this is to be replaced by a multi-level table
158  *      to actually be able to handle the complete 64-bit address space.
159  */
160 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
161 #else
162 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
163 #endif
164 
165 #define L1_SIZE (1 << L1_BITS)
166 #define L2_SIZE (1 << L2_BITS)
167 
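/* The page table below is a two-level structure: a virtual page index is
 * split as (index >> L2_BITS) to select the L1 slot and
 * (index & (L2_SIZE - 1)) to select the PageDesc within the L2 array;
 * see page_l1_map() and page_find_alloc() below. */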
168 unsigned long qemu_real_host_page_size;
169 unsigned long qemu_host_page_bits;
170 unsigned long qemu_host_page_size;
171 unsigned long qemu_host_page_mask;
172 
173 /* XXX: for system emulation, it could just be an array */
174 static PageDesc *l1_map[L1_SIZE];
175 static PhysPageDesc **l1_phys_map;
176 
177 #if !defined(CONFIG_USER_ONLY)
178 static void io_mem_init(void);
179 
180 /* io memory support */
181 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
182 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
183 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
184 static char io_mem_used[IO_MEM_NB_ENTRIES];
185 static int io_mem_watch;
186 #endif
187 
188 /* log support */
189 #ifdef WIN32
190 static const char *logfilename = "qemu.log";
191 #else
192 static const char *logfilename = "/tmp/qemu.log";
193 #endif
194 FILE *logfile;
195 int loglevel;
196 static int log_append = 0;
197 
198 /* statistics */
199 static int tlb_flush_count;
200 static int tb_flush_count;
201 static int tb_phys_invalidate_count;
202 
203 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204 typedef struct subpage_t {
205     target_phys_addr_t base;
206     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208     void *opaque[TARGET_PAGE_SIZE][2][4];
209     ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210 } subpage_t;
211 
212 #ifdef _WIN32
213 static void map_exec(void *addr, long size)
214 {
215     DWORD old_protect;
216     VirtualProtect(addr, size,
217                    PAGE_EXECUTE_READWRITE, &old_protect);
218 
219 }
220 #else
221 static void map_exec(void *addr, long size)
222 {
223     unsigned long start, end, page_size;
224 
225     page_size = getpagesize();
226     start = (unsigned long)addr;
227     start &= ~(page_size - 1);
228 
229     end = (unsigned long)addr + size;
230     end += page_size - 1;
231     end &= ~(page_size - 1);
232 
233     mprotect((void *)start, end - start,
234              PROT_READ | PROT_WRITE | PROT_EXEC);
235 }
236 #endif
237 
238 static void page_init(void)
239 {
240     /* NOTE: we can always suppose that qemu_host_page_size >=
241        TARGET_PAGE_SIZE */
242 #ifdef _WIN32
243     {
244         SYSTEM_INFO system_info;
245 
246         GetSystemInfo(&system_info);
247         qemu_real_host_page_size = system_info.dwPageSize;
248     }
249 #else
250     qemu_real_host_page_size = getpagesize();
251 #endif
252     if (qemu_host_page_size == 0)
253         qemu_host_page_size = qemu_real_host_page_size;
254     if (qemu_host_page_size < TARGET_PAGE_SIZE)
255         qemu_host_page_size = TARGET_PAGE_SIZE;
256     qemu_host_page_bits = 0;
257     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258         qemu_host_page_bits++;
259     qemu_host_page_mask = ~(qemu_host_page_size - 1);
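    /* For example, with a 4 KiB host page size this leaves
       qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfff. */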
260     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262 
263 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264     {
265         long long startaddr, endaddr;
266         FILE *f;
267         int n;
268 
269         mmap_lock();
270         last_brk = (unsigned long)sbrk(0);
271         f = fopen("/proc/self/maps", "r");
272         if (f) {
273             do {
274                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275                 if (n == 2) {
276                     startaddr = MIN(startaddr,
277                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278                     endaddr = MIN(endaddr,
279                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280                     page_set_flags(startaddr & TARGET_PAGE_MASK,
281                                    TARGET_PAGE_ALIGN(endaddr),
282                                    PAGE_RESERVED);
283                 }
284             } while (!feof(f));
285             fclose(f);
286         }
287         mmap_unlock();
288     }
289 #endif
290 }
291 
292 static inline PageDesc **page_l1_map(target_ulong index)
293 {
294 #if TARGET_LONG_BITS > 32
295     /* Host memory outside guest VM.  For 32-bit targets we have already
296        excluded high addresses.  */
297     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298         return NULL;
299 #endif
300     return &l1_map[index >> L2_BITS];
301 }
302 
303 static inline PageDesc *page_find_alloc(target_ulong index)
304 {
305     PageDesc **lp, *p;
306     lp = page_l1_map(index);
307     if (!lp)
308         return NULL;
309 
310     p = *lp;
311     if (!p) {
312         /* allocate if not found */
313 #if defined(CONFIG_USER_ONLY)
314         size_t len = sizeof(PageDesc) * L2_SIZE;
315         /* Don't use qemu_malloc because it may recurse.  */
316         p = mmap(NULL, len, PROT_READ | PROT_WRITE,
317                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318         *lp = p;
319         if (h2g_valid(p)) {
320             unsigned long addr = h2g(p);
321             page_set_flags(addr & TARGET_PAGE_MASK,
322                            TARGET_PAGE_ALIGN(addr + len),
323                            PAGE_RESERVED);
324         }
325 #else
326         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327         *lp = p;
328 #endif
329     }
330     return p + (index & (L2_SIZE - 1));
331 }
332 
333 static inline PageDesc *page_find(target_ulong index)
334 {
335     PageDesc **lp, *p;
336     lp = page_l1_map(index);
337     if (!lp)
338         return NULL;
339 
340     p = *lp;
341     if (!p) {
342         return NULL;
343     }
344     return p + (index & (L2_SIZE - 1));
345 }
346 
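/* Look up the PhysPageDesc for a physical page index, optionally allocating
   the intermediate tables; freshly allocated entries start out as
   IO_MEM_UNASSIGNED with region_offset set to the page address. */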
347 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
348 {
349     void **lp, **p;
350     PhysPageDesc *pd;
351 
352     p = (void **)l1_phys_map;
353 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
354 
355 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
356 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
357 #endif
358     lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
359     p = *lp;
360     if (!p) {
361         /* allocate if not found */
362         if (!alloc)
363             return NULL;
364         p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
365         memset(p, 0, sizeof(void *) * L1_SIZE);
366         *lp = p;
367     }
368 #endif
369     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
370     pd = *lp;
371     if (!pd) {
372         int i;
373         /* allocate if not found */
374         if (!alloc)
375             return NULL;
376         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
377         *lp = pd;
378         for (i = 0; i < L2_SIZE; i++) {
379           pd[i].phys_offset = IO_MEM_UNASSIGNED;
380           pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
381         }
382     }
383     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
384 }
385 
386 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
387 {
388     return phys_page_find_alloc(index, 0);
389 }
390 
391 #if !defined(CONFIG_USER_ONLY)
392 static void tlb_protect_code(ram_addr_t ram_addr);
393 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
394                                     target_ulong vaddr);
395 #define mmap_lock() do { } while(0)
396 #define mmap_unlock() do { } while(0)
397 #endif
398 
399 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
400 
401 #if defined(CONFIG_USER_ONLY)
402 /* Currently it is not recommended to allocate big chunks of data in
403    user mode. This will change once a dedicated libc is used. */
404 #define USE_STATIC_CODE_GEN_BUFFER
405 #endif
406 
407 #ifdef USE_STATIC_CODE_GEN_BUFFER
408 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
409 #endif
410 
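/* Allocate the buffer that holds translated host code: either the static
   buffer, or an executable mapping placed under host-specific constraints so
   generated code stays within direct-branch range of the prologue. */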
411 static void code_gen_alloc(unsigned long tb_size)
412 {
413 #ifdef USE_STATIC_CODE_GEN_BUFFER
414     code_gen_buffer = static_code_gen_buffer;
415     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416     map_exec(code_gen_buffer, code_gen_buffer_size);
417 #else
418     code_gen_buffer_size = tb_size;
419     if (code_gen_buffer_size == 0) {
420 #if defined(CONFIG_USER_ONLY)
421         /* in user mode, phys_ram_size is not meaningful */
422         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423 #else
424         /* XXX: needs adjustments */
425         code_gen_buffer_size = (unsigned long)(ram_size / 4);
426 #endif
427     }
428     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
429         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
430     /* The code gen buffer location may have constraints depending on
431        the host cpu and OS */
432 #if defined(__linux__)
433     {
434         int flags;
435         void *start = NULL;
436 
437         flags = MAP_PRIVATE | MAP_ANONYMOUS;
438 #if defined(__x86_64__)
439         flags |= MAP_32BIT;
440         /* Cannot map more than that */
441         if (code_gen_buffer_size > (800 * 1024 * 1024))
442             code_gen_buffer_size = (800 * 1024 * 1024);
443 #elif defined(__sparc_v9__)
444         // Map the buffer below 2G, so we can use direct calls and branches
445         flags |= MAP_FIXED;
446         start = (void *) 0x60000000UL;
447         if (code_gen_buffer_size > (512 * 1024 * 1024))
448             code_gen_buffer_size = (512 * 1024 * 1024);
449 #elif defined(__arm__)
450         /* Map the buffer below 32M, so we can use direct calls and branches */
451         flags |= MAP_FIXED;
452         start = (void *) 0x01000000UL;
453         if (code_gen_buffer_size > 16 * 1024 * 1024)
454             code_gen_buffer_size = 16 * 1024 * 1024;
455 #elif defined(__s390x__)
456         /* Map the buffer so that we can use direct calls and branches.  */
457         /* We have a +- 4GB range on the branches; leave some slop.  */
458         if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
459             code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
460         }
461         start = (void *)0x90000000UL;
462 #endif
463         code_gen_buffer = mmap(start, code_gen_buffer_size,
464                                PROT_WRITE | PROT_READ | PROT_EXEC,
465                                flags, -1, 0);
466         if (code_gen_buffer == MAP_FAILED) {
467             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
468             exit(1);
469         }
470     }
471 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
472     || defined(__DragonFly__) || defined(__OpenBSD__)
473     {
474         int flags;
475         void *addr = NULL;
476         flags = MAP_PRIVATE | MAP_ANONYMOUS;
477 #if defined(__x86_64__)
478         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
479          * 0x40000000 is free */
480         flags |= MAP_FIXED;
481         addr = (void *)0x40000000;
482         /* Cannot map more than that */
483         if (code_gen_buffer_size > (800 * 1024 * 1024))
484             code_gen_buffer_size = (800 * 1024 * 1024);
485 #elif defined(__sparc_v9__)
486         // Map the buffer below 2G, so we can use direct calls and branches
487         flags |= MAP_FIXED;
488         addr = (void *) 0x60000000UL;
489         if (code_gen_buffer_size > (512 * 1024 * 1024)) {
490             code_gen_buffer_size = (512 * 1024 * 1024);
491         }
492 #endif
493         code_gen_buffer = mmap(addr, code_gen_buffer_size,
494                                PROT_WRITE | PROT_READ | PROT_EXEC,
495                                flags, -1, 0);
496         if (code_gen_buffer == MAP_FAILED) {
497             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
498             exit(1);
499         }
500     }
501 #else
502     code_gen_buffer = qemu_malloc(code_gen_buffer_size);
503     map_exec(code_gen_buffer, code_gen_buffer_size);
504 #endif
505 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
506     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
507     code_gen_buffer_max_size = code_gen_buffer_size -
508         code_gen_max_block_size();
509     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
510     tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
511 }
512 
513 /* Must be called before using the QEMU cpus. 'tb_size' is the size
514    (in bytes) allocated to the translation buffer. Zero means default
515    size. */
516 void cpu_exec_init_all(unsigned long tb_size)
517 {
518     cpu_gen_init();
519     code_gen_alloc(tb_size);
520     code_gen_ptr = code_gen_buffer;
521     page_init();
522 #if !defined(CONFIG_USER_ONLY)
523     io_mem_init();
524 #endif
525 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
526     /* There's no guest base to take into account, so go ahead and
527        initialize the prologue now.  */
528     tcg_prologue_init(&tcg_ctx);
529 #endif
530 }
531 
532 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
533 
534 #define CPU_COMMON_SAVE_VERSION 1
535 
536 static void cpu_common_save(QEMUFile *f, void *opaque)
537 {
538     CPUState *env = opaque;
539 
540     cpu_synchronize_state(env, 0);
541 
542     qemu_put_be32s(f, &env->halted);
543     qemu_put_be32s(f, &env->interrupt_request);
544 }
545 
546 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
547 {
548     CPUState *env = opaque;
549 
550     if (version_id != CPU_COMMON_SAVE_VERSION)
551         return -EINVAL;
552 
553     qemu_get_be32s(f, &env->halted);
554     qemu_get_be32s(f, &env->interrupt_request);
555     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
556        version_id is increased. */
557     env->interrupt_request &= ~0x01;
558     tlb_flush(env, 1);
559     cpu_synchronize_state(env, 1);
560 
561     return 0;
562 }
563 #endif
564 
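/* Return the CPUState with the given cpu_index, or NULL if there is none. */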
565 CPUState *qemu_get_cpu(int cpu)
566 {
567     CPUState *env = first_cpu;
568 
569     while (env) {
570         if (env->cpu_index == cpu)
571             break;
572         env = env->next_cpu;
573     }
574 
575     return env;
576 }
577 
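/* Register a new CPU: assign it the next free cpu_index, append it to the
   global CPU list and, in system mode, register its savevm handlers. */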
578 void cpu_exec_init(CPUState *env)
579 {
580     CPUState **penv;
581     int cpu_index;
582 
583 #if defined(CONFIG_USER_ONLY)
584     cpu_list_lock();
585 #endif
586     env->next_cpu = NULL;
587     penv = &first_cpu;
588     cpu_index = 0;
589     while (*penv != NULL) {
590         penv = &(*penv)->next_cpu;
591         cpu_index++;
592     }
593     env->cpu_index = cpu_index;
594     env->numa_node = 0;
595     QTAILQ_INIT(&env->breakpoints);
596     QTAILQ_INIT(&env->watchpoints);
597     *penv = env;
598 #if defined(CONFIG_USER_ONLY)
599     cpu_list_unlock();
600 #endif
601 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
602     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
603                     cpu_common_save, cpu_common_load, env);
604     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
605                     cpu_save, cpu_load, env);
606 #endif
607 }
608 
609 static inline void invalidate_page_bitmap(PageDesc *p)
610 {
611     if (p->code_bitmap) {
612         qemu_free(p->code_bitmap);
613         p->code_bitmap = NULL;
614     }
615     p->code_write_count = 0;
616 }
617 
618 /* set to NULL all the 'first_tb' fields in all PageDescs */
619 static void page_flush_tb(void)
620 {
621     int i, j;
622     PageDesc *p;
623 
624     for(i = 0; i < L1_SIZE; i++) {
625         p = l1_map[i];
626         if (p) {
627             for(j = 0; j < L2_SIZE; j++) {
628                 p->first_tb = NULL;
629                 invalidate_page_bitmap(p);
630                 p++;
631             }
632         }
633     }
634 }
635 
636 /* flush all the translation blocks */
637 /* XXX: tb_flush is currently not thread safe */
638 void tb_flush(CPUState *env1)
639 {
640     CPUState *env;
641 #if defined(DEBUG_FLUSH)
642     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
643            (unsigned long)(code_gen_ptr - code_gen_buffer),
644            nb_tbs, nb_tbs > 0 ?
645            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
646 #endif
647     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
648         cpu_abort(env1, "Internal error: code buffer overflow\n");
649 
650     nb_tbs = 0;
651 
652     for(env = first_cpu; env != NULL; env = env->next_cpu) {
653 #ifdef CONFIG_MEMCHECK
654         int tb_to_clean;
655         for (tb_to_clean = 0; tb_to_clean < TB_JMP_CACHE_SIZE; tb_to_clean++) {
656             if (env->tb_jmp_cache[tb_to_clean] != NULL &&
657                 env->tb_jmp_cache[tb_to_clean]->tpc2gpc != NULL) {
658                 qemu_free(env->tb_jmp_cache[tb_to_clean]->tpc2gpc);
659                 env->tb_jmp_cache[tb_to_clean]->tpc2gpc = NULL;
660                 env->tb_jmp_cache[tb_to_clean]->tpc2gpc_pairs = 0;
661             }
662         }
663 #endif  // CONFIG_MEMCHECK
664         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
665     }
666 
667     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
668     page_flush_tb();
669 
670     code_gen_ptr = code_gen_buffer;
671     /* XXX: flush processor icache at this point if cache flush is
672        expensive */
673     tb_flush_count++;
674 }
675 
676 #ifdef DEBUG_TB_CHECK
677 
678 static void tb_invalidate_check(target_ulong address)
679 {
680     TranslationBlock *tb;
681     int i;
682     address &= TARGET_PAGE_MASK;
683     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
684         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
685             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
686                   address >= tb->pc + tb->size)) {
687                 printf("ERROR invalidate: address=" TARGET_FMT_lx
688                        " PC=%08lx size=%04x\n",
689                        address, (long)tb->pc, tb->size);
690             }
691         }
692     }
693 }
694 
695 /* verify that all the pages have correct rights for code */
696 static void tb_page_check(void)
697 {
698     TranslationBlock *tb;
699     int i, flags1, flags2;
700 
701     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
702         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
703             flags1 = page_get_flags(tb->pc);
704             flags2 = page_get_flags(tb->pc + tb->size - 1);
705             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
706                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
707                        (long)tb->pc, tb->size, flags1, flags2);
708             }
709         }
710     }
711 }
712 
713 #endif
714 
715 /* invalidate one TB */
716 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
717                              int next_offset)
718 {
719     TranslationBlock *tb1;
720     for(;;) {
721         tb1 = *ptb;
722         if (tb1 == tb) {
723             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
724             break;
725         }
726         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
727     }
728 }
729 
730 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
731 {
732     TranslationBlock *tb1;
733     unsigned int n1;
734 
735     for(;;) {
736         tb1 = *ptb;
737         n1 = (long)tb1 & 3;
738         tb1 = (TranslationBlock *)((long)tb1 & ~3);
739         if (tb1 == tb) {
740             *ptb = tb1->page_next[n1];
741             break;
742         }
743         ptb = &tb1->page_next[n1];
744     }
745 }
746 
747 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
748 {
749     TranslationBlock *tb1, **ptb;
750     unsigned int n1;
751 
752     ptb = &tb->jmp_next[n];
753     tb1 = *ptb;
754     if (tb1) {
755         /* find tb(n) in circular list */
756         for(;;) {
757             tb1 = *ptb;
758             n1 = (long)tb1 & 3;
759             tb1 = (TranslationBlock *)((long)tb1 & ~3);
760             if (n1 == n && tb1 == tb)
761                 break;
762             if (n1 == 2) {
763                 ptb = &tb1->jmp_first;
764             } else {
765                 ptb = &tb1->jmp_next[n1];
766             }
767         }
768         /* now we can suppress tb(n) from the list */
769         *ptb = tb->jmp_next[n];
770 
771         tb->jmp_next[n] = NULL;
772     }
773 }
774 
775 /* reset the jump entry 'n' of a TB so that it is not chained to
776    another TB */
777 static inline void tb_reset_jump(TranslationBlock *tb, int n)
778 {
779     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
780 }
781 
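/* Remove a TB from every structure that references it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache, and the jump lists
   of any TB still chained to it. */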
782 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
783 {
784     CPUState *env;
785     PageDesc *p;
786     unsigned int h, n1;
787     target_phys_addr_t phys_pc;
788     TranslationBlock *tb1, *tb2;
789 
790     /* remove the TB from the hash list */
791     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
792     h = tb_phys_hash_func(phys_pc);
793     tb_remove(&tb_phys_hash[h], tb,
794               offsetof(TranslationBlock, phys_hash_next));
795 
796     /* remove the TB from the page list */
797     if (tb->page_addr[0] != page_addr) {
798         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
799         tb_page_remove(&p->first_tb, tb);
800         invalidate_page_bitmap(p);
801     }
802     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
803         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
804         tb_page_remove(&p->first_tb, tb);
805         invalidate_page_bitmap(p);
806     }
807 
808     tb_invalidated_flag = 1;
809 
810     /* remove the TB from the hash list */
811     h = tb_jmp_cache_hash_func(tb->pc);
812     for(env = first_cpu; env != NULL; env = env->next_cpu) {
813         if (env->tb_jmp_cache[h] == tb)
814             env->tb_jmp_cache[h] = NULL;
815     }
816 
817     /* suppress this TB from the two jump lists */
818     tb_jmp_remove(tb, 0);
819     tb_jmp_remove(tb, 1);
820 
821     /* suppress any remaining jumps to this TB */
822     tb1 = tb->jmp_first;
823     for(;;) {
824         n1 = (long)tb1 & 3;
825         if (n1 == 2)
826             break;
827         tb1 = (TranslationBlock *)((long)tb1 & ~3);
828         tb2 = tb1->jmp_next[n1];
829         tb_reset_jump(tb1, n1);
830         tb1->jmp_next[n1] = NULL;
831         tb1 = tb2;
832     }
833     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
834 
835 #ifdef CONFIG_MEMCHECK
836     if (tb->tpc2gpc != NULL) {
837         qemu_free(tb->tpc2gpc);
838         tb->tpc2gpc = NULL;
839         tb->tpc2gpc_pairs = 0;
840     }
841 #endif  // CONFIG_MEMCHECK
842 
843     tb_phys_invalidate_count++;
844 }
845 
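/* Set the bits covering [start, start + len) in the bitmap 'tab'. */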
846 static inline void set_bits(uint8_t *tab, int start, int len)
847 {
848     int end, mask, end1;
849 
850     end = start + len;
851     tab += start >> 3;
852     mask = 0xff << (start & 7);
853     if ((start & ~7) == (end & ~7)) {
854         if (start < end) {
855             mask &= ~(0xff << (end & 7));
856             *tab |= mask;
857         }
858     } else {
859         *tab++ |= mask;
860         start = (start + 8) & ~7;
861         end1 = end & ~7;
862         while (start < end1) {
863             *tab++ = 0xff;
864             start += 8;
865         }
866         if (start < end) {
867             mask = ~(0xff << (end & 7));
868             *tab |= mask;
869         }
870     }
871 }
872 
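/* Build the self-modifying-code bitmap for a page: one bit per byte of the
   page, set wherever translated code was taken from. */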
873 static void build_page_bitmap(PageDesc *p)
874 {
875     int n, tb_start, tb_end;
876     TranslationBlock *tb;
877 
878     p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
879 
880     tb = p->first_tb;
881     while (tb != NULL) {
882         n = (long)tb & 3;
883         tb = (TranslationBlock *)((long)tb & ~3);
884         /* NOTE: this is subtle as a TB may span two physical pages */
885         if (n == 0) {
886             /* NOTE: tb_end may be after the end of the page, but
887                it is not a problem */
888             tb_start = tb->pc & ~TARGET_PAGE_MASK;
889             tb_end = tb_start + tb->size;
890             if (tb_end > TARGET_PAGE_SIZE)
891                 tb_end = TARGET_PAGE_SIZE;
892         } else {
893             tb_start = 0;
894             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
895         }
896         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
897         tb = tb->page_next[n];
898     }
899 }
900 
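/* Translate a new TB for (pc, cs_base, flags) and link it into the physical
   page tables; if the TB pool is exhausted, the whole cache is flushed and
   the allocation retried. */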
901 TranslationBlock *tb_gen_code(CPUState *env,
902                               target_ulong pc, target_ulong cs_base,
903                               int flags, int cflags)
904 {
905     TranslationBlock *tb;
906     uint8_t *tc_ptr;
907     target_ulong phys_pc, phys_page2, virt_page2;
908     int code_gen_size;
909 
910     phys_pc = get_phys_addr_code(env, pc);
911     tb = tb_alloc(pc);
912     if (!tb) {
913         /* flush must be done */
914         tb_flush(env);
915         /* cannot fail at this point */
916         tb = tb_alloc(pc);
917         /* Don't forget to invalidate previous TB info.  */
918         tb_invalidated_flag = 1;
919     }
920     tc_ptr = code_gen_ptr;
921     tb->tc_ptr = tc_ptr;
922     tb->cs_base = cs_base;
923     tb->flags = flags;
924     tb->cflags = cflags;
925 #ifdef CONFIG_TRACE
926     tb->bb_rec = NULL;
927     tb->prev_time = 0;
928 #endif
929     cpu_gen_code(env, tb, &code_gen_size);
930     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
931 
932     /* check next page if needed */
933     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
934     phys_page2 = -1;
935     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
936         phys_page2 = get_phys_addr_code(env, virt_page2);
937     }
938     tb_link_phys(tb, phys_pc, phys_page2);
939     return tb;
940 }
941 
942 /* invalidate all TBs which intersect with the target physical page
943    starting in range [start;end[. NOTE: start and end must refer to
944    the same physical page. 'is_cpu_write_access' should be true if called
945    from a real cpu write access: the virtual CPU will exit the current
946    TB if code is modified inside this TB. */
947 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
948                                    int is_cpu_write_access)
949 {
950     TranslationBlock *tb, *tb_next, *saved_tb;
951     CPUState *env = cpu_single_env;
952     target_ulong tb_start, tb_end;
953     PageDesc *p;
954     int n;
955 #ifdef TARGET_HAS_PRECISE_SMC
956     int current_tb_not_found = is_cpu_write_access;
957     TranslationBlock *current_tb = NULL;
958     int current_tb_modified = 0;
959     target_ulong current_pc = 0;
960     target_ulong current_cs_base = 0;
961     int current_flags = 0;
962 #endif /* TARGET_HAS_PRECISE_SMC */
963 
964     p = page_find(start >> TARGET_PAGE_BITS);
965     if (!p)
966         return;
967     if (!p->code_bitmap &&
968         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
969         is_cpu_write_access) {
970         /* build code bitmap */
971         build_page_bitmap(p);
972     }
973 
974     /* we remove all the TBs in the range [start, end[ */
975     /* XXX: see if in some cases it could be faster to invalidate all the code */
976     tb = p->first_tb;
977     while (tb != NULL) {
978         n = (long)tb & 3;
979         tb = (TranslationBlock *)((long)tb & ~3);
980         tb_next = tb->page_next[n];
981         /* NOTE: this is subtle as a TB may span two physical pages */
982         if (n == 0) {
983             /* NOTE: tb_end may be after the end of the page, but
984                it is not a problem */
985             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
986             tb_end = tb_start + tb->size;
987         } else {
988             tb_start = tb->page_addr[1];
989             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
990         }
991         if (!(tb_end <= start || tb_start >= end)) {
992 #ifdef TARGET_HAS_PRECISE_SMC
993             if (current_tb_not_found) {
994                 current_tb_not_found = 0;
995                 current_tb = NULL;
996                 if (env->mem_io_pc) {
997                     /* now we have a real cpu fault */
998                     current_tb = tb_find_pc(env->mem_io_pc);
999                 }
1000             }
1001             if (current_tb == tb &&
1002                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1003                 /* If we are modifying the current TB, we must stop
1004                 its execution. We could be more precise by checking
1005                 that the modification is after the current PC, but it
1006                 would require a specialized function to partially
1007                 restore the CPU state */
1008 
1009                 current_tb_modified = 1;
1010                 cpu_restore_state(current_tb, env,  env->mem_io_pc);
1011                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1012                                      &current_flags);
1013             }
1014 #endif /* TARGET_HAS_PRECISE_SMC */
1015             /* we need to do that to handle the case where a signal
1016                occurs while doing tb_phys_invalidate() */
1017             saved_tb = NULL;
1018             if (env) {
1019                 saved_tb = env->current_tb;
1020                 env->current_tb = NULL;
1021             }
1022             tb_phys_invalidate(tb, -1);
1023             if (env) {
1024                 env->current_tb = saved_tb;
1025                 if (env->interrupt_request && env->current_tb)
1026                     cpu_interrupt(env, env->interrupt_request);
1027             }
1028         }
1029         tb = tb_next;
1030     }
1031 #if !defined(CONFIG_USER_ONLY)
1032     /* if no code remaining, no need to continue to use slow writes */
1033     if (!p->first_tb) {
1034         invalidate_page_bitmap(p);
1035         if (is_cpu_write_access) {
1036             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1037         }
1038     }
1039 #endif
1040 #ifdef TARGET_HAS_PRECISE_SMC
1041     if (current_tb_modified) {
1042         /* we generate a block containing just the instruction
1043            modifying the memory. It will ensure that it cannot modify
1044            itself */
1045         env->current_tb = NULL;
1046         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1047         cpu_resume_from_signal(env, NULL);
1048     }
1049 #endif
1050 }
1051 
1052 /* len must be <= 8 and start must be a multiple of len */
1053 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1054 {
1055     PageDesc *p;
1056     int offset, b;
1057 #if 0
1058     if (1) {
1059         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1060                   cpu_single_env->mem_io_vaddr, len,
1061                   cpu_single_env->eip,
1062                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1063     }
1064 #endif
1065     p = page_find(start >> TARGET_PAGE_BITS);
1066     if (!p)
1067         return;
1068     if (p->code_bitmap) {
1069         offset = start & ~TARGET_PAGE_MASK;
1070         b = p->code_bitmap[offset >> 3] >> (offset & 7);
1071         if (b & ((1 << len) - 1))
1072             goto do_invalidate;
1073     } else {
1074     do_invalidate:
1075         tb_invalidate_phys_page_range(start, start + len, 1);
1076     }
1077 }
1078 
1079 #if !defined(CONFIG_SOFTMMU)
1080 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1081                                     unsigned long pc, void *puc)
1082 {
1083     TranslationBlock *tb;
1084     PageDesc *p;
1085     int n;
1086 #ifdef TARGET_HAS_PRECISE_SMC
1087     TranslationBlock *current_tb = NULL;
1088     CPUState *env = cpu_single_env;
1089     int current_tb_modified = 0;
1090     target_ulong current_pc = 0;
1091     target_ulong current_cs_base = 0;
1092     int current_flags = 0;
1093 #endif
1094 
1095     addr &= TARGET_PAGE_MASK;
1096     p = page_find(addr >> TARGET_PAGE_BITS);
1097     if (!p)
1098         return;
1099     tb = p->first_tb;
1100 #ifdef TARGET_HAS_PRECISE_SMC
1101     if (tb && pc != 0) {
1102         current_tb = tb_find_pc(pc);
1103     }
1104 #endif
1105     while (tb != NULL) {
1106         n = (long)tb & 3;
1107         tb = (TranslationBlock *)((long)tb & ~3);
1108 #ifdef TARGET_HAS_PRECISE_SMC
1109         if (current_tb == tb &&
1110             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1111                 /* If we are modifying the current TB, we must stop
1112                    its execution. We could be more precise by checking
1113                    that the modification is after the current PC, but it
1114                    would require a specialized function to partially
1115                    restore the CPU state */
1116 
1117             current_tb_modified = 1;
1118             cpu_restore_state(current_tb, env, pc);
1119             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1120                                  &current_flags);
1121         }
1122 #endif /* TARGET_HAS_PRECISE_SMC */
1123         tb_phys_invalidate(tb, addr);
1124         tb = tb->page_next[n];
1125     }
1126     p->first_tb = NULL;
1127 #ifdef TARGET_HAS_PRECISE_SMC
1128     if (current_tb_modified) {
1129         /* we generate a block containing just the instruction
1130            modifying the memory. It will ensure that it cannot modify
1131            itself */
1132         env->current_tb = NULL;
1133         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1134         cpu_resume_from_signal(env, puc);
1135     }
1136 #endif
1137 }
1138 #endif
1139 
1140 /* add the tb in the target page and protect it if necessary */
1141 static inline void tb_alloc_page(TranslationBlock *tb,
1142                                  unsigned int n, target_ulong page_addr)
1143 {
1144     PageDesc *p;
1145     TranslationBlock *last_first_tb;
1146 
1147     tb->page_addr[n] = page_addr;
1148     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1149     tb->page_next[n] = p->first_tb;
1150     last_first_tb = p->first_tb;
1151     p->first_tb = (TranslationBlock *)((long)tb | n);
1152     invalidate_page_bitmap(p);
1153 
1154 #if defined(TARGET_HAS_SMC) || 1
1155 
1156 #if defined(CONFIG_USER_ONLY)
1157     if (p->flags & PAGE_WRITE) {
1158         target_ulong addr;
1159         PageDesc *p2;
1160         int prot;
1161 
1162         /* force the host page as non writable (writes will have a
1163            page fault + mprotect overhead) */
1164         page_addr &= qemu_host_page_mask;
1165         prot = 0;
1166         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1167             addr += TARGET_PAGE_SIZE) {
1168 
1169             p2 = page_find (addr >> TARGET_PAGE_BITS);
1170             if (!p2)
1171                 continue;
1172             prot |= p2->flags;
1173             p2->flags &= ~PAGE_WRITE;
1174             page_get_flags(addr);
1175           }
1176         mprotect(g2h(page_addr), qemu_host_page_size,
1177                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1178 #ifdef DEBUG_TB_INVALIDATE
1179         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1180                page_addr);
1181 #endif
1182     }
1183 #else
1184     /* if some code is already present, then the pages are already
1185        protected. So we handle the case where only the first TB is
1186        allocated in a physical page */
1187     if (!last_first_tb) {
1188         tlb_protect_code(page_addr);
1189     }
1190 #endif
1191 
1192 #endif /* TARGET_HAS_SMC */
1193 }
1194 
1195 /* Allocate a new translation block. Flush the translation buffer if
1196    too many translation blocks or too much generated code. */
1197 TranslationBlock *tb_alloc(target_ulong pc)
1198 {
1199     TranslationBlock *tb;
1200 
1201     if (nb_tbs >= code_gen_max_blocks ||
1202         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1203         return NULL;
1204     tb = &tbs[nb_tbs++];
1205     tb->pc = pc;
1206     tb->cflags = 0;
1207 #ifdef CONFIG_MEMCHECK
1208     tb->tpc2gpc = NULL;
1209     tb->tpc2gpc_pairs = 0;
1210 #endif  // CONFIG_MEMCHECK
1211     return tb;
1212 }
1213 
1214 void tb_free(TranslationBlock *tb)
1215 {
1216     /* In practice this is mostly used for single-use temporary TBs.
1217        Ignore the hard cases and just back up if this TB happens to
1218        be the last one generated.  */
1219     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1220         code_gen_ptr = tb->tc_ptr;
1221         nb_tbs--;
1222     }
1223 }
1224 
1225 /* add a new TB and link it to the physical page tables. phys_page2 is
1226    (-1) to indicate that only one page contains the TB. */
1227 void tb_link_phys(TranslationBlock *tb,
1228                   target_ulong phys_pc, target_ulong phys_page2)
1229 {
1230     unsigned int h;
1231     TranslationBlock **ptb;
1232 
1233     /* Grab the mmap lock to stop another thread invalidating this TB
1234        before we are done.  */
1235     mmap_lock();
1236     /* add in the physical hash table */
1237     h = tb_phys_hash_func(phys_pc);
1238     ptb = &tb_phys_hash[h];
1239     tb->phys_hash_next = *ptb;
1240     *ptb = tb;
1241 
1242     /* add in the page list */
1243     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1244     if (phys_page2 != -1)
1245         tb_alloc_page(tb, 1, phys_page2);
1246     else
1247         tb->page_addr[1] = -1;
1248 
1249     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1250     tb->jmp_next[0] = NULL;
1251     tb->jmp_next[1] = NULL;
1252 
1253     /* init original jump addresses */
1254     if (tb->tb_next_offset[0] != 0xffff)
1255         tb_reset_jump(tb, 0);
1256     if (tb->tb_next_offset[1] != 0xffff)
1257         tb_reset_jump(tb, 1);
1258 
1259 #ifdef DEBUG_TB_CHECK
1260     tb_page_check();
1261 #endif
1262     mmap_unlock();
1263 }
1264 
1265 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1266    tb[1].tc_ptr. Return NULL if not found */
1267 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1268 {
1269     int m_min, m_max, m;
1270     unsigned long v;
1271     TranslationBlock *tb;
1272 
1273     if (nb_tbs <= 0)
1274         return NULL;
1275     if (tc_ptr < (unsigned long)code_gen_buffer ||
1276         tc_ptr >= (unsigned long)code_gen_ptr)
1277         return NULL;
1278     /* binary search (cf Knuth) */
1279     m_min = 0;
1280     m_max = nb_tbs - 1;
1281     while (m_min <= m_max) {
1282         m = (m_min + m_max) >> 1;
1283         tb = &tbs[m];
1284         v = (unsigned long)tb->tc_ptr;
1285         if (v == tc_ptr)
1286             return tb;
1287         else if (tc_ptr < v) {
1288             m_max = m - 1;
1289         } else {
1290             m_min = m + 1;
1291         }
1292     }
1293     return &tbs[m_max];
1294 }
1295 
1296 static void tb_reset_jump_recursive(TranslationBlock *tb);
1297 
1298 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1299 {
1300     TranslationBlock *tb1, *tb_next, **ptb;
1301     unsigned int n1;
1302 
1303     tb1 = tb->jmp_next[n];
1304     if (tb1 != NULL) {
1305         /* find head of list */
1306         for(;;) {
1307             n1 = (long)tb1 & 3;
1308             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1309             if (n1 == 2)
1310                 break;
1311             tb1 = tb1->jmp_next[n1];
1312         }
1313         /* we are now sure that tb jumps to tb1 */
1314         tb_next = tb1;
1315 
1316         /* remove tb from the jmp_first list */
1317         ptb = &tb_next->jmp_first;
1318         for(;;) {
1319             tb1 = *ptb;
1320             n1 = (long)tb1 & 3;
1321             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1322             if (n1 == n && tb1 == tb)
1323                 break;
1324             ptb = &tb1->jmp_next[n1];
1325         }
1326         *ptb = tb->jmp_next[n];
1327         tb->jmp_next[n] = NULL;
1328 
1329         /* suppress the jump to next tb in generated code */
1330         tb_reset_jump(tb, n);
1331 
1332         /* suppress jumps in the tb on which we could have jumped */
1333         tb_reset_jump_recursive(tb_next);
1334     }
1335 }
1336 
1337 static void tb_reset_jump_recursive(TranslationBlock *tb)
1338 {
1339     tb_reset_jump_recursive2(tb, 0);
1340     tb_reset_jump_recursive2(tb, 1);
1341 }
1342 
1343 #if defined(TARGET_HAS_ICE)
1344 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1345 {
1346     target_phys_addr_t addr;
1347     target_ulong pd;
1348     ram_addr_t ram_addr;
1349     PhysPageDesc *p;
1350 
1351     addr = cpu_get_phys_page_debug(env, pc);
1352     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1353     if (!p) {
1354         pd = IO_MEM_UNASSIGNED;
1355     } else {
1356         pd = p->phys_offset;
1357     }
1358     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1359     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1360 }
1361 #endif
1362 
1363 /* Add a watchpoint.  */
1364 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1365                           int flags, CPUWatchpoint **watchpoint)
1366 {
1367     target_ulong len_mask = ~(len - 1);
1368     CPUWatchpoint *wp;
1369 
1370     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1371     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1372         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1373                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1374         return -EINVAL;
1375     }
1376     wp = qemu_malloc(sizeof(*wp));
1377 
1378     wp->vaddr = addr;
1379     wp->len_mask = len_mask;
1380     wp->flags = flags;
1381 
1382     /* keep all GDB-injected watchpoints in front */
1383     if (flags & BP_GDB)
1384         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1385     else
1386         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1387 
1388     tlb_flush_page(env, addr);
1389 
1390     if (watchpoint)
1391         *watchpoint = wp;
1392     return 0;
1393 }
1394 
1395 /* Remove a specific watchpoint.  */
1396 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1397                           int flags)
1398 {
1399     target_ulong len_mask = ~(len - 1);
1400     CPUWatchpoint *wp;
1401 
1402     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1403         if (addr == wp->vaddr && len_mask == wp->len_mask
1404                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1405             cpu_watchpoint_remove_by_ref(env, wp);
1406             return 0;
1407         }
1408     }
1409     return -ENOENT;
1410 }
1411 
1412 /* Remove a specific watchpoint by reference.  */
1413 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1414 {
1415     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1416 
1417     tlb_flush_page(env, watchpoint->vaddr);
1418 
1419     qemu_free(watchpoint);
1420 }
1421 
1422 /* Remove all matching watchpoints.  */
1423 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1424 {
1425     CPUWatchpoint *wp, *next;
1426 
1427     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1428         if (wp->flags & mask)
1429             cpu_watchpoint_remove_by_ref(env, wp);
1430     }
1431 }
1432 
1433 /* Add a breakpoint.  */
1434 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1435                           CPUBreakpoint **breakpoint)
1436 {
1437 #if defined(TARGET_HAS_ICE)
1438     CPUBreakpoint *bp;
1439 
1440     bp = qemu_malloc(sizeof(*bp));
1441 
1442     bp->pc = pc;
1443     bp->flags = flags;
1444 
1445     /* keep all GDB-injected breakpoints in front */
1446     if (flags & BP_GDB)
1447         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1448     else
1449         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1450 
1451     breakpoint_invalidate(env, pc);
1452 
1453     if (breakpoint)
1454         *breakpoint = bp;
1455     return 0;
1456 #else
1457     return -ENOSYS;
1458 #endif
1459 }
1460 
1461 /* Remove a specific breakpoint.  */
1462 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1463 {
1464 #if defined(TARGET_HAS_ICE)
1465     CPUBreakpoint *bp;
1466 
1467     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1468         if (bp->pc == pc && bp->flags == flags) {
1469             cpu_breakpoint_remove_by_ref(env, bp);
1470             return 0;
1471         }
1472     }
1473     return -ENOENT;
1474 #else
1475     return -ENOSYS;
1476 #endif
1477 }
1478 
1479 /* Remove a specific breakpoint by reference.  */
1480 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1481 {
1482 #if defined(TARGET_HAS_ICE)
1483     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1484 
1485     breakpoint_invalidate(env, breakpoint->pc);
1486 
1487     qemu_free(breakpoint);
1488 #endif
1489 }
1490 
1491 /* Remove all matching breakpoints. */
1492 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1493 {
1494 #if defined(TARGET_HAS_ICE)
1495     CPUBreakpoint *bp, *next;
1496 
1497     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1498         if (bp->flags & mask)
1499             cpu_breakpoint_remove_by_ref(env, bp);
1500     }
1501 #endif
1502 }
1503 
1504 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1505    CPU loop after each instruction */
1506 void cpu_single_step(CPUState *env, int enabled)
1507 {
1508 #if defined(TARGET_HAS_ICE)
1509     if (env->singlestep_enabled != enabled) {
1510         env->singlestep_enabled = enabled;
1511         if (kvm_enabled())
1512             kvm_update_guest_debug(env, 0);
1513         else {
1514             /* must flush all the translated code to avoid inconsistencies */
1515             /* XXX: only flush what is necessary */
1516             tb_flush(env);
1517         }
1518     }
1519 #endif
1520 }
1521 
1522 /* enable or disable low levels log */
1523 void cpu_set_log(int log_flags)
1524 {
1525     loglevel = log_flags;
1526     if (loglevel && !logfile) {
1527         logfile = fopen(logfilename, log_append ? "a" : "w");
1528         if (!logfile) {
1529             perror(logfilename);
1530             exit(1);
1531         }
1532 #if !defined(CONFIG_SOFTMMU)
1533         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1534         {
1535             static char logfile_buf[4096];
1536             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1537         }
1538 #elif !defined(_WIN32)
1539         /* Win32 doesn't support line-buffering and requires size >= 2 */
1540         setvbuf(logfile, NULL, _IOLBF, 0);
1541 #endif
1542         log_append = 1;
1543     }
1544     if (!loglevel && logfile) {
1545         fclose(logfile);
1546         logfile = NULL;
1547     }
1548 }
1549 
1550 void cpu_set_log_filename(const char *filename)
1551 {
1552     logfilename = strdup(filename);
1553     if (logfile) {
1554         fclose(logfile);
1555         logfile = NULL;
1556     }
1557     cpu_set_log(loglevel);
1558 }
1559 
1560 static void cpu_unlink_tb(CPUState *env)
1561 {
1562     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1563        problem and hope the cpu will stop of its own accord.  For userspace
1564        emulation this often isn't actually as bad as it sounds.  Often
1565        signals are used primarily to interrupt blocking syscalls.  */
1566     TranslationBlock *tb;
1567     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1568 
1569     spin_lock(&interrupt_lock);
1570     tb = env->current_tb;
1571     /* if the cpu is currently executing code, we must unlink it and
1572        all the potentially executing TB */
1573     if (tb) {
1574         env->current_tb = NULL;
1575         tb_reset_jump_recursive(tb);
1576     }
1577     spin_unlock(&interrupt_lock);
1578 }
1579 
1580 /* mask must never be zero, except for A20 change call */
1581 void cpu_interrupt(CPUState *env, int mask)
1582 {
1583     int old_mask;
1584 
1585     old_mask = env->interrupt_request;
1586     env->interrupt_request |= mask;
1587 
1588 #ifndef CONFIG_USER_ONLY
1589     /*
1590      * If called from iothread context, wake the target cpu in
1591      * case it's halted.
1592      */
1593     if (!qemu_cpu_self(env)) {
1594         qemu_cpu_kick(env);
1595         return;
1596     }
1597 #endif
1598 
1599     if (use_icount) {
1600         env->icount_decr.u16.high = 0xffff;
1601 #ifndef CONFIG_USER_ONLY
1602         if (!can_do_io(env)
1603             && (mask & ~old_mask) != 0) {
1604             cpu_abort(env, "Raised interrupt while not in I/O function");
1605         }
1606 #endif
1607     } else {
1608         cpu_unlink_tb(env);
1609     }
1610 }
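
/* Illustrative note (not part of the original file): device models and the
 * interrupt controller raise guest interrupts through this call, e.g.
 *
 *     cpu_interrupt(env, CPU_INTERRUPT_HARD);        // assert a hardware IRQ
 *     cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);  // deassert it
 *
 * CPU_INTERRUPT_HARD is the usual mask bit for external interrupts; the
 * exact set of CPU_INTERRUPT_* flags is target-specific. */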
1611 
1612 void cpu_reset_interrupt(CPUState *env, int mask)
1613 {
1614     env->interrupt_request &= ~mask;
1615 }
1616 
1617 void cpu_exit(CPUState *env)
1618 {
1619     env->exit_request = 1;
1620     cpu_unlink_tb(env);
1621 }
1622 
1623 const CPULogItem cpu_log_items[] = {
1624     { CPU_LOG_TB_OUT_ASM, "out_asm",
1625       "show generated host assembly code for each compiled TB" },
1626     { CPU_LOG_TB_IN_ASM, "in_asm",
1627       "show target assembly code for each compiled TB" },
1628     { CPU_LOG_TB_OP, "op",
1629       "show micro ops for each compiled TB" },
1630     { CPU_LOG_TB_OP_OPT, "op_opt",
1631       "show micro ops "
1632 #ifdef TARGET_I386
1633       "before eflags optimization and "
1634 #endif
1635       "after liveness analysis" },
1636     { CPU_LOG_INT, "int",
1637       "show interrupts/exceptions in short format" },
1638     { CPU_LOG_EXEC, "exec",
1639       "show trace before each executed TB (lots of logs)" },
1640     { CPU_LOG_TB_CPU, "cpu",
1641       "show CPU state before block translation" },
1642 #ifdef TARGET_I386
1643     { CPU_LOG_PCALL, "pcall",
1644       "show protected mode far calls/returns/exceptions" },
1645     { CPU_LOG_RESET, "cpu_reset",
1646       "show CPU state before CPU resets" },
1647 #endif
1648 #ifdef DEBUG_IOPORT
1649     { CPU_LOG_IOPORT, "ioport",
1650       "show all i/o ports accesses" },
1651 #endif
1652     { 0, NULL, NULL },
1653 };
1654 
1655 static int cmp1(const char *s1, int n, const char *s2)
1656 {
1657     if (strlen(s2) != n)
1658         return 0;
1659     return memcmp(s1, s2, n) == 0;
1660 }
1661 
1662 /* takes a comma-separated list of log masks. Returns 0 on error. */
1663 int cpu_str_to_log_mask(const char *str)
1664 {
1665     const CPULogItem *item;
1666     int mask;
1667     const char *p, *p1;
1668 
1669     p = str;
1670     mask = 0;
1671     for(;;) {
1672         p1 = strchr(p, ',');
1673         if (!p1)
1674             p1 = p + strlen(p);
1675 	if(cmp1(p,p1-p,"all")) {
1676 		for(item = cpu_log_items; item->mask != 0; item++) {
1677 			mask |= item->mask;
1678 		}
1679 	} else {
1680         for(item = cpu_log_items; item->mask != 0; item++) {
1681             if (cmp1(p, p1 - p, item->name))
1682                 goto found;
1683         }
1684         return 0;
1685 	}
1686     found:
1687         mask |= item->mask;
1688         if (*p1 != ',')
1689             break;
1690         p = p1 + 1;
1691     }
1692     return mask;
1693 }
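
/* Illustrative example (not part of the original file): for a command line
 * option such as -d, the string "in_asm,cpu" is parsed into
 *     CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU
 * using the cpu_log_items table above, "all" selects every entry of that
 * table, and an unknown name makes the function return 0 so the caller can
 * report the error. */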
1694 
1695 void cpu_abort(CPUState *env, const char *fmt, ...)
1696 {
1697     va_list ap;
1698     va_list ap2;
1699 
1700     va_start(ap, fmt);
1701     va_copy(ap2, ap);
1702     fprintf(stderr, "qemu: fatal: ");
1703     vfprintf(stderr, fmt, ap);
1704     fprintf(stderr, "\n");
1705 #ifdef TARGET_I386
1706     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1707 #else
1708     cpu_dump_state(env, stderr, fprintf, 0);
1709 #endif
1710     if (qemu_log_enabled()) {
1711         qemu_log("qemu: fatal: ");
1712         qemu_log_vprintf(fmt, ap2);
1713         qemu_log("\n");
1714 #ifdef TARGET_I386
1715         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1716 #else
1717         log_cpu_state(env, 0);
1718 #endif
1719         qemu_log_flush();
1720         qemu_log_close();
1721     }
1722     va_end(ap2);
1723     va_end(ap);
1724 #if defined(CONFIG_USER_ONLY)
1725     {
1726         struct sigaction act;
1727         sigfillset(&act.sa_mask);
1728         act.sa_handler = SIG_DFL;
1729         sigaction(SIGABRT, &act, NULL);
1730     }
1731 #endif
1732     abort();
1733 }
1734 
1735 CPUState *cpu_copy(CPUState *env)
1736 {
1737     CPUState *new_env = cpu_init(env->cpu_model_str);
1738     CPUState *next_cpu = new_env->next_cpu;
1739     int cpu_index = new_env->cpu_index;
1740 #if defined(TARGET_HAS_ICE)
1741     CPUBreakpoint *bp;
1742     CPUWatchpoint *wp;
1743 #endif
1744 
1745     memcpy(new_env, env, sizeof(CPUState));
1746 
1747     /* Preserve chaining and index. */
1748     new_env->next_cpu = next_cpu;
1749     new_env->cpu_index = cpu_index;
1750 
1751     /* Clone all break/watchpoints.
1752        Note: Once we support ptrace with hw-debug register access, make sure
1753        BP_CPU break/watchpoints are handled correctly on clone. */
1754     QTAILQ_INIT(&env->breakpoints);
1755     QTAILQ_INIT(&env->watchpoints);
1756 #if defined(TARGET_HAS_ICE)
1757     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1758         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1759     }
1760     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1761         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1762                               wp->flags, NULL);
1763     }
1764 #endif
1765 
1766     return new_env;
1767 }
1768 
1769 #if !defined(CONFIG_USER_ONLY)
1770 
1771 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1772 {
1773     unsigned int i;
1774 
1775     /* Discard jump cache entries for any tb which might potentially
1776        overlap the flushed page.  */
1777     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1778     memset (&env->tb_jmp_cache[i], 0,
1779             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1780 
1781     i = tb_jmp_cache_hash_page(addr);
1782     memset (&env->tb_jmp_cache[i], 0,
1783             TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1784 }
1785 
1786 /* NOTE: if flush_global is true, also flush global entries (not
1787    implemented yet) */
1788 void tlb_flush(CPUState *env, int flush_global)
1789 {
1790     int i;
1791 
1792 #if defined(DEBUG_TLB)
1793     printf("tlb_flush:\n");
1794 #endif
1795     /* must reset current TB so that interrupts cannot modify the
1796        links while we are modifying them */
1797     env->current_tb = NULL;
1798 
1799     for(i = 0; i < CPU_TLB_SIZE; i++) {
1800         int mmu_idx;
1801         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1802             env->tlb_table[mmu_idx][i].addr_read = -1;
1803             env->tlb_table[mmu_idx][i].addr_write = -1;
1804             env->tlb_table[mmu_idx][i].addr_code = -1;
1805         }
1806     }
1807 
1808     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1809 
1810 #ifdef CONFIG_KQEMU
1811     if (env->kqemu_enabled) {
1812         kqemu_flush(env, flush_global);
1813     }
1814 #endif
1815     tlb_flush_count++;
1816 }
1817 
1818 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1819 {
1820     if (addr == (tlb_entry->addr_read &
1821                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1822         addr == (tlb_entry->addr_write &
1823                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1824         addr == (tlb_entry->addr_code &
1825                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1826         tlb_entry->addr_read = -1;
1827         tlb_entry->addr_write = -1;
1828         tlb_entry->addr_code = -1;
1829     }
1830 }
1831 
1832 void tlb_flush_page(CPUState *env, target_ulong addr)
1833 {
1834     int i;
1835     int mmu_idx;
1836 
1837 #if defined(DEBUG_TLB)
1838     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1839 #endif
1840     /* must reset current TB so that interrupts cannot modify the
1841        links while we are modifying them */
1842     env->current_tb = NULL;
1843 
1844     addr &= TARGET_PAGE_MASK;
1845     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1846     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1847         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1848 
1849     tlb_flush_jmp_cache(env, addr);
1850 }
1851 
1852 /* update the TLBs so that writes to code in the virtual page 'addr'
1853    can be detected */
1854 static void tlb_protect_code(ram_addr_t ram_addr)
1855 {
1856     cpu_physical_memory_reset_dirty(ram_addr,
1857                                     ram_addr + TARGET_PAGE_SIZE,
1858                                     CODE_DIRTY_FLAG);
1859 }
1860 
1861 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1862    tested for self-modifying code */
1863 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1864                                     target_ulong vaddr)
1865 {
1866     cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1867 }
1868 
1869 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1870                                          unsigned long start, unsigned long length)
1871 {
1872     unsigned long addr;
1873     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1874         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1875         if ((addr - start) < length) {
1876             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1877         }
1878     }
1879 }
1880 
1881 /* Note: start and end must be within the same ram block.  */
1882 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1883                                      int dirty_flags)
1884 {
1885     CPUState *env;
1886     unsigned long length, start1;
1887     int i;
1888 
1889     start &= TARGET_PAGE_MASK;
1890     end = TARGET_PAGE_ALIGN(end);
1891 
1892     length = end - start;
1893     if (length == 0)
1894         return;
1895     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1896 
1897     /* we modify the TLB cache so that the dirty bit will be set again
1898        when accessing the range */
1899     start1 = (unsigned long)qemu_safe_ram_ptr(start);
1900     /* Check that we don't span multiple blocks - this breaks the
1901        address comparisons below.  */
1902     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1903             != (end - 1) - start) {
1904         abort();
1905     }
1906 
1907     for(env = first_cpu; env != NULL; env = env->next_cpu) {
1908         int mmu_idx;
1909         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1910             for(i = 0; i < CPU_TLB_SIZE; i++)
1911                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1912                                       start1, length);
1913         }
1914     }
1915 }
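
/* Illustrative note (not part of the original file): callers such as the
 * display code or live migration clear one dirty flag for a physical range
 * and later test whether guest writes have set it again, e.g. (sketch,
 * VGA_DIRTY_FLAG and cpu_physical_memory_get_dirty() assumed from cpu-all.h):
 *
 *     cpu_physical_memory_reset_dirty(start, start + size, VGA_DIRTY_FLAG);
 *     // ... let the guest run ...
 *     if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG))
 *         // redraw that page
 *
 * The TLB rewrite above is what guarantees the next guest store goes through
 * the notdirty slow path so the flag can be set again. */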
1916 
1917 int cpu_physical_memory_set_dirty_tracking(int enable)
1918 {
1919     in_migration = enable;
1920     if (kvm_enabled()) {
1921         return kvm_set_migration_log(enable);
1922     }
1923     return 0;
1924 }
1925 
1926 int cpu_physical_memory_get_dirty_tracking(void)
1927 {
1928     return in_migration;
1929 }
1930 
1931 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1932                                    target_phys_addr_t end_addr)
1933 {
1934     int ret = 0;
1935 
1936     if (kvm_enabled())
1937         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1938     return ret;
1939 }
1940 
1941 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1942 {
1943     ram_addr_t ram_addr;
1944     void *p;
1945 
1946     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1947         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1948             + tlb_entry->addend);
1949         ram_addr = qemu_ram_addr_from_host_nofail(p);
1950         if (!cpu_physical_memory_is_dirty(ram_addr)) {
1951             tlb_entry->addr_write |= TLB_NOTDIRTY;
1952         }
1953     }
1954 }
1955 
1956 /* update the TLB according to the current state of the dirty bits */
1957 void cpu_tlb_update_dirty(CPUState *env)
1958 {
1959     int i;
1960     int mmu_idx;
1961     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1962         for(i = 0; i < CPU_TLB_SIZE; i++)
1963             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1964     }
1965 }
1966 
1967 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1968 {
1969     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1970         tlb_entry->addr_write = vaddr;
1971 }
1972 
1973 /* update the TLB corresponding to virtual page vaddr
1974    so that it is no longer dirty */
1975 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1976 {
1977     int i;
1978     int mmu_idx;
1979 
1980     vaddr &= TARGET_PAGE_MASK;
1981     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1982     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1983         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1984 }
1985 
1986 /* add a new TLB entry. At most one entry for a given virtual address
1987    is permitted. Return 0 if OK or 2 if the page could not be mapped
1988    (can only happen in non SOFTMMU mode for I/O pages or pages
1989    conflicting with the host address space). */
1990 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1991                       target_phys_addr_t paddr, int prot,
1992                       int mmu_idx, int is_softmmu)
1993 {
1994     PhysPageDesc *p;
1995     unsigned long pd;
1996     unsigned int index;
1997     target_ulong address;
1998     target_ulong code_address;
1999     ptrdiff_t addend;
2000     int ret;
2001     CPUTLBEntry *te;
2002     CPUWatchpoint *wp;
2003     target_phys_addr_t iotlb;
2004 
2005     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2006     if (!p) {
2007         pd = IO_MEM_UNASSIGNED;
2008     } else {
2009         pd = p->phys_offset;
2010     }
2011 #if defined(DEBUG_TLB)
2012     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2013            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2014 #endif
2015 
2016     ret = 0;
2017     address = vaddr;
2018     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2019         /* IO memory case (romd handled later) */
2020         address |= TLB_MMIO;
2021     }
2022     addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2023     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2024         /* Normal RAM.  */
2025         iotlb = pd & TARGET_PAGE_MASK;
2026         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2027             iotlb |= IO_MEM_NOTDIRTY;
2028         else
2029             iotlb |= IO_MEM_ROM;
2030     } else {
2031         /* IO handlers are currently passed a physical address.
2032            It would be nice to pass an offset from the base address
2033            of that region.  This would avoid having to special case RAM,
2034            and avoid full address decoding in every device.
2035            We can't use the high bits of pd for this because
2036            IO_MEM_ROMD uses these as a ram address.  */
2037         iotlb = (pd & ~TARGET_PAGE_MASK);
2038         if (p) {
2039             iotlb += p->region_offset;
2040         } else {
2041             iotlb += paddr;
2042         }
2043     }
2044 
2045     code_address = address;
2046     /* Make accesses to pages with watchpoints go via the
2047        watchpoint trap routines.  */
2048     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2049         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2050             iotlb = io_mem_watch + paddr;
2051             /* TODO: The memory case can be optimized by not trapping
2052                reads of pages with a write breakpoint.  */
2053             address |= TLB_MMIO;
2054         }
2055     }
2056 
2057     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2058     env->iotlb[mmu_idx][index] = iotlb - vaddr;
2059     te = &env->tlb_table[mmu_idx][index];
2060     te->addend = addend - vaddr;
2061     if (prot & PAGE_READ) {
2062         te->addr_read = address;
2063     } else {
2064         te->addr_read = -1;
2065     }
2066 
2067     if (prot & PAGE_EXEC) {
2068         te->addr_code = code_address;
2069     } else {
2070         te->addr_code = -1;
2071     }
2072     if (prot & PAGE_WRITE) {
2073         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2074             (pd & IO_MEM_ROMD)) {
2075             /* Write access calls the I/O callback.  */
2076             te->addr_write = address | TLB_MMIO;
2077         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2078                    !cpu_physical_memory_is_dirty(pd)) {
2079             te->addr_write = address | TLB_NOTDIRTY;
2080         } else {
2081             te->addr_write = address;
2082         }
2083     } else {
2084         te->addr_write = -1;
2085     }
2086 
2087 #ifdef CONFIG_MEMCHECK
2088     /*
2089      * If the memory checker is running, we need to make sure that the page
2090      * cached into the TLB by this operation complies with our requirement
2091      * that __ld/__stx_mmu be called for accesses to pages containing
2092      * memory blocks that require access-violation checks.
2093      *
2094      * We need to ask the memory checker whether this page should be
2095      * invalidated iff:
2096      *  - Memchecking is enabled.
2097      *  - The cached page belongs to user space.
2098      *  - The request to cache this page didn't come from softmmu. We're
2099      *    covered there, because after the page is cached here we will
2100      *    invalidate it in the __ld/__stx_mmu wrapper.
2101      *  - The cached page belongs to RAM, not an I/O area.
2102      *  - The page is cached for read or write access.
2103      */
2104     if (memcheck_instrument_mmu && mmu_idx == 1 && !is_softmmu &&
2105         (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2106         (prot & (PAGE_READ | PAGE_WRITE)) &&
2107         memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
2108         if (prot & PAGE_READ) {
2109             te->addr_read ^= TARGET_PAGE_MASK;
2110         }
2111         if (prot & PAGE_WRITE) {
2112             te->addr_write ^= TARGET_PAGE_MASK;
2113         }
2114     }
2115 #endif  // CONFIG_MEMCHECK
2116 
2117     return ret;
2118 }
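
/* Illustrative note (not part of the original file): this is normally reached
 * from the target's MMU fault handler; after translating vaddr to paddr and
 * computing the page protection, that handler typically ends with a call of
 * the form (sketch):
 *
 *     tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                       paddr & TARGET_PAGE_MASK,
 *                       prot, mmu_idx, is_softmmu);
 *
 * so that the next access to the page hits the TLB fast path directly. */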
2119 
2120 #else
2121 
2122 void tlb_flush(CPUState *env, int flush_global)
2123 {
2124 }
2125 
2126 void tlb_flush_page(CPUState *env, target_ulong addr)
2127 {
2128 }
2129 
2130 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2131                       target_phys_addr_t paddr, int prot,
2132                       int mmu_idx, int is_softmmu)
2133 {
2134     return 0;
2135 }
2136 
2137 /*
2138  * Walks guest process memory "regions" one by one
2139  * and calls callback function 'fn' for each region.
2140  */
2141 int walk_memory_regions(void *priv,
2142     int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2143 {
2144     unsigned long start, end;
2145     PageDesc *p = NULL;
2146     int i, j, prot, prot1;
2147     int rc = 0;
2148 
2149     start = end = -1;
2150     prot = 0;
2151 
2152     for (i = 0; i <= L1_SIZE; i++) {
2153         p = (i < L1_SIZE) ? l1_map[i] : NULL;
2154         for (j = 0; j < L2_SIZE; j++) {
2155             prot1 = (p == NULL) ? 0 : p[j].flags;
2156             /*
2157              * "region" is one continuous chunk of memory
2158              * that has the same protection flags set.
2159              */
2160             if (prot1 != prot) {
2161                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2162                 if (start != -1) {
2163                     rc = (*fn)(priv, start, end, prot);
2164                     /* callback can stop iteration by returning != 0 */
2165                     if (rc != 0)
2166                         return (rc);
2167                 }
2168                 if (prot1 != 0)
2169                     start = end;
2170                 else
2171                     start = -1;
2172                 prot = prot1;
2173             }
2174             if (p == NULL)
2175                 break;
2176         }
2177     }
2178     return (rc);
2179 }
2180 
2181 static int dump_region(void *priv, unsigned long start,
2182     unsigned long end, unsigned long prot)
2183 {
2184     FILE *f = (FILE *)priv;
2185 
2186     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2187         start, end, end - start,
2188         ((prot & PAGE_READ) ? 'r' : '-'),
2189         ((prot & PAGE_WRITE) ? 'w' : '-'),
2190         ((prot & PAGE_EXEC) ? 'x' : '-'));
2191 
2192     return (0);
2193 }
2194 
2195 /* dump memory mappings */
2196 void page_dump(FILE *f)
2197 {
2198     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2199             "start", "end", "size", "prot");
2200     walk_memory_regions(f, dump_region);
2201 }
2202 
2203 int page_get_flags(target_ulong address)
2204 {
2205     PageDesc *p;
2206 
2207     p = page_find(address >> TARGET_PAGE_BITS);
2208     if (!p)
2209         return 0;
2210     return p->flags;
2211 }
2212 
2213 /* Modify the flags of a page and invalidate the code if necessary.
2214    The flag PAGE_WRITE_ORG is positioned automatically depending
2215    on PAGE_WRITE.  The mmap_lock should already be held.  */
2216 void page_set_flags(target_ulong start, target_ulong end, int flags)
2217 {
2218     PageDesc *p;
2219     target_ulong addr;
2220 
2221     /* mmap_lock should already be held.  */
2222     start = start & TARGET_PAGE_MASK;
2223     end = TARGET_PAGE_ALIGN(end);
2224     if (flags & PAGE_WRITE)
2225         flags |= PAGE_WRITE_ORG;
2226     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2227         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2228         /* We may be called for host regions that are outside guest
2229            address space.  */
2230         if (!p)
2231             return;
2232         /* if the write protection is set, then we invalidate the code
2233            inside */
2234         if (!(p->flags & PAGE_WRITE) &&
2235             (flags & PAGE_WRITE) &&
2236             p->first_tb) {
2237             tb_invalidate_phys_page(addr, 0, NULL);
2238         }
2239         p->flags = flags;
2240     }
2241 }
2242 
2243 int page_check_range(target_ulong start, target_ulong len, int flags)
2244 {
2245     PageDesc *p;
2246     target_ulong end;
2247     target_ulong addr;
2248 
2249     if (start + len < start)
2250         /* we've wrapped around */
2251         return -1;
2252 
2253     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2254     start = start & TARGET_PAGE_MASK;
2255 
2256     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2257         p = page_find(addr >> TARGET_PAGE_BITS);
2258         if (!p)
2259             return -1;
2260         if (!(p->flags & PAGE_VALID))
2261             return -1;
2262 
2263         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2264             return -1;
2265         if (flags & PAGE_WRITE) {
2266             if (!(p->flags & PAGE_WRITE_ORG))
2267                 return -1;
2268             /* unprotect the page if it was put read-only because it
2269                contains translated code */
2270             if (!(p->flags & PAGE_WRITE)) {
2271                 if (!page_unprotect(addr, 0, NULL))
2272                     return -1;
2273             }
2274             return 0;
2275         }
2276     }
2277     return 0;
2278 }
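
/* Illustrative note (not part of the original file): user-mode emulation uses
 * this to validate guest buffers before syscall emulation touches them,
 * e.g. (sketch; TARGET_EFAULT assumed from the linux-user code):
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0)
 *         return -TARGET_EFAULT;
 */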
2279 
2280 /* called from signal handler: invalidate the code and unprotect the
2281    page. Return TRUE if the fault was successfully handled. */
2282 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2283 {
2284     unsigned int page_index, prot, pindex;
2285     PageDesc *p, *p1;
2286     target_ulong host_start, host_end, addr;
2287 
2288     /* Technically this isn't safe inside a signal handler.  However we
2289        know this only ever happens in a synchronous SEGV handler, so in
2290        practice it seems to be ok.  */
2291     mmap_lock();
2292 
2293     host_start = address & qemu_host_page_mask;
2294     page_index = host_start >> TARGET_PAGE_BITS;
2295     p1 = page_find(page_index);
2296     if (!p1) {
2297         mmap_unlock();
2298         return 0;
2299     }
2300     host_end = host_start + qemu_host_page_size;
2301     p = p1;
2302     prot = 0;
2303     for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2304         prot |= p->flags;
2305         p++;
2306     }
2307     /* if the page was really writable, then we change its
2308        protection back to writable */
2309     if (prot & PAGE_WRITE_ORG) {
2310         pindex = (address - host_start) >> TARGET_PAGE_BITS;
2311         if (!(p1[pindex].flags & PAGE_WRITE)) {
2312             mprotect((void *)g2h(host_start), qemu_host_page_size,
2313                      (prot & PAGE_BITS) | PAGE_WRITE);
2314             p1[pindex].flags |= PAGE_WRITE;
2315             /* and since the content will be modified, we must invalidate
2316                the corresponding translated code. */
2317             tb_invalidate_phys_page(address, pc, puc);
2318 #ifdef DEBUG_TB_CHECK
2319             tb_invalidate_check(address);
2320 #endif
2321             mmap_unlock();
2322             return 1;
2323         }
2324     }
2325     mmap_unlock();
2326     return 0;
2327 }
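
/* Descriptive note (not in the original source): in user-mode emulation,
 * pages containing translated code are write-protected with mprotect() when
 * the TB is created.  A guest store to such a page raises SIGSEGV on the
 * host; the signal handler ends up here, which restores PAGE_WRITE,
 * invalidates the TBs derived from the page, and lets the faulting store be
 * restarted. */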
2328 
2329 static inline void tlb_set_dirty(CPUState *env,
2330                                  unsigned long addr, target_ulong vaddr)
2331 {
2332 }
2333 #endif /* defined(CONFIG_USER_ONLY) */
2334 
2335 #if !defined(CONFIG_USER_ONLY)
2336 
2337 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2338                              ram_addr_t memory, ram_addr_t region_offset);
2339 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2340                            ram_addr_t orig_memory, ram_addr_t region_offset);
2341 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2342                       need_subpage)                                     \
2343     do {                                                                \
2344         if (addr > start_addr)                                          \
2345             start_addr2 = 0;                                            \
2346         else {                                                          \
2347             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2348             if (start_addr2 > 0)                                        \
2349                 need_subpage = 1;                                       \
2350         }                                                               \
2351                                                                         \
2352         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2353             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2354         else {                                                          \
2355             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2356             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2357                 need_subpage = 1;                                       \
2358         }                                                               \
2359     } while (0)
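
/* Illustrative example (not part of the original file): registering a region
 * that starts at offset 0x100 inside a 4K page gives start_addr2 = 0x100 and
 * end_addr2 = 0xfff (or less if the region also ends inside the same page),
 * so need_subpage is set and only that slice of the page is routed to the
 * new handlers; a page-aligned, page-sized region leaves need_subpage at 0. */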
2360 
2361 /* register physical memory.
2362    For RAM, 'size' must be a multiple of the target page size.
2363    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2364    io memory page.  The address used when calling the IO function is
2365    the offset from the start of the region, plus region_offset.  Both
2366    start_addr and region_offset are rounded down to a page boundary
2367    before calculating this offset.  This should not be a problem unless
2368    the low bits of start_addr and region_offset differ.  */
2369 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2370                                          ram_addr_t size,
2371                                          ram_addr_t phys_offset,
2372                                          ram_addr_t region_offset,
2373                                          bool log_dirty)
2374 {
2375     target_phys_addr_t addr, end_addr;
2376     PhysPageDesc *p;
2377     CPUState *env;
2378     ram_addr_t orig_size = size;
2379     subpage_t *subpage;
2380 
2381     if (kvm_enabled())
2382         kvm_set_phys_mem(start_addr, size, phys_offset);
2383 #ifdef CONFIG_HAX
2384     if (hax_enabled())
2385         hax_set_phys_mem(start_addr, size, phys_offset);
2386 #endif
2387 
2388     if (phys_offset == IO_MEM_UNASSIGNED) {
2389         region_offset = start_addr;
2390     }
2391     region_offset &= TARGET_PAGE_MASK;
2392     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2393     end_addr = start_addr + (target_phys_addr_t)size;
2394 
2395     addr = start_addr;
2396     do {
2397         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2398         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2399             ram_addr_t orig_memory = p->phys_offset;
2400             target_phys_addr_t start_addr2, end_addr2;
2401             int need_subpage = 0;
2402 
2403             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2404                           need_subpage);
2405             if (need_subpage) {
2406                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2407                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2408                                            &p->phys_offset, orig_memory,
2409                                            p->region_offset);
2410                 } else {
2411                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2412                                             >> IO_MEM_SHIFT];
2413                 }
2414                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2415                                  region_offset);
2416                 p->region_offset = 0;
2417             } else {
2418                 p->phys_offset = phys_offset;
2419                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2420                     (phys_offset & IO_MEM_ROMD))
2421                     phys_offset += TARGET_PAGE_SIZE;
2422             }
2423         } else {
2424             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2425             p->phys_offset = phys_offset;
2426             p->region_offset = region_offset;
2427             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2428                 (phys_offset & IO_MEM_ROMD)) {
2429                 phys_offset += TARGET_PAGE_SIZE;
2430             } else {
2431                 target_phys_addr_t start_addr2, end_addr2;
2432                 int need_subpage = 0;
2433 
2434                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2435                               end_addr2, need_subpage);
2436 
2437                 if (need_subpage) {
2438                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2439                                            &p->phys_offset, IO_MEM_UNASSIGNED,
2440                                            addr & TARGET_PAGE_MASK);
2441                     subpage_register(subpage, start_addr2, end_addr2,
2442                                      phys_offset, region_offset);
2443                     p->region_offset = 0;
2444                 }
2445             }
2446         }
2447         region_offset += TARGET_PAGE_SIZE;
2448         addr += TARGET_PAGE_SIZE;
2449     } while (addr != end_addr);
2450 
2451     /* since each CPU stores ram addresses in its TLB cache, we must
2452        reset the modified entries */
2453     /* XXX: slow ! */
2454     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2455         tlb_flush(env, 1);
2456     }
2457 }
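
/* Illustrative note (not part of the original file): board and device code
 * normally reaches this through the cpu_register_physical_memory() wrapper
 * (assumed to pass region_offset = 0 and log_dirty = false), e.g. (sketch,
 * "board.ram" is just an example name):
 *
 *     ram_addr_t off = qemu_ram_alloc(NULL, "board.ram", ram_size);
 *     cpu_register_physical_memory(0x00000000, ram_size, off | IO_MEM_RAM);
 *
 * while an MMIO region passes the value returned by cpu_register_io_memory()
 * as phys_offset instead. */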
2458 
2459 /* XXX: temporary until new memory mapping API */
2460 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2461 {
2462     PhysPageDesc *p;
2463 
2464     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2465     if (!p)
2466         return IO_MEM_UNASSIGNED;
2467     return p->phys_offset;
2468 }
2469 
2470 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2471 {
2472     if (kvm_enabled())
2473         kvm_coalesce_mmio_region(addr, size);
2474 }
2475 
2476 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2477 {
2478     if (kvm_enabled())
2479         kvm_uncoalesce_mmio_region(addr, size);
2480 }
2481 
2482 static ram_addr_t find_ram_offset(ram_addr_t size)
2483 {
2484     RAMBlock *block, *next_block;
2485     ram_addr_t offset = 0, mingap = ULONG_MAX;
2486 
2487     if (QLIST_EMPTY(&ram_list.blocks))
2488         return 0;
2489 
2490     QLIST_FOREACH(block, &ram_list.blocks, next) {
2491         ram_addr_t end, next = ULONG_MAX;
2492 
2493         end = block->offset + block->length;
2494 
2495         QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2496             if (next_block->offset >= end) {
2497                 next = MIN(next, next_block->offset);
2498             }
2499         }
2500         if (next - end >= size && next - end < mingap) {
2501             offset =  end;
2502             mingap = next - end;
2503         }
2504     }
2505     return offset;
2506 }
2507 
2508 static ram_addr_t last_ram_offset(void)
2509 {
2510     RAMBlock *block;
2511     ram_addr_t last = 0;
2512 
2513     QLIST_FOREACH(block, &ram_list.blocks, next)
2514         last = MAX(last, block->offset + block->length);
2515 
2516     return last;
2517 }
2518 
2519 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2520                                    ram_addr_t size, void *host)
2521 {
2522     RAMBlock *new_block, *block;
2523 
2524     size = TARGET_PAGE_ALIGN(size);
2525     new_block = qemu_mallocz(sizeof(*new_block));
2526 
2527 #if 0
2528     if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2529         char *id = dev->parent_bus->info->get_dev_path(dev);
2530         if (id) {
2531             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2532             qemu_free(id);
2533         }
2534     }
2535 #endif
2536     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2537 
2538     QLIST_FOREACH(block, &ram_list.blocks, next) {
2539         if (!strcmp(block->idstr, new_block->idstr)) {
2540             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2541                     new_block->idstr);
2542             abort();
2543         }
2544     }
2545 
2546     if (host) {
2547         new_block->host = host;
2548         new_block->flags |= RAM_PREALLOC_MASK;
2549     } else {
2550         if (mem_path) {
2551 #if 0 && defined (__linux__) && !defined(TARGET_S390X)
2552             new_block->host = file_ram_alloc(new_block, size, mem_path);
2553             if (!new_block->host) {
2554                 new_block->host = qemu_vmalloc(size);
2555                 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2556             }
2557 #else
2558             fprintf(stderr, "-mem-path option unsupported\n");
2559             exit(1);
2560 #endif
2561         } else {
2562 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2563             /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2564             new_block->host = mmap((void*)0x1000000, size,
2565                                    PROT_EXEC|PROT_READ|PROT_WRITE,
2566                                    MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2567 #else
2568             new_block->host = qemu_vmalloc(size);
2569 
2570 #ifdef CONFIG_HAX
2571             /*
2572              * In HAX, qemu allocates the virtual address, and the HAX kernel
2573              * module populates the region with physical memory. Currently
2574              * we don't populate guest memory on demand, so we should
2575              * make sure that a sufficient amount of memory is available
2576              * in advance.
2577              */
2578             if (hax_enabled())
2579             {
2580                 int ret;
2581                 ret = hax_populate_ram((uint64_t)new_block->host, size);
2582                 if (ret < 0)
2583                 {
2584                     fprintf(stderr, "HAX failed to populate ram\n");
2585                     exit(-1);
2586                 }
2587             }
2588 #endif
2589 
2590 #endif
2591 #ifdef MADV_MERGEABLE
2592             madvise(new_block->host, size, MADV_MERGEABLE);
2593 #endif
2594         }
2595     }
2596 
2597     new_block->offset = find_ram_offset(size);
2598     new_block->length = size;
2599 
2600     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2601 
2602     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2603                                        last_ram_offset() >> TARGET_PAGE_BITS);
2604     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2605            0xff, size >> TARGET_PAGE_BITS);
2606 
2607     if (kvm_enabled())
2608         kvm_setup_guest_memory(new_block->host, size);
2609 
2610     return new_block->offset;
2611 }
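
/* Illustrative note (not part of the original file): passing a non-NULL
 * 'host' pointer registers caller-owned memory (flagged RAM_PREALLOC_MASK so
 * that qemu_ram_free() and qemu_ram_remap() leave its backing storage
 * alone); passing NULL lets this function allocate the backing storage
 * itself, which is what the plain qemu_ram_alloc() wrapper below does. */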
2612 
2613 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2614 {
2615     return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2616 }
2617 
2618 void qemu_ram_free(ram_addr_t addr)
2619 {
2620     RAMBlock *block;
2621 
2622     QLIST_FOREACH(block, &ram_list.blocks, next) {
2623         if (addr == block->offset) {
2624             QLIST_REMOVE(block, next);
2625             if (block->flags & RAM_PREALLOC_MASK) {
2626                 ;
2627             } else if (mem_path) {
2628 #if defined (__linux__) && !defined(TARGET_S390X)
2629                 if (block->fd) {
2630                     munmap(block->host, block->length);
2631                     close(block->fd);
2632                 } else {
2633                     qemu_vfree(block->host);
2634                 }
2635 #else
2636                 abort();
2637 #endif
2638             } else {
2639 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2640                 munmap(block->host, block->length);
2641 #else
2642                 qemu_vfree(block->host);
2643 #endif
2644             }
2645             qemu_free(block);
2646             return;
2647         }
2648     }
2649 
2650 }
2651 
2652 #ifndef _WIN32
2653 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2654 {
2655 #ifndef CONFIG_ANDROID
2656     RAMBlock *block;
2657     ram_addr_t offset;
2658     int flags;
2659     void *area, *vaddr;
2660 
2661     QLIST_FOREACH(block, &ram_list.blocks, next) {
2662         offset = addr - block->offset;
2663         if (offset < block->length) {
2664             vaddr = block->host + offset;
2665             if (block->flags & RAM_PREALLOC_MASK) {
2666                 ;
2667             } else {
2668                 flags = MAP_FIXED;
2669                 munmap(vaddr, length);
2670                 if (mem_path) {
2671 #if defined(__linux__) && !defined(TARGET_S390X)
2672                     if (block->fd) {
2673 #ifdef MAP_POPULATE
2674                         flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2675                             MAP_PRIVATE;
2676 #else
2677                         flags |= MAP_PRIVATE;
2678 #endif
2679                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2680                                     flags, block->fd, offset);
2681                     } else {
2682                         flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2683                         area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2684                                     flags, -1, 0);
2685                     }
2686 #else
2687                     abort();
2688 #endif
2689                 } else {
2690 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2691                     flags |= MAP_SHARED | MAP_ANONYMOUS;
2692                     area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2693                                 flags, -1, 0);
2694 #else
2695                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2696                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2697                                 flags, -1, 0);
2698 #endif
2699                 }
2700                 if (area != vaddr) {
2701                     fprintf(stderr, "Could not remap addr: %lx@%lx\n",
2702                             length, addr);
2703                     exit(1);
2704                 }
2705                 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2706             }
2707             return;
2708         }
2709     }
2710 #endif /* !CONFIG_ANDROID */
2711 }
2712 #endif /* !_WIN32 */
2713 
2714 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2715    With the exception of the softmmu code in this file, this should
2716    only be used for local memory (e.g. video ram) that the device owns,
2717    and knows it isn't going to access beyond the end of the block.
2718 
2719    It should not be used for general purpose DMA.
2720    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2721  */
2722 void *qemu_get_ram_ptr(ram_addr_t addr)
2723 {
2724     RAMBlock *block;
2725 
2726     QLIST_FOREACH(block, &ram_list.blocks, next) {
2727         if (addr - block->offset < block->length) {
2728             /* Move this entry to the start of the list.  */
2729             if (block != QLIST_FIRST(&ram_list.blocks)) {
2730                 QLIST_REMOVE(block, next);
2731                 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2732             }
2733             return block->host + (addr - block->offset);
2734         }
2735     }
2736 
2737     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2738     abort();
2739 
2740     return NULL;
2741 }
2742 
2743 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2744  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2745  */
2746 void *qemu_safe_ram_ptr(ram_addr_t addr)
2747 {
2748     RAMBlock *block;
2749 
2750     QLIST_FOREACH(block, &ram_list.blocks, next) {
2751         if (addr - block->offset < block->length) {
2752             return block->host + (addr - block->offset);
2753         }
2754     }
2755 
2756     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2757     abort();
2758 
2759     return NULL;
2760 }
2761 
2762 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2763 {
2764     RAMBlock *block;
2765     uint8_t *host = ptr;
2766 
2767     QLIST_FOREACH(block, &ram_list.blocks, next) {
2768         if (host - block->host < block->length) {
2769             *ram_addr = block->offset + (host - block->host);
2770             return 0;
2771         }
2772     }
2773     return -1;
2774 }
2775 
2776 /* Some of the softmmu routines need to translate from a host pointer
2777    (typically a TLB entry) back to a ram offset.  */
2778 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2779 {
2780     ram_addr_t ram_addr;
2781 
2782     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2783         fprintf(stderr, "Bad ram pointer %p\n", ptr);
2784         abort();
2785     }
2786     return ram_addr;
2787 }
2788 
2789 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2790 {
2791 #ifdef DEBUG_UNASSIGNED
2792     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2793 #endif
2794 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2795     do_unassigned_access(addr, 0, 0, 0, 1);
2796 #endif
2797     return 0;
2798 }
2799 
2800 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2801 {
2802 #ifdef DEBUG_UNASSIGNED
2803     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2804 #endif
2805 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2806     do_unassigned_access(addr, 0, 0, 0, 2);
2807 #endif
2808     return 0;
2809 }
2810 
2811 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2812 {
2813 #ifdef DEBUG_UNASSIGNED
2814     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2815 #endif
2816 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2817     do_unassigned_access(addr, 0, 0, 0, 4);
2818 #endif
2819     return 0;
2820 }
2821 
2822 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2823 {
2824 #ifdef DEBUG_UNASSIGNED
2825     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2826 #endif
2827 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2828     do_unassigned_access(addr, 1, 0, 0, 1);
2829 #endif
2830 }
2831 
2832 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2833 {
2834 #ifdef DEBUG_UNASSIGNED
2835     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2836 #endif
2837 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2838     do_unassigned_access(addr, 1, 0, 0, 2);
2839 #endif
2840 }
2841 
2842 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2843 {
2844 #ifdef DEBUG_UNASSIGNED
2845     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2846 #endif
2847 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2848     do_unassigned_access(addr, 1, 0, 0, 4);
2849 #endif
2850 }
2851 
2852 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2853     unassigned_mem_readb,
2854     unassigned_mem_readw,
2855     unassigned_mem_readl,
2856 };
2857 
2858 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2859     unassigned_mem_writeb,
2860     unassigned_mem_writew,
2861     unassigned_mem_writel,
2862 };
2863 
2864 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2865                                 uint32_t val)
2866 {
2867     int dirty_flags;
2868     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2869     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2870 #if !defined(CONFIG_USER_ONLY)
2871         tb_invalidate_phys_page_fast(ram_addr, 1);
2872         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2873 #endif
2874     }
2875     stb_p(qemu_get_ram_ptr(ram_addr), val);
2876     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2877     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2878     /* we remove the notdirty callback only if the code has been
2879        flushed */
2880     if (dirty_flags == 0xff)
2881         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2882 }
2883 
2884 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2885                                 uint32_t val)
2886 {
2887     int dirty_flags;
2888     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2889     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2890 #if !defined(CONFIG_USER_ONLY)
2891         tb_invalidate_phys_page_fast(ram_addr, 2);
2892         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2893 #endif
2894     }
2895     stw_p(qemu_get_ram_ptr(ram_addr), val);
2896     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2897     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2898     /* we remove the notdirty callback only if the code has been
2899        flushed */
2900     if (dirty_flags == 0xff)
2901         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2902 }
2903 
2904 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2905                                 uint32_t val)
2906 {
2907     int dirty_flags;
2908     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2909     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2910 #if !defined(CONFIG_USER_ONLY)
2911         tb_invalidate_phys_page_fast(ram_addr, 4);
2912         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2913 #endif
2914     }
2915     stl_p(qemu_get_ram_ptr(ram_addr), val);
2916     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2917     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2918     /* we remove the notdirty callback only if the code has been
2919        flushed */
2920     if (dirty_flags == 0xff)
2921         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2922 }
2923 
2924 static CPUReadMemoryFunc * const error_mem_read[3] = {
2925     NULL, /* never used */
2926     NULL, /* never used */
2927     NULL, /* never used */
2928 };
2929 
2930 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2931     notdirty_mem_writeb,
2932     notdirty_mem_writew,
2933     notdirty_mem_writel,
2934 };
2935 
2936 /* Generate a debug exception if a watchpoint has been hit.  */
2937 static void check_watchpoint(int offset, int len_mask, int flags)
2938 {
2939     CPUState *env = cpu_single_env;
2940     target_ulong pc, cs_base;
2941     TranslationBlock *tb;
2942     target_ulong vaddr;
2943     CPUWatchpoint *wp;
2944     int cpu_flags;
2945 
2946     if (env->watchpoint_hit) {
2947         /* We re-entered the check after replacing the TB. Now raise
2948          * the debug interrupt so that it will trigger after the
2949          * current instruction. */
2950         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2951         return;
2952     }
2953     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2954     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2955         if ((vaddr == (wp->vaddr & len_mask) ||
2956              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2957             wp->flags |= BP_WATCHPOINT_HIT;
2958             if (!env->watchpoint_hit) {
2959                 env->watchpoint_hit = wp;
2960                 tb = tb_find_pc(env->mem_io_pc);
2961                 if (!tb) {
2962                     cpu_abort(env, "check_watchpoint: could not find TB for "
2963                               "pc=%p", (void *)env->mem_io_pc);
2964                 }
2965                 cpu_restore_state(tb, env, env->mem_io_pc);
2966                 tb_phys_invalidate(tb, -1);
2967                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2968                     env->exception_index = EXCP_DEBUG;
2969                 } else {
2970                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2971                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2972                 }
2973                 cpu_resume_from_signal(env, NULL);
2974             }
2975         } else {
2976             wp->flags &= ~BP_WATCHPOINT_HIT;
2977         }
2978     }
2979 }
2980 
2981 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2982    so these check for a hit then pass through to the normal out-of-line
2983    phys routines.  */
2984 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2985 {
2986     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2987     return ldub_phys(addr);
2988 }
2989 
2990 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2991 {
2992     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2993     return lduw_phys(addr);
2994 }
2995 
2996 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2997 {
2998     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2999     return ldl_phys(addr);
3000 }
3001 
3002 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3003                              uint32_t val)
3004 {
3005     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3006     stb_phys(addr, val);
3007 }
3008 
3009 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3010                              uint32_t val)
3011 {
3012     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3013     stw_phys(addr, val);
3014 }
3015 
3016 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3017                              uint32_t val)
3018 {
3019     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3020     stl_phys(addr, val);
3021 }
3022 
3023 static CPUReadMemoryFunc * const watch_mem_read[3] = {
3024     watch_mem_readb,
3025     watch_mem_readw,
3026     watch_mem_readl,
3027 };
3028 
3029 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3030     watch_mem_writeb,
3031     watch_mem_writew,
3032     watch_mem_writel,
3033 };
3034 
3035 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3036                                  unsigned int len)
3037 {
3038     uint32_t ret;
3039     unsigned int idx;
3040 
3041     idx = SUBPAGE_IDX(addr);
3042 #if defined(DEBUG_SUBPAGE)
3043     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3044            mmio, len, addr, idx);
3045 #endif
3046     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3047                                        addr + mmio->region_offset[idx][0][len]);
3048 
3049     return ret;
3050 }
3051 
3052 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3053                               uint32_t value, unsigned int len)
3054 {
3055     unsigned int idx;
3056 
3057     idx = SUBPAGE_IDX(addr);
3058 #if defined(DEBUG_SUBPAGE)
3059     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3060            mmio, len, addr, idx, value);
3061 #endif
3062     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3063                                   addr + mmio->region_offset[idx][1][len],
3064                                   value);
3065 }
3066 
3067 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3068 {
3069 #if defined(DEBUG_SUBPAGE)
3070     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3071 #endif
3072 
3073     return subpage_readlen(opaque, addr, 0);
3074 }
3075 
3076 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3077                             uint32_t value)
3078 {
3079 #if defined(DEBUG_SUBPAGE)
3080     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3081 #endif
3082     subpage_writelen(opaque, addr, value, 0);
3083 }
3084 
3085 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3086 {
3087 #if defined(DEBUG_SUBPAGE)
3088     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3089 #endif
3090 
3091     return subpage_readlen(opaque, addr, 1);
3092 }
3093 
3094 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3095                             uint32_t value)
3096 {
3097 #if defined(DEBUG_SUBPAGE)
3098     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3099 #endif
3100     subpage_writelen(opaque, addr, value, 1);
3101 }
3102 
3103 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3104 {
3105 #if defined(DEBUG_SUBPAGE)
3106     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3107 #endif
3108 
3109     return subpage_readlen(opaque, addr, 2);
3110 }
3111 
3112 static void subpage_writel (void *opaque,
3113                          target_phys_addr_t addr, uint32_t value)
3114 {
3115 #if defined(DEBUG_SUBPAGE)
3116     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3117 #endif
3118     subpage_writelen(opaque, addr, value, 2);
3119 }
3120 
3121 static CPUReadMemoryFunc * const subpage_read[] = {
3122     &subpage_readb,
3123     &subpage_readw,
3124     &subpage_readl,
3125 };
3126 
3127 static CPUWriteMemoryFunc * const subpage_write[] = {
3128     &subpage_writeb,
3129     &subpage_writew,
3130     &subpage_writel,
3131 };
3132 
3133 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3134                              ram_addr_t memory, ram_addr_t region_offset)
3135 {
3136     int idx, eidx;
3137     unsigned int i;
3138 
3139     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3140         return -1;
3141     idx = SUBPAGE_IDX(start);
3142     eidx = SUBPAGE_IDX(end);
3143 #if defined(DEBUG_SUBPAGE)
3144     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3145            mmio, start, end, idx, eidx, memory);
3146 #endif
3147     memory >>= IO_MEM_SHIFT;
3148     for (; idx <= eidx; idx++) {
3149         for (i = 0; i < 4; i++) {
3150             if (io_mem_read[memory][i]) {
3151                 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3152                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3153                 mmio->region_offset[idx][0][i] = region_offset;
3154             }
3155             if (io_mem_write[memory][i]) {
3156                 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3157                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3158                 mmio->region_offset[idx][1][i] = region_offset;
3159             }
3160         }
3161     }
3162 
3163     return 0;
3164 }
3165 
3166 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3167                            ram_addr_t orig_memory, ram_addr_t region_offset)
3168 {
3169     subpage_t *mmio;
3170     int subpage_memory;
3171 
3172     mmio = qemu_mallocz(sizeof(subpage_t));
3173 
3174     mmio->base = base;
3175     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3176 #if defined(DEBUG_SUBPAGE)
3177     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3178            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3179 #endif
3180     *phys = subpage_memory | IO_MEM_SUBPAGE;
3181     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3182                          region_offset);
3183 
3184     return mmio;
3185 }
3186 
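/* Illustrative sketch, not part of the original file (the io indexes and the
 * region offsets of 0 are hypothetical).  A subpage lets two regions share a
 * single target page: subpage_init() builds a subpage routed entirely to one
 * region and stores the IO_MEM_SUBPAGE token in *phys (which the caller keeps
 * in the page's phys_offset), and subpage_register() then re-routes part of
 * the page to another region: */
#if 0
static void example_split_page(target_phys_addr_t base,
                               ram_addr_t dev_a_io, ram_addr_t dev_b_io)
{
    ram_addr_t phys;
    subpage_t *sub;

    /* whole page initially handled by device A */
    sub = subpage_init(base, &phys, dev_a_io, 0);
    /* second half of the page handed over to device B */
    subpage_register(sub, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1,
                     dev_b_io, 0);
}
#endif
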
3187 static int get_free_io_mem_idx(void)
3188 {
3189     int i;
3190 
3191     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3192         if (!io_mem_used[i]) {
3193             io_mem_used[i] = 1;
3194             return i;
3195         }
3196     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
3197     return -1;
3198 }
3199 
3200 /* mem_read and mem_write are arrays of functions containing the
3201    function to access byte (index 0), word (index 1) and dword (index
3202    2). Functions can be omitted with a NULL function pointer.
3203    If io_index is non-zero, the corresponding io zone is
3204    modified. If it is zero, a new io zone is allocated. The return
3205    value can be used with cpu_register_physical_memory(); -1 is
3206    returned on error. */
3207 static int cpu_register_io_memory_fixed(int io_index,
3208                                         CPUReadMemoryFunc * const *mem_read,
3209                                         CPUWriteMemoryFunc * const *mem_write,
3210                                         void *opaque)
3211 {
3212     int i, subwidth = 0;
3213 
3214     if (io_index <= 0) {
3215         io_index = get_free_io_mem_idx();
3216         if (io_index == -1)
3217             return io_index;
3218     } else {
3219         io_index >>= IO_MEM_SHIFT;
3220         if (io_index >= IO_MEM_NB_ENTRIES)
3221             return -1;
3222     }
3223 
3224     for(i = 0;i < 3; i++) {
3225         if (!mem_read[i] || !mem_write[i])
3226             subwidth = IO_MEM_SUBWIDTH;
3227         io_mem_read[io_index][i] = mem_read[i];
3228         io_mem_write[io_index][i] = mem_write[i];
3229     }
3230     io_mem_opaque[io_index] = opaque;
3231     return (io_index << IO_MEM_SHIFT) | subwidth;
3232 }
3233 
3234 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3235                            CPUWriteMemoryFunc * const *mem_write,
3236                            void *opaque)
3237 {
3238     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3239 }
3240 
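/* Illustrative usage sketch, not part of the original file: the my_dev_*
 * handlers, MyDevState, MY_DEV_BASE and MY_DEV_SIZE are hypothetical.  A
 * device model supplies one read and one write handler per access width,
 * registers them here, and hands the returned token to
 * cpu_register_physical_memory(), as noted in the comment above
 * cpu_register_io_memory_fixed(). */
#if 0
static CPUReadMemoryFunc * const my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc * const my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void my_dev_map(MyDevState *s)
{
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, s);
    cpu_register_physical_memory(MY_DEV_BASE, MY_DEV_SIZE, io);
}
#endif
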
3241 void cpu_unregister_io_memory(int io_table_address)
3242 {
3243     int i;
3244     int io_index = io_table_address >> IO_MEM_SHIFT;
3245 
3246     for (i=0;i < 3; i++) {
3247         io_mem_read[io_index][i] = unassigned_mem_read[i];
3248         io_mem_write[io_index][i] = unassigned_mem_write[i];
3249     }
3250     io_mem_opaque[io_index] = NULL;
3251     io_mem_used[io_index] = 0;
3252 }
3253 
3254 static void io_mem_init(void)
3255 {
3256     int i;
3257 
3258     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3259     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3260     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3261     for (i=0; i<5; i++)
3262         io_mem_used[i] = 1;
3263 
3264     io_mem_watch = cpu_register_io_memory(watch_mem_read,
3265                                           watch_mem_write, NULL);
3266 }
3267 
3268 #endif /* !defined(CONFIG_USER_ONLY) */
3269 
3270 /* physical memory access (slow version, mainly for debug) */
3271 #if defined(CONFIG_USER_ONLY)
3272 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3273                             int len, int is_write)
3274 {
3275     int l, flags;
3276     target_ulong page;
3277     void * p;
3278 
3279     while (len > 0) {
3280         page = addr & TARGET_PAGE_MASK;
3281         l = (page + TARGET_PAGE_SIZE) - addr;
3282         if (l > len)
3283             l = len;
3284         flags = page_get_flags(page);
3285         if (!(flags & PAGE_VALID))
3286             return;
3287         if (is_write) {
3288             if (!(flags & PAGE_WRITE))
3289                 return;
3290             /* XXX: this code should not depend on lock_user */
3291             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3292                 /* FIXME - should this return an error rather than just fail? */
3293                 return;
3294             memcpy(p, buf, l);
3295             unlock_user(p, addr, l);
3296         } else {
3297             if (!(flags & PAGE_READ))
3298                 return;
3299             /* XXX: this code should not depend on lock_user */
3300             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3301                 /* FIXME - should this return an error rather than just fail? */
3302                 return;
3303             memcpy(buf, p, l);
3304             unlock_user(p, addr, 0);
3305         }
3306         len -= l;
3307         buf += l;
3308         addr += l;
3309     }
3310 }
3311 
3312 #else
3313 
3314 static void invalidate_and_set_dirty(target_phys_addr_t addr,
3315                                      target_phys_addr_t length)
3316 {
3317     if (!cpu_physical_memory_is_dirty(addr)) {
3318         /* invalidate code */
3319         tb_invalidate_phys_page_range(addr, addr + length, 0);
3320         /* set dirty bit */
3321         cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3322     }
3323 }
3324 
3325 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3326                             int len, int is_write)
3327 {
3328     int l, io_index;
3329     uint8_t *ptr;
3330     uint32_t val;
3331     target_phys_addr_t page;
3332     unsigned long pd;
3333     PhysPageDesc *p;
3334 
3335     while (len > 0) {
3336         page = addr & TARGET_PAGE_MASK;
3337         l = (page + TARGET_PAGE_SIZE) - addr;
3338         if (l > len)
3339             l = len;
3340         p = phys_page_find(page >> TARGET_PAGE_BITS);
3341         if (!p) {
3342             pd = IO_MEM_UNASSIGNED;
3343         } else {
3344             pd = p->phys_offset;
3345         }
3346 
3347         if (is_write) {
3348             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3349                 target_phys_addr_t addr1 = addr;
3350                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3351                 if (p)
3352                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3353                 /* XXX: could force cpu_single_env to NULL to avoid
3354                    potential bugs */
3355                 if (l >= 4 && ((addr1 & 3) == 0)) {
3356                     /* 32 bit write access */
3357                     val = ldl_p(buf);
3358                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3359                     l = 4;
3360                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3361                     /* 16 bit write access */
3362                     val = lduw_p(buf);
3363                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3364                     l = 2;
3365                 } else {
3366                     /* 8 bit write access */
3367                     val = ldub_p(buf);
3368                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3369                     l = 1;
3370                 }
3371             } else {
3372                 unsigned long addr1;
3373                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3374                 /* RAM case */
3375                 ptr = qemu_get_ram_ptr(addr1);
3376                 memcpy(ptr, buf, l);
3377                 invalidate_and_set_dirty(addr1, l);
3378             }
3379         } else {
3380             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3381                 !(pd & IO_MEM_ROMD)) {
3382                 target_phys_addr_t addr1 = addr;
3383                 /* I/O case */
3384                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3385                 if (p)
3386                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3387                 if (l >= 4 && ((addr1 & 3) == 0)) {
3388                     /* 32 bit read access */
3389                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3390                     stl_p(buf, val);
3391                     l = 4;
3392                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3393                     /* 16 bit read access */
3394                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3395                     stw_p(buf, val);
3396                     l = 2;
3397                 } else {
3398                     /* 8 bit read access */
3399                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3400                     stb_p(buf, val);
3401                     l = 1;
3402                 }
3403             } else {
3404                 /* RAM case */
3405                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3406                     (addr & ~TARGET_PAGE_MASK);
3407                 memcpy(buf, ptr, l);
3408             }
3409         }
3410         len -= l;
3411         buf += l;
3412         addr += l;
3413     }
3414 }
3415 
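/* Illustrative usage sketch, not part of the original file.  Device models
 * normally go through the cpu_physical_memory_read()/write() wrappers, which
 * land in the routine above and therefore work for both RAM- and MMIO-backed
 * pages: */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t tmp[256];

    while (len > 0) {
        int l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);
        cpu_physical_memory_write(dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
}
#endif
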
3416 /* used for ROM loading: can write in RAM and ROM */
3417 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3418                                    const uint8_t *buf, int len)
3419 {
3420     int l;
3421     uint8_t *ptr;
3422     target_phys_addr_t page;
3423     unsigned long pd;
3424     PhysPageDesc *p;
3425 
3426     while (len > 0) {
3427         page = addr & TARGET_PAGE_MASK;
3428         l = (page + TARGET_PAGE_SIZE) - addr;
3429         if (l > len)
3430             l = len;
3431         p = phys_page_find(page >> TARGET_PAGE_BITS);
3432         if (!p) {
3433             pd = IO_MEM_UNASSIGNED;
3434         } else {
3435             pd = p->phys_offset;
3436         }
3437 
3438         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3439             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3440             !(pd & IO_MEM_ROMD)) {
3441             /* do nothing */
3442         } else {
3443             unsigned long addr1;
3444             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3445             /* ROM/RAM case */
3446             ptr = qemu_get_ram_ptr(addr1);
3447             memcpy(ptr, buf, l);
3448             invalidate_and_set_dirty(addr1, l);
3449         }
3450         len -= l;
3451         buf += l;
3452         addr += l;
3453     }
3454 }
3455 
3456 typedef struct {
3457     void *buffer;
3458     target_phys_addr_t addr;
3459     target_phys_addr_t len;
3460 } BounceBuffer;
3461 
3462 static BounceBuffer bounce;
3463 
3464 typedef struct MapClient {
3465     void *opaque;
3466     void (*callback)(void *opaque);
3467     QLIST_ENTRY(MapClient) link;
3468 } MapClient;
3469 
3470 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3471     = QLIST_HEAD_INITIALIZER(map_client_list);
3472 
3473 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3474 {
3475     MapClient *client = qemu_malloc(sizeof(*client));
3476 
3477     client->opaque = opaque;
3478     client->callback = callback;
3479     QLIST_INSERT_HEAD(&map_client_list, client, link);
3480     return client;
3481 }
3482 
3483 void cpu_unregister_map_client(void *_client)
3484 {
3485     MapClient *client = (MapClient *)_client;
3486 
3487     QLIST_REMOVE(client, link);
3488     qemu_free(client);
3489 }
3490 
3491 static void cpu_notify_map_clients(void)
3492 {
3493     MapClient *client;
3494 
3495     while (!QLIST_EMPTY(&map_client_list)) {
3496         client = QLIST_FIRST(&map_client_list);
3497         client->callback(client->opaque);
3498         QLIST_REMOVE(client, link);
3499     }
3500 }
3501 
3502 /* Map a physical memory region into a host virtual address.
3503  * May map a subset of the requested range, given by and returned in *plen.
3504  * May return NULL if resources needed to perform the mapping are exhausted.
3505  * Use only for reads OR writes - not for read-modify-write operations.
3506  * Use cpu_register_map_client() to know when retrying the map operation is
3507  * likely to succeed.
3508  */
3509 void *cpu_physical_memory_map(target_phys_addr_t addr,
3510                               target_phys_addr_t *plen,
3511                               int is_write)
3512 {
3513     target_phys_addr_t len = *plen;
3514     target_phys_addr_t done = 0;
3515     int l;
3516     uint8_t *ret = NULL;
3517     uint8_t *ptr;
3518     target_phys_addr_t page;
3519     unsigned long pd;
3520     PhysPageDesc *p;
3521     unsigned long addr1;
3522 
3523     while (len > 0) {
3524         page = addr & TARGET_PAGE_MASK;
3525         l = (page + TARGET_PAGE_SIZE) - addr;
3526         if (l > len)
3527             l = len;
3528         p = phys_page_find(page >> TARGET_PAGE_BITS);
3529         if (!p) {
3530             pd = IO_MEM_UNASSIGNED;
3531         } else {
3532             pd = p->phys_offset;
3533         }
3534 
3535         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3536             if (done || bounce.buffer) {
3537                 break;
3538             }
3539             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3540             bounce.addr = addr;
3541             bounce.len = l;
3542             if (!is_write) {
3543                 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3544             }
3545             ptr = bounce.buffer;
3546         } else {
3547             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3548             ptr = qemu_get_ram_ptr(addr1);
3549         }
3550         if (!done) {
3551             ret = ptr;
3552         } else if (ret + done != ptr) {
3553             break;
3554         }
3555 
3556         len -= l;
3557         addr += l;
3558         done += l;
3559     }
3560     *plen = done;
3561     return ret;
3562 }
3563 
3564 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3565  * Will also mark the memory as dirty if is_write == 1.  access_len gives
3566  * the amount of memory that was actually read or written by the caller.
3567  */
3568 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3569                                int is_write, target_phys_addr_t access_len)
3570 {
3571     if (buffer != bounce.buffer) {
3572         if (is_write) {
3573             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3574             while (access_len) {
3575                 unsigned l;
3576                 l = TARGET_PAGE_SIZE;
3577                 if (l > access_len)
3578                     l = access_len;
3579                 invalidate_and_set_dirty(addr1, l);
3580                 addr1 += l;
3581                 access_len -= l;
3582             }
3583         }
3584         return;
3585     }
3586     if (is_write) {
3587         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3588     }
3589     qemu_vfree(bounce.buffer);
3590     bounce.buffer = NULL;
3591     cpu_notify_map_clients();
3592 }
3593 
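/* Illustrative usage sketch, not part of the original file: do_transfer() and
 * retry_cb() are hypothetical.  A caller maps a guest-physical range, uses the
 * returned host pointer directly, then unmaps it with the length actually
 * accessed.  If the map fails (for instance because the single bounce buffer
 * is busy), the caller registers a map client and retries from the callback,
 * as suggested in the comment above cpu_physical_memory_map(): */
#if 0
static void example_mapped_access(target_phys_addr_t addr,
                                  target_phys_addr_t len, int is_write)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, is_write);

    if (!buf) {
        cpu_register_map_client(NULL, retry_cb);
        return;
    }
    /* plen may be smaller than the length that was asked for */
    do_transfer(buf, plen, is_write);
    cpu_physical_memory_unmap(buf, plen, is_write, plen);
}
#endif
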
3594 /* warning: addr must be aligned */
3595 uint32_t ldl_phys(target_phys_addr_t addr)
3596 {
3597     int io_index;
3598     uint8_t *ptr;
3599     uint32_t val;
3600     unsigned long pd;
3601     PhysPageDesc *p;
3602 
3603     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3604     if (!p) {
3605         pd = IO_MEM_UNASSIGNED;
3606     } else {
3607         pd = p->phys_offset;
3608     }
3609 
3610     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3611         !(pd & IO_MEM_ROMD)) {
3612         /* I/O case */
3613         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3614         if (p)
3615             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3616         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3617     } else {
3618         /* RAM case */
3619         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3620             (addr & ~TARGET_PAGE_MASK);
3621         val = ldl_p(ptr);
3622     }
3623     return val;
3624 }
3625 
3626 /* warning: addr must be aligned */
3627 uint64_t ldq_phys(target_phys_addr_t addr)
3628 {
3629     int io_index;
3630     uint8_t *ptr;
3631     uint64_t val;
3632     unsigned long pd;
3633     PhysPageDesc *p;
3634 
3635     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3636     if (!p) {
3637         pd = IO_MEM_UNASSIGNED;
3638     } else {
3639         pd = p->phys_offset;
3640     }
3641 
3642     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3643         !(pd & IO_MEM_ROMD)) {
3644         /* I/O case */
3645         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3646         if (p)
3647             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3648 #ifdef TARGET_WORDS_BIGENDIAN
3649         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3650         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3651 #else
3652         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3653         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3654 #endif
3655     } else {
3656         /* RAM case */
3657         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3658             (addr & ~TARGET_PAGE_MASK);
3659         val = ldq_p(ptr);
3660     }
3661     return val;
3662 }
3663 
3664 /* XXX: optimize */
3665 uint32_t ldub_phys(target_phys_addr_t addr)
3666 {
3667     uint8_t val;
3668     cpu_physical_memory_read(addr, &val, 1);
3669     return val;
3670 }
3671 
3672 /* XXX: optimize */
3673 uint32_t lduw_phys(target_phys_addr_t addr)
3674 {
3675     uint16_t val;
3676     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3677     return tswap16(val);
3678 }
3679 
3680 /* warning: addr must be aligned. The RAM page is not marked dirty
3681    and the code inside is not invalidated. This is useful if the dirty
3682    bits are used to track modified PTEs. */
3683 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3684 {
3685     int io_index;
3686     uint8_t *ptr;
3687     unsigned long pd;
3688     PhysPageDesc *p;
3689 
3690     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3691     if (!p) {
3692         pd = IO_MEM_UNASSIGNED;
3693     } else {
3694         pd = p->phys_offset;
3695     }
3696 
3697     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3698         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3699         if (p)
3700             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3701         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3702     } else {
3703         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3704         ptr = qemu_get_ram_ptr(addr1);
3705         stl_p(ptr, val);
3706 
3707         if (unlikely(in_migration)) {
3708             if (!cpu_physical_memory_is_dirty(addr1)) {
3709                 /* invalidate code */
3710                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3711                 /* set dirty bit */
3712                 cpu_physical_memory_set_dirty_flags(
3713                     addr1, (0xff & ~CODE_DIRTY_FLAG));
3714             }
3715         }
3716     }
3717 }
3718 
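/* Illustrative sketch, not part of the original file: pte_addr and
 * PTE_ACCESSED are hypothetical.  This is the use case described above: a
 * target page-table walker updates a PTE in guest RAM without setting the
 * page's dirty flags, so dirty tracking of page-table pages stays usable for
 * detecting guest PTE modifications: */
#if 0
static void example_set_accessed_bit(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}
#endif
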
3719 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3720 {
3721     int io_index;
3722     uint8_t *ptr;
3723     unsigned long pd;
3724     PhysPageDesc *p;
3725 
3726     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3727     if (!p) {
3728         pd = IO_MEM_UNASSIGNED;
3729     } else {
3730         pd = p->phys_offset;
3731     }
3732 
3733     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3734         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3735         if (p)
3736             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3737 #ifdef TARGET_WORDS_BIGENDIAN
3738         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3739         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3740 #else
3741         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3742         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3743 #endif
3744     } else {
3745         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3746             (addr & ~TARGET_PAGE_MASK);
3747         stq_p(ptr, val);
3748     }
3749 }
3750 
3751 /* warning: addr must be aligned */
3752 void stl_phys(target_phys_addr_t addr, uint32_t val)
3753 {
3754     int io_index;
3755     uint8_t *ptr;
3756     unsigned long pd;
3757     PhysPageDesc *p;
3758 
3759     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3760     if (!p) {
3761         pd = IO_MEM_UNASSIGNED;
3762     } else {
3763         pd = p->phys_offset;
3764     }
3765 
3766     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3767         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3768         if (p)
3769             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3770         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3771     } else {
3772         unsigned long addr1;
3773         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3774         /* RAM case */
3775         ptr = qemu_get_ram_ptr(addr1);
3776         stl_p(ptr, val);
3777         invalidate_and_set_dirty(addr1, 4);
3778     }
3779 }
3780 
3781 /* XXX: optimize */
3782 void stb_phys(target_phys_addr_t addr, uint32_t val)
3783 {
3784     uint8_t v = val;
3785     cpu_physical_memory_write(addr, &v, 1);
3786 }
3787 
3788 /* XXX: optimize */
3789 void stw_phys(target_phys_addr_t addr, uint32_t val)
3790 {
3791     uint16_t v = tswap16(val);
3792     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3793 }
3794 
3795 /* XXX: optimize */
3796 void stq_phys(target_phys_addr_t addr, uint64_t val)
3797 {
3798     val = tswap64(val);
3799     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3800 }
3801 
3802 #endif
3803 
3804 /* virtual memory access for debug (includes writing to ROM) */
3805 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3806                         uint8_t *buf, int len, int is_write)
3807 {
3808     int l;
3809     target_phys_addr_t phys_addr;
3810     target_ulong page;
3811 
3812     while (len > 0) {
3813         page = addr & TARGET_PAGE_MASK;
3814         phys_addr = cpu_get_phys_page_debug(env, page);
3815         /* if no physical page mapped, return an error */
3816         if (phys_addr == -1)
3817             return -1;
3818         l = (page + TARGET_PAGE_SIZE) - addr;
3819         if (l > len)
3820             l = len;
3821         phys_addr += (addr & ~TARGET_PAGE_MASK);
3822 #if !defined(CONFIG_USER_ONLY)
3823         if (is_write)
3824             cpu_physical_memory_write_rom(phys_addr, buf, l);
3825         else
3826 #endif
3827             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3828         len -= l;
3829         buf += l;
3830         addr += l;
3831     }
3832     return 0;
3833 }
3834 
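/* Illustrative usage sketch, not part of the original file.  This is the
 * entry point a debugger stub uses: the address is a guest-virtual one,
 * translated page by page via cpu_get_phys_page_debug(), and writes may even
 * patch ROM (useful for planting breakpoints): */
#if 0
static int example_read_guest_virtual(CPUState *env, target_ulong vaddr,
                                      uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}
#endif
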
3835 /* in deterministic execution mode, instructions doing device I/Os
3836    must be at the end of the TB */
3837 void cpu_io_recompile(CPUState *env, void *retaddr)
3838 {
3839     TranslationBlock *tb;
3840     uint32_t n, cflags;
3841     target_ulong pc, cs_base;
3842     uint64_t flags;
3843 
3844     tb = tb_find_pc((unsigned long)retaddr);
3845     if (!tb) {
3846         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3847                   retaddr);
3848     }
3849     n = env->icount_decr.u16.low + tb->icount;
3850     cpu_restore_state(tb, env, (unsigned long)retaddr);
3851     /* Calculate how many instructions had been executed before the fault
3852        occurred.  */
3853     n = n - env->icount_decr.u16.low;
3854     /* Generate a new TB ending on the I/O insn.  */
3855     n++;
3856     /* On MIPS and SH, delay slot instructions can only be restarted if
3857        they were already the first instruction in the TB.  If this is not
3858        the first instruction in a TB then re-execute the preceding
3859        branch.  */
3860 #if defined(TARGET_MIPS)
3861     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3862         env->active_tc.PC -= 4;
3863         env->icount_decr.u16.low++;
3864         env->hflags &= ~MIPS_HFLAG_BMASK;
3865     }
3866 #elif defined(TARGET_SH4)
3867     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3868             && n > 1) {
3869         env->pc -= 2;
3870         env->icount_decr.u16.low++;
3871         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3872     }
3873 #endif
3874     /* This should never happen.  */
3875     if (n > CF_COUNT_MASK)
3876         cpu_abort(env, "TB too big during recompile");
3877 
3878     cflags = n | CF_LAST_IO;
3879     pc = tb->pc;
3880     cs_base = tb->cs_base;
3881     flags = tb->flags;
3882     tb_phys_invalidate(tb, -1);
3883     /* FIXME: In theory this could raise an exception.  In practice
3884        we have already translated the block once so it's probably ok.  */
3885     tb_gen_code(env, pc, cs_base, flags, cflags);
3886     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3887        the first in the TB) then we end up generating a whole new TB and
3888        repeating the fault, which is horribly inefficient.
3889        Better would be to execute just this insn uncached, or generate a
3890        second new TB.  */
3891     cpu_resume_from_signal(env, NULL);
3892 }
3893 
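/* Illustrative sketch, not part of the original file, and a simplification of
 * the check the softmmu I/O helpers perform.  With instruction counting
 * enabled, an MMIO access that is not the last instruction of its TB cannot
 * be accounted for deterministically, so the helper bails out into
 * cpu_io_recompile(), which regenerates the TB ending on that instruction: */
#if 0
static void example_icount_io_check(CPUState *env, void *retaddr)
{
    if (use_icount && !env->can_do_io) {
        cpu_io_recompile(env, retaddr);   /* longjmps away; does not return */
    }
}
#endif
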
3894 #if !defined(CONFIG_USER_ONLY)
3895 
3896 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
3897 {
3898     int i, target_code_size, max_target_code_size;
3899     int direct_jmp_count, direct_jmp2_count, cross_page;
3900     TranslationBlock *tb;
3901 
3902     target_code_size = 0;
3903     max_target_code_size = 0;
3904     cross_page = 0;
3905     direct_jmp_count = 0;
3906     direct_jmp2_count = 0;
3907     for(i = 0; i < nb_tbs; i++) {
3908         tb = &tbs[i];
3909         target_code_size += tb->size;
3910         if (tb->size > max_target_code_size)
3911             max_target_code_size = tb->size;
3912         if (tb->page_addr[1] != -1)
3913             cross_page++;
3914         if (tb->tb_next_offset[0] != 0xffff) {
3915             direct_jmp_count++;
3916             if (tb->tb_next_offset[1] != 0xffff) {
3917                 direct_jmp2_count++;
3918             }
3919         }
3920     }
3921     /* XXX: avoid using doubles? */
3922     cpu_fprintf(f, "Translation buffer state:\n");
3923     cpu_fprintf(f, "gen code size       %td/%ld\n",
3924                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3925     cpu_fprintf(f, "TB count            %d/%d\n",
3926                 nb_tbs, code_gen_max_blocks);
3927     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3928                 nb_tbs ? target_code_size / nb_tbs : 0,
3929                 max_target_code_size);
3930     cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
3931                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3932                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3933     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3934             cross_page,
3935             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3936     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3937                 direct_jmp_count,
3938                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3939                 direct_jmp2_count,
3940                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3941     cpu_fprintf(f, "\nStatistics:\n");
3942     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3943     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3944     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3945     tcg_dump_info(f, cpu_fprintf);
3946 }
3947 
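/* The includes below instantiate the code-access (instruction fetch) variants
 * of the softmmu load helpers: MMUSUFFIX selects the _cmmu entry points,
 * SOFTMMU_CODE_ACCESS suppresses the store helpers, and SHIFT is the log2 of
 * the access size (0..3 for 1, 2, 4 and 8 byte accesses). */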
3948 #define MMUSUFFIX _cmmu
3949 #define GETPC() NULL
3950 #define env cpu_single_env
3951 #define SOFTMMU_CODE_ACCESS
3952 
3953 #define SHIFT 0
3954 #include "softmmu_template.h"
3955 
3956 #define SHIFT 1
3957 #include "softmmu_template.h"
3958 
3959 #define SHIFT 2
3960 #include "softmmu_template.h"
3961 
3962 #define SHIFT 3
3963 #include "softmmu_template.h"
3964 
3965 #undef env
3966 
3967 #endif
3968