1 /*
2  *  virtual page mapping and translated block handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "config.h"
20 #ifdef _WIN32
21 #define WIN32_LEAN_AND_MEAN
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34 
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
38 #include "tcg.h"
39 #include "hw/hw.h"
40 #include "osdep.h"
41 #include "kvm.h"
42 #if defined(CONFIG_USER_ONLY)
43 #include <qemu.h>
44 #endif
45 #ifdef CONFIG_MEMCHECK
46 #include "memcheck/memcheck_api.h"
47 #endif  // CONFIG_MEMCHECK
48 
49 //#define DEBUG_TB_INVALIDATE
50 //#define DEBUG_FLUSH
51 //#define DEBUG_TLB
52 //#define DEBUG_UNASSIGNED
53 
54 /* make various TB consistency checks */
55 //#define DEBUG_TB_CHECK
56 //#define DEBUG_TLB_CHECK
57 
58 //#define DEBUG_IOPORT
59 //#define DEBUG_SUBPAGE
60 
61 #if !defined(CONFIG_USER_ONLY)
62 /* TB consistency checks only implemented for usermode emulation.  */
63 #undef DEBUG_TB_CHECK
64 #endif
65 
66 #define SMC_BITMAP_USE_THRESHOLD 10
67 
68 #if defined(TARGET_SPARC64)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 41
70 #elif defined(TARGET_SPARC)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 36
72 #elif defined(TARGET_ALPHA)
73 #define TARGET_PHYS_ADDR_SPACE_BITS 42
74 #define TARGET_VIRT_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_PPC64)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_X86_64)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 42
79 #elif defined(TARGET_I386)
80 #define TARGET_PHYS_ADDR_SPACE_BITS 36
81 #else
82 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 #endif
84 
85 static TranslationBlock *tbs;
86 int code_gen_max_blocks;
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 static int nb_tbs;
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91 
92 #if defined(__arm__) || defined(__sparc_v9__)
93 /* The prologue must be reachable with a direct jump. ARM and Sparc64
94  have limited branch ranges (possibly also PPC), so place it in a
95  section close to the code segment. */
96 #define code_gen_section                                \
97     __attribute__((__section__(".gen_code")))           \
98     __attribute__((aligned (32)))
99 #elif defined(_WIN32)
100 /* Maximum alignment for Win32 is 16. */
101 #define code_gen_section                                \
102     __attribute__((aligned (16)))
103 #else
104 #define code_gen_section                                \
105     __attribute__((aligned (32)))
106 #endif
107 
108 uint8_t code_gen_prologue[1024] code_gen_section;
109 static uint8_t *code_gen_buffer;
110 static unsigned long code_gen_buffer_size;
111 /* threshold to flush the translated code buffer */
112 static unsigned long code_gen_buffer_max_size;
113 uint8_t *code_gen_ptr;
114 
115 #if !defined(CONFIG_USER_ONLY)
116 int phys_ram_fd;
117 uint8_t *phys_ram_dirty;
118 static int in_migration;
119 
120 typedef struct RAMBlock {
121     uint8_t *host;
122     ram_addr_t offset;
123     ram_addr_t length;
124     struct RAMBlock *next;
125 } RAMBlock;
126 
127 static RAMBlock *ram_blocks;
128 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
129    then we can no longer assume contiguous ram offsets, and external uses
130    of this variable will break.  */
131 ram_addr_t last_ram_offset;
132 #endif
133 
134 CPUState *first_cpu;
135 /* current CPU in the current thread. It is only valid inside
136    cpu_exec() */
137 CPUState *cpu_single_env;
138 /* 0 = Do not count executed instructions.
139    1 = Precise instruction counting.
140    2 = Adaptive rate instruction counting.  */
141 int use_icount = 0;
142 /* Current instruction counter.  While executing translated code this may
143    include some instructions that have not yet been executed.  */
144 int64_t qemu_icount;
145 
146 typedef struct PageDesc {
147     /* list of TBs intersecting this ram page */
148     TranslationBlock *first_tb;
149     /* in order to optimize self modifying code, we count the number
150        of lookups we do to a given page to use a bitmap */
151     unsigned int code_write_count;
152     uint8_t *code_bitmap;
153 #if defined(CONFIG_USER_ONLY)
154     unsigned long flags;
155 #endif
156 } PageDesc;
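/* Note: first_tb (and each TB's page_next[] links) store the TB pointer with
   the page slot index (0 or 1) encoded in the low two bits, which is why the
   list walkers below mask the pointer with ~3 before dereferencing it. */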
157 
158 typedef struct PhysPageDesc {
159     /* offset in host memory of the page + io_index in the low bits */
160     ram_addr_t phys_offset;
161     ram_addr_t region_offset;
162 } PhysPageDesc;
163 
164 #define L2_BITS 10
165 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
166 /* XXX: this is a temporary hack for alpha target.
167  *      In the future, this is to be replaced by a multi-level table
168  *      to actually be able to handle the complete 64 bits address space.
169  */
170 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
171 #else
172 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
173 #endif
174 
175 #define L1_SIZE (1 << L1_BITS)
176 #define L2_SIZE (1 << L2_BITS)
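/* The page tables are two levels deep: an L1 array of L1_SIZE pointers, each
   referring to an L2 array of L2_SIZE descriptors.  For example, with 4 KB
   target pages (TARGET_PAGE_BITS = 12) and L2_BITS = 10, each L2 array covers
   4 MB of guest address space and the 32-bit case needs
   1 << (32 - 10 - 12) = 1024 L1 entries. */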
177 
178 unsigned long qemu_real_host_page_size;
179 unsigned long qemu_host_page_bits;
180 unsigned long qemu_host_page_size;
181 unsigned long qemu_host_page_mask;
182 
183 /* XXX: for system emulation, it could just be an array */
184 static PageDesc *l1_map[L1_SIZE];
185 static PhysPageDesc **l1_phys_map;
186 
187 #if !defined(CONFIG_USER_ONLY)
188 static void io_mem_init(void);
189 
190 /* io memory support */
191 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
192 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
193 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
194 static char io_mem_used[IO_MEM_NB_ENTRIES];
195 static int io_mem_watch;
196 #endif
197 
198 /* log support */
199 #ifdef WIN32
200 static const char *logfilename = "qemu.log";
201 #else
202 static const char *logfilename = "/tmp/qemu.log";
203 #endif
204 FILE *logfile;
205 int loglevel;
206 static int log_append = 0;
207 
208 /* statistics */
209 static int tlb_flush_count;
210 static int tb_flush_count;
211 static int tb_phys_invalidate_count;
212 
213 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
214 typedef struct subpage_t {
215     target_phys_addr_t base;
216     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
217     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
218     void *opaque[TARGET_PAGE_SIZE][2][4];
219     ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
220 } subpage_t;
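/* A subpage_t splits a single target page into finer-grained I/O mappings:
   the tables above are indexed by the byte offset within the page (see
   SUBPAGE_IDX), so different parts of one page can be backed by different
   read/write handlers. */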
221 
222 #ifdef _WIN32
223 static void map_exec(void *addr, long size)
224 {
225     DWORD old_protect;
226     VirtualProtect(addr, size,
227                    PAGE_EXECUTE_READWRITE, &old_protect);
228 
229 }
230 #else
231 static void map_exec(void *addr, long size)
232 {
233     unsigned long start, end, page_size;
234 
235     page_size = getpagesize();
236     start = (unsigned long)addr;
237     start &= ~(page_size - 1);
238 
239     end = (unsigned long)addr + size;
240     end += page_size - 1;
241     end &= ~(page_size - 1);
242 
243     mprotect((void *)start, end - start,
244              PROT_READ | PROT_WRITE | PROT_EXEC);
245 }
246 #endif
247 
248 static void page_init(void)
249 {
250     /* NOTE: we can always suppose that qemu_host_page_size >=
251        TARGET_PAGE_SIZE */
252 #ifdef _WIN32
253     {
254         SYSTEM_INFO system_info;
255 
256         GetSystemInfo(&system_info);
257         qemu_real_host_page_size = system_info.dwPageSize;
258     }
259 #else
260     qemu_real_host_page_size = getpagesize();
261 #endif
262     if (qemu_host_page_size == 0)
263         qemu_host_page_size = qemu_real_host_page_size;
264     if (qemu_host_page_size < TARGET_PAGE_SIZE)
265         qemu_host_page_size = TARGET_PAGE_SIZE;
266     qemu_host_page_bits = 0;
267     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
268         qemu_host_page_bits++;
269     qemu_host_page_mask = ~(qemu_host_page_size - 1);
270     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
271     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
272 
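    /* For user-mode emulation on POSIX hosts, walk /proc/self/maps and mark
       every region the host process already has mapped as PAGE_RESERVED, so
       that guest mappings are not placed on top of them. */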
273 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
274     {
275         long long startaddr, endaddr;
276         FILE *f;
277         int n;
278 
279         mmap_lock();
280         last_brk = (unsigned long)sbrk(0);
281         f = fopen("/proc/self/maps", "r");
282         if (f) {
283             do {
284                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
285                 if (n == 2) {
286                     startaddr = MIN(startaddr,
287                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
288                     endaddr = MIN(endaddr,
289                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
290                     page_set_flags(startaddr & TARGET_PAGE_MASK,
291                                    TARGET_PAGE_ALIGN(endaddr),
292                                    PAGE_RESERVED);
293                 }
294             } while (!feof(f));
295             fclose(f);
296         }
297         mmap_unlock();
298     }
299 #endif
300 }
301 
302 static inline PageDesc **page_l1_map(target_ulong index)
303 {
304 #if TARGET_LONG_BITS > 32
305     /* Host memory outside guest VM.  For 32-bit targets we have already
306        excluded high addresses.  */
307     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
308         return NULL;
309 #endif
310     return &l1_map[index >> L2_BITS];
311 }
312 
313 static inline PageDesc *page_find_alloc(target_ulong index)
314 {
315     PageDesc **lp, *p;
316     lp = page_l1_map(index);
317     if (!lp)
318         return NULL;
319 
320     p = *lp;
321     if (!p) {
322         /* allocate if not found */
323 #if defined(CONFIG_USER_ONLY)
324         size_t len = sizeof(PageDesc) * L2_SIZE;
325         /* Don't use qemu_malloc because it may recurse.  */
326         p = mmap(NULL, len, PROT_READ | PROT_WRITE,
327                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
328         *lp = p;
329         if (h2g_valid(p)) {
330             unsigned long addr = h2g(p);
331             page_set_flags(addr & TARGET_PAGE_MASK,
332                            TARGET_PAGE_ALIGN(addr + len),
333                            PAGE_RESERVED);
334         }
335 #else
336         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
337         *lp = p;
338 #endif
339     }
340     return p + (index & (L2_SIZE - 1));
341 }
342 
343 static inline PageDesc *page_find(target_ulong index)
344 {
345     PageDesc **lp, *p;
346     lp = page_l1_map(index);
347     if (!lp)
348         return NULL;
349 
350     p = *lp;
351     if (!p) {
352         return NULL;
353     }
354     return p + (index & (L2_SIZE - 1));
355 }
356 
357 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
358 {
359     void **lp, **p;
360     PhysPageDesc *pd;
361 
362     p = (void **)l1_phys_map;
363 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
364 
365 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
366 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
367 #endif
368     lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
369     p = *lp;
370     if (!p) {
371         /* allocate if not found */
372         if (!alloc)
373             return NULL;
374         p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
375         memset(p, 0, sizeof(void *) * L1_SIZE);
376         *lp = p;
377     }
378 #endif
379     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
380     pd = *lp;
381     if (!pd) {
382         int i;
383         /* allocate if not found */
384         if (!alloc)
385             return NULL;
386         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
387         *lp = pd;
388         for (i = 0; i < L2_SIZE; i++) {
389           pd[i].phys_offset = IO_MEM_UNASSIGNED;
390           pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
391         }
392     }
393     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
394 }
395 
396 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
397 {
398     return phys_page_find_alloc(index, 0);
399 }
400 
401 #if !defined(CONFIG_USER_ONLY)
402 static void tlb_protect_code(ram_addr_t ram_addr);
403 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
404                                     target_ulong vaddr);
405 #define mmap_lock() do { } while(0)
406 #define mmap_unlock() do { } while(0)
407 #endif
408 
409 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
410 
411 #if defined(CONFIG_USER_ONLY)
412 /* Currently it is not recommended to allocate big chunks of data in
413    user mode; this will change once a dedicated libc is used */
414 #define USE_STATIC_CODE_GEN_BUFFER
415 #endif
416 
417 #ifdef USE_STATIC_CODE_GEN_BUFFER
418 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
419 #endif
420 
421 static void code_gen_alloc(unsigned long tb_size)
422 {
423 #ifdef USE_STATIC_CODE_GEN_BUFFER
424     code_gen_buffer = static_code_gen_buffer;
425     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
426     map_exec(code_gen_buffer, code_gen_buffer_size);
427 #else
428     code_gen_buffer_size = tb_size;
429     if (code_gen_buffer_size == 0) {
430 #if defined(CONFIG_USER_ONLY)
431         /* in user mode, phys_ram_size is not meaningful */
432         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
433 #else
434         /* XXX: needs adjustments */
435         code_gen_buffer_size = (unsigned long)(ram_size / 4);
436 #endif
437     }
438     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
439         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
440     /* The code gen buffer location may have constraints depending on
441        the host cpu and OS */
442 #if defined(__linux__)
443     {
444         int flags;
445         void *start = NULL;
446 
447         flags = MAP_PRIVATE | MAP_ANONYMOUS;
448 #if defined(__x86_64__)
449         flags |= MAP_32BIT;
450         /* Cannot map more than that */
451         if (code_gen_buffer_size > (800 * 1024 * 1024))
452             code_gen_buffer_size = (800 * 1024 * 1024);
453 #elif defined(__sparc_v9__)
454         // Map the buffer below 2G, so we can use direct calls and branches
455         flags |= MAP_FIXED;
456         start = (void *) 0x60000000UL;
457         if (code_gen_buffer_size > (512 * 1024 * 1024))
458             code_gen_buffer_size = (512 * 1024 * 1024);
459 #elif defined(__arm__)
460         /* Map the buffer below 32M, so we can use direct calls and branches */
461         flags |= MAP_FIXED;
462         start = (void *) 0x01000000UL;
463         if (code_gen_buffer_size > 16 * 1024 * 1024)
464             code_gen_buffer_size = 16 * 1024 * 1024;
465 #endif
466         code_gen_buffer = mmap(start, code_gen_buffer_size,
467                                PROT_WRITE | PROT_READ | PROT_EXEC,
468                                flags, -1, 0);
469         if (code_gen_buffer == MAP_FAILED) {
470             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
471             exit(1);
472         }
473     }
474 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
475     {
476         int flags;
477         void *addr = NULL;
478         flags = MAP_PRIVATE | MAP_ANONYMOUS;
479 #if defined(__x86_64__)
480         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
481          * 0x40000000 is free */
482         flags |= MAP_FIXED;
483         addr = (void *)0x40000000;
484         /* Cannot map more than that */
485         if (code_gen_buffer_size > (800 * 1024 * 1024))
486             code_gen_buffer_size = (800 * 1024 * 1024);
487 #endif
488         code_gen_buffer = mmap(addr, code_gen_buffer_size,
489                                PROT_WRITE | PROT_READ | PROT_EXEC,
490                                flags, -1, 0);
491         if (code_gen_buffer == MAP_FAILED) {
492             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
493             exit(1);
494         }
495     }
496 #else
497     code_gen_buffer = qemu_malloc(code_gen_buffer_size);
498     map_exec(code_gen_buffer, code_gen_buffer_size);
499 #endif
500 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
501     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
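    /* Leave room for one maximally-sized TB at the end of the buffer so that
       tb_alloc() only needs to compare the current fill level against
       code_gen_buffer_max_size before starting a translation. */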
502     code_gen_buffer_max_size = code_gen_buffer_size -
503         code_gen_max_block_size();
504     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
505     tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
506 }
507 
508 /* Must be called before using the QEMU cpus. 'tb_size' is the size
509    (in bytes) allocated to the translation buffer. Zero means default
510    size. */
511 void cpu_exec_init_all(unsigned long tb_size)
512 {
513     cpu_gen_init();
514     code_gen_alloc(tb_size);
515     code_gen_ptr = code_gen_buffer;
516     page_init();
517 #if !defined(CONFIG_USER_ONLY)
518     io_mem_init();
519 #endif
520 }
521 
522 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
523 
524 #define CPU_COMMON_SAVE_VERSION 1
525 
526 static void cpu_common_save(QEMUFile *f, void *opaque)
527 {
528     CPUState *env = opaque;
529 
530     cpu_synchronize_state(env, 0);
531 
532     qemu_put_be32s(f, &env->halted);
533     qemu_put_be32s(f, &env->interrupt_request);
534 }
535 
536 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
537 {
538     CPUState *env = opaque;
539 
540     if (version_id != CPU_COMMON_SAVE_VERSION)
541         return -EINVAL;
542 
543     qemu_get_be32s(f, &env->halted);
544     qemu_get_be32s(f, &env->interrupt_request);
545     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
546        version_id is increased. */
547     env->interrupt_request &= ~0x01;
548     tlb_flush(env, 1);
549     cpu_synchronize_state(env, 1);
550 
551     return 0;
552 }
553 #endif
554 
555 CPUState *qemu_get_cpu(int cpu)
556 {
557     CPUState *env = first_cpu;
558 
559     while (env) {
560         if (env->cpu_index == cpu)
561             break;
562         env = env->next_cpu;
563     }
564 
565     return env;
566 }
567 
568 void cpu_exec_init(CPUState *env)
569 {
570     CPUState **penv;
571     int cpu_index;
572 
573 #if defined(CONFIG_USER_ONLY)
574     cpu_list_lock();
575 #endif
576     env->next_cpu = NULL;
577     penv = &first_cpu;
578     cpu_index = 0;
579     while (*penv != NULL) {
580         penv = &(*penv)->next_cpu;
581         cpu_index++;
582     }
583     env->cpu_index = cpu_index;
584     env->numa_node = 0;
585     QTAILQ_INIT(&env->breakpoints);
586     QTAILQ_INIT(&env->watchpoints);
587     *penv = env;
588 #if defined(CONFIG_USER_ONLY)
589     cpu_list_unlock();
590 #endif
591 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
592     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
593                     cpu_common_save, cpu_common_load, env);
594     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
595                     cpu_save, cpu_load, env);
596 #endif
597 }
598 
599 static inline void invalidate_page_bitmap(PageDesc *p)
600 {
601     if (p->code_bitmap) {
602         qemu_free(p->code_bitmap);
603         p->code_bitmap = NULL;
604     }
605     p->code_write_count = 0;
606 }
607 
608 /* set to NULL all the 'first_tb' fields in all PageDescs */
609 static void page_flush_tb(void)
610 {
611     int i, j;
612     PageDesc *p;
613 
614     for(i = 0; i < L1_SIZE; i++) {
615         p = l1_map[i];
616         if (p) {
617             for(j = 0; j < L2_SIZE; j++) {
618                 p->first_tb = NULL;
619                 invalidate_page_bitmap(p);
620                 p++;
621             }
622         }
623     }
624 }
625 
626 /* flush all the translation blocks */
627 /* XXX: tb_flush is currently not thread safe */
628 void tb_flush(CPUState *env1)
629 {
630     CPUState *env;
631 #if defined(DEBUG_FLUSH)
632     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
633            (unsigned long)(code_gen_ptr - code_gen_buffer),
634            nb_tbs, nb_tbs > 0 ?
635            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
636 #endif
637     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
638         cpu_abort(env1, "Internal error: code buffer overflow\n");
639 
640     nb_tbs = 0;
641 
642     for(env = first_cpu; env != NULL; env = env->next_cpu) {
643 #ifdef CONFIG_MEMCHECK
644         int tb_to_clean;
645         for (tb_to_clean = 0; tb_to_clean < TB_JMP_CACHE_SIZE; tb_to_clean++) {
646             if (env->tb_jmp_cache[tb_to_clean] != NULL &&
647                 env->tb_jmp_cache[tb_to_clean]->tpc2gpc != NULL) {
648                 qemu_free(env->tb_jmp_cache[tb_to_clean]->tpc2gpc);
649                 env->tb_jmp_cache[tb_to_clean]->tpc2gpc = NULL;
650                 env->tb_jmp_cache[tb_to_clean]->tpc2gpc_pairs = 0;
651             }
652         }
653 #endif  // CONFIG_MEMCHECK
654         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
655     }
656 
657     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
658     page_flush_tb();
659 
660     code_gen_ptr = code_gen_buffer;
661     /* XXX: flush processor icache at this point if cache flush is
662        expensive */
663     tb_flush_count++;
664 }
665 
666 #ifdef DEBUG_TB_CHECK
667 
668 static void tb_invalidate_check(target_ulong address)
669 {
670     TranslationBlock *tb;
671     int i;
672     address &= TARGET_PAGE_MASK;
673     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
674         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
675             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
676                   address >= tb->pc + tb->size)) {
677                 printf("ERROR invalidate: address=" TARGET_FMT_lx
678                        " PC=%08lx size=%04x\n",
679                        address, (long)tb->pc, tb->size);
680             }
681         }
682     }
683 }
684 
685 /* verify that all the pages have correct rights for code */
686 static void tb_page_check(void)
687 {
688     TranslationBlock *tb;
689     int i, flags1, flags2;
690 
691     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
692         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
693             flags1 = page_get_flags(tb->pc);
694             flags2 = page_get_flags(tb->pc + tb->size - 1);
695             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
696                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
697                        (long)tb->pc, tb->size, flags1, flags2);
698             }
699         }
700     }
701 }
702 
703 #endif
704 
705 /* invalidate one TB */
706 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
707                              int next_offset)
708 {
709     TranslationBlock *tb1;
710     for(;;) {
711         tb1 = *ptb;
712         if (tb1 == tb) {
713             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
714             break;
715         }
716         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
717     }
718 }
719 
720 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
721 {
722     TranslationBlock *tb1;
723     unsigned int n1;
724 
725     for(;;) {
726         tb1 = *ptb;
727         n1 = (long)tb1 & 3;
728         tb1 = (TranslationBlock *)((long)tb1 & ~3);
729         if (tb1 == tb) {
730             *ptb = tb1->page_next[n1];
731             break;
732         }
733         ptb = &tb1->page_next[n1];
734     }
735 }
736 
737 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
738 {
739     TranslationBlock *tb1, **ptb;
740     unsigned int n1;
741 
742     ptb = &tb->jmp_next[n];
743     tb1 = *ptb;
744     if (tb1) {
745         /* find tb(n) in circular list */
746         for(;;) {
747             tb1 = *ptb;
748             n1 = (long)tb1 & 3;
749             tb1 = (TranslationBlock *)((long)tb1 & ~3);
750             if (n1 == n && tb1 == tb)
751                 break;
752             if (n1 == 2) {
753                 ptb = &tb1->jmp_first;
754             } else {
755                 ptb = &tb1->jmp_next[n1];
756             }
757         }
758         /* now we can suppress tb(n) from the list */
759         *ptb = tb->jmp_next[n];
760 
761         tb->jmp_next[n] = NULL;
762     }
763 }
764 
765 /* reset the jump entry 'n' of a TB so that it is not chained to
766    another TB */
767 static inline void tb_reset_jump(TranslationBlock *tb, int n)
768 {
769     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
770 }
771 
772 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
773 {
774     CPUState *env;
775     PageDesc *p;
776     unsigned int h, n1;
777     target_phys_addr_t phys_pc;
778     TranslationBlock *tb1, *tb2;
779 
780     /* remove the TB from the hash list */
781     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
782     h = tb_phys_hash_func(phys_pc);
783     tb_remove(&tb_phys_hash[h], tb,
784               offsetof(TranslationBlock, phys_hash_next));
785 
786     /* remove the TB from the page list */
787     if (tb->page_addr[0] != page_addr) {
788         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
789         tb_page_remove(&p->first_tb, tb);
790         invalidate_page_bitmap(p);
791     }
792     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
793         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
794         tb_page_remove(&p->first_tb, tb);
795         invalidate_page_bitmap(p);
796     }
797 
798     tb_invalidated_flag = 1;
799 
800     /* remove the TB from the hash list */
801     h = tb_jmp_cache_hash_func(tb->pc);
802     for(env = first_cpu; env != NULL; env = env->next_cpu) {
803         if (env->tb_jmp_cache[h] == tb)
804             env->tb_jmp_cache[h] = NULL;
805     }
806 
807     /* suppress this TB from the two jump lists */
808     tb_jmp_remove(tb, 0);
809     tb_jmp_remove(tb, 1);
810 
811     /* suppress any remaining jumps to this TB */
812     tb1 = tb->jmp_first;
813     for(;;) {
814         n1 = (long)tb1 & 3;
815         if (n1 == 2)
816             break;
817         tb1 = (TranslationBlock *)((long)tb1 & ~3);
818         tb2 = tb1->jmp_next[n1];
819         tb_reset_jump(tb1, n1);
820         tb1->jmp_next[n1] = NULL;
821         tb1 = tb2;
822     }
823     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
824 
825 #ifdef CONFIG_MEMCHECK
826     if (tb->tpc2gpc != NULL) {
827         qemu_free(tb->tpc2gpc);
828         tb->tpc2gpc = NULL;
829         tb->tpc2gpc_pairs = 0;
830     }
831 #endif  // CONFIG_MEMCHECK
832 
833     tb_phys_invalidate_count++;
834 }
835 
836 static inline void set_bits(uint8_t *tab, int start, int len)
837 {
838     int end, mask, end1;
839 
840     end = start + len;
841     tab += start >> 3;
842     mask = 0xff << (start & 7);
843     if ((start & ~7) == (end & ~7)) {
844         if (start < end) {
845             mask &= ~(0xff << (end & 7));
846             *tab |= mask;
847         }
848     } else {
849         *tab++ |= mask;
850         start = (start + 8) & ~7;
851         end1 = end & ~7;
852         while (start < end1) {
853             *tab++ = 0xff;
854             start += 8;
855         }
856         if (start < end) {
857             mask = ~(0xff << (end & 7));
858             *tab |= mask;
859         }
860     }
861 }
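/* Example: set_bits(tab, 3, 10) marks bits 3..12, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x1f. */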
862 
863 static void build_page_bitmap(PageDesc *p)
864 {
865     int n, tb_start, tb_end;
866     TranslationBlock *tb;
867 
868     p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
869 
870     tb = p->first_tb;
871     while (tb != NULL) {
872         n = (long)tb & 3;
873         tb = (TranslationBlock *)((long)tb & ~3);
874         /* NOTE: this is subtle as a TB may span two physical pages */
875         if (n == 0) {
876             /* NOTE: tb_end may be after the end of the page, but
877                it is not a problem */
878             tb_start = tb->pc & ~TARGET_PAGE_MASK;
879             tb_end = tb_start + tb->size;
880             if (tb_end > TARGET_PAGE_SIZE)
881                 tb_end = TARGET_PAGE_SIZE;
882         } else {
883             tb_start = 0;
884             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
885         }
886         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
887         tb = tb->page_next[n];
888     }
889 }
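/* The resulting code_bitmap has one bit per byte of the page that is covered
   by translated code; tb_invalidate_phys_page_fast() uses it to skip writes
   that do not touch any translated code. */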
890 
891 TranslationBlock *tb_gen_code(CPUState *env,
892                               target_ulong pc, target_ulong cs_base,
893                               int flags, int cflags)
894 {
895     TranslationBlock *tb;
896     uint8_t *tc_ptr;
897     target_ulong phys_pc, phys_page2, virt_page2;
898     int code_gen_size;
899 
900     phys_pc = get_phys_addr_code(env, pc);
901     tb = tb_alloc(pc);
902     if (!tb) {
903         /* flush must be done */
904         tb_flush(env);
905         /* cannot fail at this point */
906         tb = tb_alloc(pc);
907         /* Don't forget to invalidate previous TB info.  */
908         tb_invalidated_flag = 1;
909     }
910     tc_ptr = code_gen_ptr;
911     tb->tc_ptr = tc_ptr;
912     tb->cs_base = cs_base;
913     tb->flags = flags;
914     tb->cflags = cflags;
915 #ifdef CONFIG_TRACE
916     tb->bb_rec = NULL;
917     tb->prev_time = 0;
918 #endif
919     cpu_gen_code(env, tb, &code_gen_size);
920     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
921 
922     /* check next page if needed */
923     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
924     phys_page2 = -1;
925     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
926         phys_page2 = get_phys_addr_code(env, virt_page2);
927     }
928     tb_link_phys(tb, phys_pc, phys_page2);
929     return tb;
930 }
931 
932 /* invalidate all TBs which intersect with the target physical page
933    starting in range [start;end[. NOTE: start and end must refer to
934    the same physical page. 'is_cpu_write_access' should be true if called
935    from a real cpu write access: the virtual CPU will exit the current
936    TB if code is modified inside this TB. */
937 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
938                                    int is_cpu_write_access)
939 {
940     TranslationBlock *tb, *tb_next, *saved_tb;
941     CPUState *env = cpu_single_env;
942     target_ulong tb_start, tb_end;
943     PageDesc *p;
944     int n;
945 #ifdef TARGET_HAS_PRECISE_SMC
946     int current_tb_not_found = is_cpu_write_access;
947     TranslationBlock *current_tb = NULL;
948     int current_tb_modified = 0;
949     target_ulong current_pc = 0;
950     target_ulong current_cs_base = 0;
951     int current_flags = 0;
952 #endif /* TARGET_HAS_PRECISE_SMC */
953 
954     p = page_find(start >> TARGET_PAGE_BITS);
955     if (!p)
956         return;
957     if (!p->code_bitmap &&
958         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
959         is_cpu_write_access) {
960         /* build code bitmap */
961         build_page_bitmap(p);
962     }
963 
964     /* we remove all the TBs in the range [start, end[ */
965     /* XXX: see if in some cases it could be faster to invalidate all the code */
966     tb = p->first_tb;
967     while (tb != NULL) {
968         n = (long)tb & 3;
969         tb = (TranslationBlock *)((long)tb & ~3);
970         tb_next = tb->page_next[n];
971         /* NOTE: this is subtle as a TB may span two physical pages */
972         if (n == 0) {
973             /* NOTE: tb_end may be after the end of the page, but
974                it is not a problem */
975             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
976             tb_end = tb_start + tb->size;
977         } else {
978             tb_start = tb->page_addr[1];
979             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
980         }
981         if (!(tb_end <= start || tb_start >= end)) {
982 #ifdef TARGET_HAS_PRECISE_SMC
983             if (current_tb_not_found) {
984                 current_tb_not_found = 0;
985                 current_tb = NULL;
986                 if (env->mem_io_pc) {
987                     /* now we have a real cpu fault */
988                     current_tb = tb_find_pc(env->mem_io_pc);
989                 }
990             }
991             if (current_tb == tb &&
992                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
993                 /* If we are modifying the current TB, we must stop
994                 its execution. We could be more precise by checking
995                 that the modification is after the current PC, but it
996                 would require a specialized function to partially
997                 restore the CPU state */
998 
999                 current_tb_modified = 1;
1000                 cpu_restore_state(current_tb, env,
1001                                   env->mem_io_pc, NULL);
1002                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1003                                      &current_flags);
1004             }
1005 #endif /* TARGET_HAS_PRECISE_SMC */
1006             /* we need to do that to handle the case where a signal
1007                occurs while doing tb_phys_invalidate() */
1008             saved_tb = NULL;
1009             if (env) {
1010                 saved_tb = env->current_tb;
1011                 env->current_tb = NULL;
1012             }
1013             tb_phys_invalidate(tb, -1);
1014             if (env) {
1015                 env->current_tb = saved_tb;
1016                 if (env->interrupt_request && env->current_tb)
1017                     cpu_interrupt(env, env->interrupt_request);
1018             }
1019         }
1020         tb = tb_next;
1021     }
1022 #if !defined(CONFIG_USER_ONLY)
1023     /* if no code remaining, no need to continue to use slow writes */
1024     if (!p->first_tb) {
1025         invalidate_page_bitmap(p);
1026         if (is_cpu_write_access) {
1027             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1028         }
1029     }
1030 #endif
1031 #ifdef TARGET_HAS_PRECISE_SMC
1032     if (current_tb_modified) {
1033         /* we generate a block containing just the instruction
1034            modifying the memory. It will ensure that it cannot modify
1035            itself */
1036         env->current_tb = NULL;
1037         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1038         cpu_resume_from_signal(env, NULL);
1039     }
1040 #endif
1041 }
1042 
1043 /* len must be <= 8 and start must be a multiple of len */
1044 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1045 {
1046     PageDesc *p;
1047     int offset, b;
1048 #if 0
1049     if (1) {
1050         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1051                   cpu_single_env->mem_io_vaddr, len,
1052                   cpu_single_env->eip,
1053                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1054     }
1055 #endif
1056     p = page_find(start >> TARGET_PAGE_BITS);
1057     if (!p)
1058         return;
1059     if (p->code_bitmap) {
1060         offset = start & ~TARGET_PAGE_MASK;
1061         b = p->code_bitmap[offset >> 3] >> (offset & 7);
1062         if (b & ((1 << len) - 1))
1063             goto do_invalidate;
1064     } else {
1065     do_invalidate:
1066         tb_invalidate_phys_page_range(start, start + len, 1);
1067     }
1068 }
1069 
1070 #if !defined(CONFIG_SOFTMMU)
1071 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1072                                     unsigned long pc, void *puc)
1073 {
1074     TranslationBlock *tb;
1075     PageDesc *p;
1076     int n;
1077 #ifdef TARGET_HAS_PRECISE_SMC
1078     TranslationBlock *current_tb = NULL;
1079     CPUState *env = cpu_single_env;
1080     int current_tb_modified = 0;
1081     target_ulong current_pc = 0;
1082     target_ulong current_cs_base = 0;
1083     int current_flags = 0;
1084 #endif
1085 
1086     addr &= TARGET_PAGE_MASK;
1087     p = page_find(addr >> TARGET_PAGE_BITS);
1088     if (!p)
1089         return;
1090     tb = p->first_tb;
1091 #ifdef TARGET_HAS_PRECISE_SMC
1092     if (tb && pc != 0) {
1093         current_tb = tb_find_pc(pc);
1094     }
1095 #endif
1096     while (tb != NULL) {
1097         n = (long)tb & 3;
1098         tb = (TranslationBlock *)((long)tb & ~3);
1099 #ifdef TARGET_HAS_PRECISE_SMC
1100         if (current_tb == tb &&
1101             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1102                 /* If we are modifying the current TB, we must stop
1103                    its execution. We could be more precise by checking
1104                    that the modification is after the current PC, but it
1105                    would require a specialized function to partially
1106                    restore the CPU state */
1107 
1108             current_tb_modified = 1;
1109             cpu_restore_state(current_tb, env, pc, puc);
1110             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1111                                  &current_flags);
1112         }
1113 #endif /* TARGET_HAS_PRECISE_SMC */
1114         tb_phys_invalidate(tb, addr);
1115         tb = tb->page_next[n];
1116     }
1117     p->first_tb = NULL;
1118 #ifdef TARGET_HAS_PRECISE_SMC
1119     if (current_tb_modified) {
1120         /* we generate a block containing just the instruction
1121            modifying the memory. It will ensure that it cannot modify
1122            itself */
1123         env->current_tb = NULL;
1124         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1125         cpu_resume_from_signal(env, puc);
1126     }
1127 #endif
1128 }
1129 #endif
1130 
1131 /* add the tb in the target page and protect it if necessary */
1132 static inline void tb_alloc_page(TranslationBlock *tb,
1133                                  unsigned int n, target_ulong page_addr)
1134 {
1135     PageDesc *p;
1136     TranslationBlock *last_first_tb;
1137 
1138     tb->page_addr[n] = page_addr;
1139     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1140     tb->page_next[n] = p->first_tb;
1141     last_first_tb = p->first_tb;
1142     p->first_tb = (TranslationBlock *)((long)tb | n);
1143     invalidate_page_bitmap(p);
1144 
1145 #if defined(TARGET_HAS_SMC) || 1
1146 
1147 #if defined(CONFIG_USER_ONLY)
1148     if (p->flags & PAGE_WRITE) {
1149         target_ulong addr;
1150         PageDesc *p2;
1151         int prot;
1152 
1153         /* force the host page as non writable (writes will have a
1154            page fault + mprotect overhead) */
1155         page_addr &= qemu_host_page_mask;
1156         prot = 0;
1157         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1158             addr += TARGET_PAGE_SIZE) {
1159 
1160             p2 = page_find (addr >> TARGET_PAGE_BITS);
1161             if (!p2)
1162                 continue;
1163             prot |= p2->flags;
1164             p2->flags &= ~PAGE_WRITE;
1165             page_get_flags(addr);
1166           }
1167         mprotect(g2h(page_addr), qemu_host_page_size,
1168                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1169 #ifdef DEBUG_TB_INVALIDATE
1170         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1171                page_addr);
1172 #endif
1173     }
1174 #else
1175     /* if some code is already present, then the pages are already
1176        protected. So we handle the case where only the first TB is
1177        allocated in a physical page */
1178     if (!last_first_tb) {
1179         tlb_protect_code(page_addr);
1180     }
1181 #endif
1182 
1183 #endif /* TARGET_HAS_SMC */
1184 }
1185 
1186 /* Allocate a new translation block. Flush the translation buffer if
1187    too many translation blocks or too much generated code. */
1188 TranslationBlock *tb_alloc(target_ulong pc)
1189 {
1190     TranslationBlock *tb;
1191 
1192     if (nb_tbs >= code_gen_max_blocks ||
1193         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1194         return NULL;
1195     tb = &tbs[nb_tbs++];
1196     tb->pc = pc;
1197     tb->cflags = 0;
1198 #ifdef CONFIG_MEMCHECK
1199     tb->tpc2gpc = NULL;
1200     tb->tpc2gpc_pairs = 0;
1201 #endif  // CONFIG_MEMCHECK
1202     return tb;
1203 }
1204 
1205 void tb_free(TranslationBlock *tb)
1206 {
1207     /* In practice this is mostly used for single-use temporary TBs.
1208        Ignore the hard cases and just back up if this TB happens to
1209        be the last one generated.  */
1210     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1211         code_gen_ptr = tb->tc_ptr;
1212         nb_tbs--;
1213     }
1214 }
1215 
1216 /* add a new TB and link it to the physical page tables. phys_page2 is
1217    (-1) to indicate that only one page contains the TB. */
1218 void tb_link_phys(TranslationBlock *tb,
1219                   target_ulong phys_pc, target_ulong phys_page2)
1220 {
1221     unsigned int h;
1222     TranslationBlock **ptb;
1223 
1224     /* Grab the mmap lock to stop another thread invalidating this TB
1225        before we are done.  */
1226     mmap_lock();
1227     /* add in the physical hash table */
1228     h = tb_phys_hash_func(phys_pc);
1229     ptb = &tb_phys_hash[h];
1230     tb->phys_hash_next = *ptb;
1231     *ptb = tb;
1232 
1233     /* add in the page list */
1234     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1235     if (phys_page2 != -1)
1236         tb_alloc_page(tb, 1, phys_page2);
1237     else
1238         tb->page_addr[1] = -1;
1239 
1240     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1241     tb->jmp_next[0] = NULL;
1242     tb->jmp_next[1] = NULL;
1243 
1244     /* init original jump addresses */
1245     if (tb->tb_next_offset[0] != 0xffff)
1246         tb_reset_jump(tb, 0);
1247     if (tb->tb_next_offset[1] != 0xffff)
1248         tb_reset_jump(tb, 1);
1249 
1250 #ifdef DEBUG_TB_CHECK
1251     tb_page_check();
1252 #endif
1253     mmap_unlock();
1254 }
1255 
1256 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1257    tb[1].tc_ptr. Return NULL if not found */
1258 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1259 {
1260     int m_min, m_max, m;
1261     unsigned long v;
1262     TranslationBlock *tb;
1263 
1264     if (nb_tbs <= 0)
1265         return NULL;
1266     if (tc_ptr < (unsigned long)code_gen_buffer ||
1267         tc_ptr >= (unsigned long)code_gen_ptr)
1268         return NULL;
1269     /* binary search (cf Knuth) */
1270     m_min = 0;
1271     m_max = nb_tbs - 1;
1272     while (m_min <= m_max) {
1273         m = (m_min + m_max) >> 1;
1274         tb = &tbs[m];
1275         v = (unsigned long)tb->tc_ptr;
1276         if (v == tc_ptr)
1277             return tb;
1278         else if (tc_ptr < v) {
1279             m_max = m - 1;
1280         } else {
1281             m_min = m + 1;
1282         }
1283     }
1284     return &tbs[m_max];
1285 }
1286 
1287 static void tb_reset_jump_recursive(TranslationBlock *tb);
1288 
1289 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1290 {
1291     TranslationBlock *tb1, *tb_next, **ptb;
1292     unsigned int n1;
1293 
1294     tb1 = tb->jmp_next[n];
1295     if (tb1 != NULL) {
1296         /* find head of list */
1297         for(;;) {
1298             n1 = (long)tb1 & 3;
1299             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1300             if (n1 == 2)
1301                 break;
1302             tb1 = tb1->jmp_next[n1];
1303         }
1304         /* we are now sure that tb jumps to tb1 */
1305         tb_next = tb1;
1306 
1307         /* remove tb from the jmp_first list */
1308         ptb = &tb_next->jmp_first;
1309         for(;;) {
1310             tb1 = *ptb;
1311             n1 = (long)tb1 & 3;
1312             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1313             if (n1 == n && tb1 == tb)
1314                 break;
1315             ptb = &tb1->jmp_next[n1];
1316         }
1317         *ptb = tb->jmp_next[n];
1318         tb->jmp_next[n] = NULL;
1319 
1320         /* suppress the jump to next tb in generated code */
1321         tb_reset_jump(tb, n);
1322 
1323         /* suppress jumps in the tb on which we could have jumped */
1324         tb_reset_jump_recursive(tb_next);
1325     }
1326 }
1327 
1328 static void tb_reset_jump_recursive(TranslationBlock *tb)
1329 {
1330     tb_reset_jump_recursive2(tb, 0);
1331     tb_reset_jump_recursive2(tb, 1);
1332 }
1333 
1334 #if defined(TARGET_HAS_ICE)
1335 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1336 {
1337     target_phys_addr_t addr;
1338     target_ulong pd;
1339     ram_addr_t ram_addr;
1340     PhysPageDesc *p;
1341 
1342     addr = cpu_get_phys_page_debug(env, pc);
1343     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1344     if (!p) {
1345         pd = IO_MEM_UNASSIGNED;
1346     } else {
1347         pd = p->phys_offset;
1348     }
1349     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1350     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1351 }
1352 #endif
1353 
1354 /* Add a watchpoint.  */
1355 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1356                           int flags, CPUWatchpoint **watchpoint)
1357 {
1358     target_ulong len_mask = ~(len - 1);
1359     CPUWatchpoint *wp;
1360 
1361     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1362     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1363         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1364                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1365         return -EINVAL;
1366     }
1367     wp = qemu_malloc(sizeof(*wp));
1368 
1369     wp->vaddr = addr;
1370     wp->len_mask = len_mask;
1371     wp->flags = flags;
1372 
1373     /* keep all GDB-injected watchpoints in front */
1374     if (flags & BP_GDB)
1375         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1376     else
1377         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1378 
1379     tlb_flush_page(env, addr);
1380 
1381     if (watchpoint)
1382         *watchpoint = wp;
1383     return 0;
1384 }
1385 
1386 /* Remove a specific watchpoint.  */
1387 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1388                           int flags)
1389 {
1390     target_ulong len_mask = ~(len - 1);
1391     CPUWatchpoint *wp;
1392 
1393     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1394         if (addr == wp->vaddr && len_mask == wp->len_mask
1395                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1396             cpu_watchpoint_remove_by_ref(env, wp);
1397             return 0;
1398         }
1399     }
1400     return -ENOENT;
1401 }
1402 
1403 /* Remove a specific watchpoint by reference.  */
1404 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1405 {
1406     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1407 
1408     tlb_flush_page(env, watchpoint->vaddr);
1409 
1410     qemu_free(watchpoint);
1411 }
1412 
1413 /* Remove all matching watchpoints.  */
1414 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1415 {
1416     CPUWatchpoint *wp, *next;
1417 
1418     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1419         if (wp->flags & mask)
1420             cpu_watchpoint_remove_by_ref(env, wp);
1421     }
1422 }
1423 
1424 /* Add a breakpoint.  */
1425 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1426                           CPUBreakpoint **breakpoint)
1427 {
1428 #if defined(TARGET_HAS_ICE)
1429     CPUBreakpoint *bp;
1430 
1431     bp = qemu_malloc(sizeof(*bp));
1432 
1433     bp->pc = pc;
1434     bp->flags = flags;
1435 
1436     /* keep all GDB-injected breakpoints in front */
1437     if (flags & BP_GDB)
1438         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1439     else
1440         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1441 
1442     breakpoint_invalidate(env, pc);
1443 
1444     if (breakpoint)
1445         *breakpoint = bp;
1446     return 0;
1447 #else
1448     return -ENOSYS;
1449 #endif
1450 }
1451 
1452 /* Remove a specific breakpoint.  */
1453 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1454 {
1455 #if defined(TARGET_HAS_ICE)
1456     CPUBreakpoint *bp;
1457 
1458     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1459         if (bp->pc == pc && bp->flags == flags) {
1460             cpu_breakpoint_remove_by_ref(env, bp);
1461             return 0;
1462         }
1463     }
1464     return -ENOENT;
1465 #else
1466     return -ENOSYS;
1467 #endif
1468 }
1469 
1470 /* Remove a specific breakpoint by reference.  */
1471 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1472 {
1473 #if defined(TARGET_HAS_ICE)
1474     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1475 
1476     breakpoint_invalidate(env, breakpoint->pc);
1477 
1478     qemu_free(breakpoint);
1479 #endif
1480 }
1481 
1482 /* Remove all matching breakpoints. */
1483 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1484 {
1485 #if defined(TARGET_HAS_ICE)
1486     CPUBreakpoint *bp, *next;
1487 
1488     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1489         if (bp->flags & mask)
1490             cpu_breakpoint_remove_by_ref(env, bp);
1491     }
1492 #endif
1493 }
1494 
1495 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1496    CPU loop after each instruction */
1497 void cpu_single_step(CPUState *env, int enabled)
1498 {
1499 #if defined(TARGET_HAS_ICE)
1500     if (env->singlestep_enabled != enabled) {
1501         env->singlestep_enabled = enabled;
1502         if (kvm_enabled())
1503             kvm_update_guest_debug(env, 0);
1504         else {
1505             /* must flush all the translated code to avoid inconsistencies */
1506             /* XXX: only flush what is necessary */
1507             tb_flush(env);
1508         }
1509     }
1510 #endif
1511 }
1512 
1513 /* enable or disable low levels log */
1514 void cpu_set_log(int log_flags)
1515 {
1516     loglevel = log_flags;
1517     if (loglevel && !logfile) {
1518         logfile = fopen(logfilename, log_append ? "a" : "w");
1519         if (!logfile) {
1520             perror(logfilename);
1521             _exit(1);
1522         }
1523 #if !defined(CONFIG_SOFTMMU)
1524         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1525         {
1526             static char logfile_buf[4096];
1527             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1528         }
1529 #elif !defined(_WIN32)
1530         /* Win32 doesn't support line-buffering and requires size >= 2 */
1531         setvbuf(logfile, NULL, _IOLBF, 0);
1532 #endif
1533         log_append = 1;
1534     }
1535     if (!loglevel && logfile) {
1536         fclose(logfile);
1537         logfile = NULL;
1538     }
1539 }
1540 
1541 void cpu_set_log_filename(const char *filename)
1542 {
1543     logfilename = strdup(filename);
1544     if (logfile) {
1545         fclose(logfile);
1546         logfile = NULL;
1547     }
1548     cpu_set_log(loglevel);
1549 }
1550 
1551 static void cpu_unlink_tb(CPUState *env)
1552 {
1553     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1554        problem and hope the cpu will stop of its own accord.  For userspace
1555        emulation this often isn't actually as bad as it sounds.  Often
1556        signals are used primarily to interrupt blocking syscalls.  */
1557     TranslationBlock *tb;
1558     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1559 
1560     tb = env->current_tb;
1561     /* if the cpu is currently executing code, we must unlink it and
1562        all the potentially executing TB */
1563     if (tb && !testandset(&interrupt_lock)) {
1564         env->current_tb = NULL;
1565         tb_reset_jump_recursive(tb);
1566         resetlock(&interrupt_lock);
1567     }
1568 }
1569 
1570 /* mask must never be zero, except for A20 change call */
1571 void cpu_interrupt(CPUState *env, int mask)
1572 {
1573     int old_mask;
1574 
1575     old_mask = env->interrupt_request;
1576     env->interrupt_request |= mask;
1577 
1578 #ifndef CONFIG_USER_ONLY
1579     /*
1580      * If called from iothread context, wake the target cpu in
1581      * case it's halted.
1582      */
1583     if (!qemu_cpu_self(env)) {
1584         qemu_cpu_kick(env);
1585         return;
1586     }
1587 #endif
1588 
1589     if (use_icount) {
1590         env->icount_decr.u16.high = 0xffff;
1591 #ifndef CONFIG_USER_ONLY
1592         if (!can_do_io(env)
1593             && (mask & ~old_mask) != 0) {
1594             cpu_abort(env, "Raised interrupt while not in I/O function");
1595         }
1596 #endif
1597     } else {
1598         cpu_unlink_tb(env);
1599     }
1600 }
1601 
1602 void cpu_reset_interrupt(CPUState *env, int mask)
1603 {
1604     env->interrupt_request &= ~mask;
1605 }
1606 
1607 void cpu_exit(CPUState *env)
1608 {
1609     env->exit_request = 1;
1610     cpu_unlink_tb(env);
1611 }
1612 
1613 const CPULogItem cpu_log_items[] = {
1614     { CPU_LOG_TB_OUT_ASM, "out_asm",
1615       "show generated host assembly code for each compiled TB" },
1616     { CPU_LOG_TB_IN_ASM, "in_asm",
1617       "show target assembly code for each compiled TB" },
1618     { CPU_LOG_TB_OP, "op",
1619       "show micro ops for each compiled TB" },
1620     { CPU_LOG_TB_OP_OPT, "op_opt",
1621       "show micro ops "
1622 #ifdef TARGET_I386
1623       "before eflags optimization and "
1624 #endif
1625       "after liveness analysis" },
1626     { CPU_LOG_INT, "int",
1627       "show interrupts/exceptions in short format" },
1628     { CPU_LOG_EXEC, "exec",
1629       "show trace before each executed TB (lots of logs)" },
1630     { CPU_LOG_TB_CPU, "cpu",
1631       "show CPU state before block translation" },
1632 #ifdef TARGET_I386
1633     { CPU_LOG_PCALL, "pcall",
1634       "show protected mode far calls/returns/exceptions" },
1635     { CPU_LOG_RESET, "cpu_reset",
1636       "show CPU state before CPU resets" },
1637 #endif
1638 #ifdef DEBUG_IOPORT
1639     { CPU_LOG_IOPORT, "ioport",
1640       "show all i/o ports accesses" },
1641 #endif
1642     { 0, NULL, NULL },
1643 };
1644 
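/* compare the first 'n' characters of 's1' against the NUL-terminated
   string 's2'; returns 1 on an exact match, 0 otherwise */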
1645 static int cmp1(const char *s1, int n, const char *s2)
1646 {
1647     if (strlen(s2) != n)
1648         return 0;
1649     return memcmp(s1, s2, n) == 0;
1650 }
1651 
1652 /* takes a comma separated list of log masks. Return 0 if error. */
1653 int cpu_str_to_log_mask(const char *str)
1654 {
1655     const CPULogItem *item;
1656     int mask;
1657     const char *p, *p1;
1658 
1659     p = str;
1660     mask = 0;
1661     for(;;) {
1662         p1 = strchr(p, ',');
1663         if (!p1)
1664             p1 = p + strlen(p);
1665 	if(cmp1(p,p1-p,"all")) {
1666 		for(item = cpu_log_items; item->mask != 0; item++) {
1667 			mask |= item->mask;
1668 		}
1669 	} else {
1670         for(item = cpu_log_items; item->mask != 0; item++) {
1671             if (cmp1(p, p1 - p, item->name))
1672                 goto found;
1673         }
1674         return 0;
1675 	}
1676     found:
1677         mask |= item->mask;
1678         if (*p1 != ',')
1679             break;
1680         p = p1 + 1;
1681     }
1682     return mask;
1683 }
1684 
1685 void cpu_abort(CPUState *env, const char *fmt, ...)
1686 {
1687     va_list ap;
1688     va_list ap2;
1689 
1690     va_start(ap, fmt);
1691     va_copy(ap2, ap);
1692     fprintf(stderr, "qemu: fatal: ");
1693     vfprintf(stderr, fmt, ap);
1694     fprintf(stderr, "\n");
1695 #ifdef TARGET_I386
1696     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1697 #else
1698     cpu_dump_state(env, stderr, fprintf, 0);
1699 #endif
1700     if (qemu_log_enabled()) {
1701         qemu_log("qemu: fatal: ");
1702         qemu_log_vprintf(fmt, ap2);
1703         qemu_log("\n");
1704 #ifdef TARGET_I386
1705         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1706 #else
1707         log_cpu_state(env, 0);
1708 #endif
1709         qemu_log_flush();
1710         qemu_log_close();
1711     }
1712     va_end(ap2);
1713     va_end(ap);
1714     abort();
1715 }
1716 
1717 CPUState *cpu_copy(CPUState *env)
1718 {
1719     CPUState *new_env = cpu_init(env->cpu_model_str);
1720     CPUState *next_cpu = new_env->next_cpu;
1721     int cpu_index = new_env->cpu_index;
1722 #if defined(TARGET_HAS_ICE)
1723     CPUBreakpoint *bp;
1724     CPUWatchpoint *wp;
1725 #endif
1726 
1727     memcpy(new_env, env, sizeof(CPUState));
1728 
1729     /* Preserve chaining and index. */
1730     new_env->next_cpu = next_cpu;
1731     new_env->cpu_index = cpu_index;
1732 
1733     /* Clone all break/watchpoints.
1734        Note: Once we support ptrace with hw-debug register access, make sure
1735        BP_CPU break/watchpoints are handled correctly on clone. */
1736     QTAILQ_INIT(&env->breakpoints);
1737     QTAILQ_INIT(&env->watchpoints);
1738 #if defined(TARGET_HAS_ICE)
1739     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1740         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1741     }
1742     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1743         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1744                               wp->flags, NULL);
1745     }
1746 #endif
1747 
1748     return new_env;
1749 }
1750 
1751 #if !defined(CONFIG_USER_ONLY)
1752 
1753 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1754 {
1755     unsigned int i;
1756 
1757     /* Discard jump cache entries for any tb which might potentially
1758        overlap the flushed page.  */
1759     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1760     memset (&env->tb_jmp_cache[i], 0,
1761 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1762 
1763     i = tb_jmp_cache_hash_page(addr);
1764     memset (&env->tb_jmp_cache[i], 0,
1765 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1766 }
1767 
1768 /* NOTE: if flush_global is true, also flush global entries (not
1769    implemented yet) */
1770 void tlb_flush(CPUState *env, int flush_global)
1771 {
1772     int i;
1773 
1774 #if defined(DEBUG_TLB)
1775     printf("tlb_flush:\n");
1776 #endif
1777     /* must reset current TB so that interrupts cannot modify the
1778        links while we are modifying them */
1779     env->current_tb = NULL;
1780 
1781     for(i = 0; i < CPU_TLB_SIZE; i++) {
1782         int mmu_idx;
1783         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1784             env->tlb_table[mmu_idx][i].addr_read = -1;
1785             env->tlb_table[mmu_idx][i].addr_write = -1;
1786             env->tlb_table[mmu_idx][i].addr_code = -1;
1787         }
1788     }
1789 
1790     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1791 
1792 #ifdef CONFIG_KQEMU
1793     if (env->kqemu_enabled) {
1794         kqemu_flush(env, flush_global);
1795     }
1796 #endif
1797     tlb_flush_count++;
1798 }
1799 
1800 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1801 {
1802     if (addr == (tlb_entry->addr_read &
1803                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1804         addr == (tlb_entry->addr_write &
1805                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1806         addr == (tlb_entry->addr_code &
1807                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1808         tlb_entry->addr_read = -1;
1809         tlb_entry->addr_write = -1;
1810         tlb_entry->addr_code = -1;
1811     }
1812 }
1813 
1814 void tlb_flush_page(CPUState *env, target_ulong addr)
1815 {
1816     int i;
1817     int mmu_idx;
1818 
1819 #if defined(DEBUG_TLB)
1820     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1821 #endif
1822     /* must reset current TB so that interrupts cannot modify the
1823        links while we are modifying them */
1824     env->current_tb = NULL;
1825 
1826     addr &= TARGET_PAGE_MASK;
1827     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1828     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1829         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1830 
1831     tlb_flush_jmp_cache(env, addr);
1832 }
1833 
1834 /* update the TLBs so that writes to code in the virtual page 'addr'
1835    can be detected */
1836 static void tlb_protect_code(ram_addr_t ram_addr)
1837 {
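    /* clearing CODE_DIRTY_FLAG for this page forces writes through the
       notdirty slow path, where self-modifying code is detected */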
1838     cpu_physical_memory_reset_dirty(ram_addr,
1839                                     ram_addr + TARGET_PAGE_SIZE,
1840                                     CODE_DIRTY_FLAG);
1841 }
1842 
1843 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1844    tested for self modifying code */
1845 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1846                                     target_ulong vaddr)
1847 {
1848     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1849 }
1850 
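/* mark a RAM write entry of the TLB as not dirty if its target falls inside
   [start, start + length), so that the next write takes the slow path and
   sets the dirty bits again */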
1851 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1852                                          unsigned long start, unsigned long length)
1853 {
1854     unsigned long addr;
1855     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1856         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1857         if ((addr - start) < length) {
1858             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1859         }
1860     }
1861 }
1862 
1863 /* Note: start and end must be within the same ram block.  */
1864 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1865                                      int dirty_flags)
1866 {
1867     CPUState *env;
1868     unsigned long length, start1;
1869     int i, mask, len;
1870     uint8_t *p;
1871 
1872     start &= TARGET_PAGE_MASK;
1873     end = TARGET_PAGE_ALIGN(end);
1874 
1875     length = end - start;
1876     if (length == 0)
1877         return;
1878     len = length >> TARGET_PAGE_BITS;
1879     mask = ~dirty_flags;
1880     p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1881     for(i = 0; i < len; i++)
1882         p[i] &= mask;
1883 
1884     /* we modify the TLB cache so that the dirty bit will be set again
1885        when accessing the range */
1886     start1 = (unsigned long)qemu_get_ram_ptr(start);
1887     /* Check that we don't span multiple blocks - this breaks the
1888        address comparisons below.  */
1889     if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1890             != (end - 1) - start) {
1891         abort();
1892     }
1893 
1894     for(env = first_cpu; env != NULL; env = env->next_cpu) {
1895         int mmu_idx;
1896         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1897             for(i = 0; i < CPU_TLB_SIZE; i++)
1898                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1899                                       start1, length);
1900         }
1901     }
1902 }
1903 
1904 int cpu_physical_memory_set_dirty_tracking(int enable)
1905 {
1906     in_migration = enable;
1907     if (kvm_enabled()) {
1908         return kvm_set_migration_log(enable);
1909     }
1910     return 0;
1911 }
1912 
1913 int cpu_physical_memory_get_dirty_tracking(void)
1914 {
1915     return in_migration;
1916 }
1917 
1918 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1919                                    target_phys_addr_t end_addr)
1920 {
1921     int ret = 0;
1922 
1923     if (kvm_enabled())
1924         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1925     return ret;
1926 }
1927 
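/* re-arm the not-dirty trap on a RAM write entry whose page has been
   cleaned since the entry was filled */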
1928 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1929 {
1930     ram_addr_t ram_addr;
1931     void *p;
1932 
1933     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1934         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1935             + tlb_entry->addend);
1936         ram_addr = qemu_ram_addr_from_host(p);
1937         if (!cpu_physical_memory_is_dirty(ram_addr)) {
1938             tlb_entry->addr_write |= TLB_NOTDIRTY;
1939         }
1940     }
1941 }
1942 
1943 /* update the TLB according to the current state of the dirty bits */
1944 void cpu_tlb_update_dirty(CPUState *env)
1945 {
1946     int i;
1947     int mmu_idx;
1948     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1949         for(i = 0; i < CPU_TLB_SIZE; i++)
1950             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1951     }
1952 }
1953 
1954 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1955 {
1956     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1957         tlb_entry->addr_write = vaddr;
1958 }
1959 
1960 /* update the TLB corresponding to virtual page vaddr so that writes
1961    to it are no longer trapped as not-dirty */
1962 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1963 {
1964     int i;
1965     int mmu_idx;
1966 
1967     vaddr &= TARGET_PAGE_MASK;
1968     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1969     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1970         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1971 }
1972 
1973 /* add a new TLB entry. At most one entry for a given virtual address
1974    is permitted. Return 0 if OK or 2 if the page could not be mapped
1975    (can only happen in non SOFTMMU mode for I/O pages or pages
1976    conflicting with the host address space). */
1977 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1978                       target_phys_addr_t paddr, int prot,
1979                       int mmu_idx, int is_softmmu)
1980 {
1981     PhysPageDesc *p;
1982     unsigned long pd;
1983     unsigned int index;
1984     target_ulong address;
1985     target_ulong code_address;
1986     target_phys_addr_t addend;
1987     int ret;
1988     CPUTLBEntry *te;
1989     CPUWatchpoint *wp;
1990     target_phys_addr_t iotlb;
1991 
1992     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1993     if (!p) {
1994         pd = IO_MEM_UNASSIGNED;
1995     } else {
1996         pd = p->phys_offset;
1997     }
1998 #if defined(DEBUG_TLB)
1999     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2000            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2001 #endif
2002 
2003     ret = 0;
2004     address = vaddr;
2005     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2006         /* IO memory case (romd handled later) */
2007         address |= TLB_MMIO;
2008     }
2009     addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2010     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2011         /* Normal RAM.  */
2012         iotlb = pd & TARGET_PAGE_MASK;
2013         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2014             iotlb |= IO_MEM_NOTDIRTY;
2015         else
2016             iotlb |= IO_MEM_ROM;
2017     } else {
2018         /* IO handlers are currently passed a physical address.
2019            It would be nice to pass an offset from the base address
2020            of that region.  This would avoid having to special case RAM,
2021            and avoid full address decoding in every device.
2022            We can't use the high bits of pd for this because
2023            IO_MEM_ROMD uses these as a ram address.  */
2024         iotlb = (pd & ~TARGET_PAGE_MASK);
2025         if (p) {
2026             iotlb += p->region_offset;
2027         } else {
2028             iotlb += paddr;
2029         }
2030     }
2031 
2032     code_address = address;
2033     /* Make accesses to pages with watchpoints go via the
2034        watchpoint trap routines.  */
2035     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2036         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2037             iotlb = io_mem_watch + paddr;
2038             /* TODO: The memory case can be optimized by not trapping
2039                reads of pages with a write breakpoint.  */
2040             address |= TLB_MMIO;
2041         }
2042     }
2043 
2044     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2045     env->iotlb[mmu_idx][index] = iotlb - vaddr;
2046     te = &env->tlb_table[mmu_idx][index];
2047     te->addend = addend - vaddr;
2048     if (prot & PAGE_READ) {
2049         te->addr_read = address;
2050     } else {
2051         te->addr_read = -1;
2052     }
2053 
2054     if (prot & PAGE_EXEC) {
2055         te->addr_code = code_address;
2056     } else {
2057         te->addr_code = -1;
2058     }
2059     if (prot & PAGE_WRITE) {
2060         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2061             (pd & IO_MEM_ROMD)) {
2062             /* Write access calls the I/O callback.  */
2063             te->addr_write = address | TLB_MMIO;
2064         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2065                    !cpu_physical_memory_is_dirty(pd)) {
2066             te->addr_write = address | TLB_NOTDIRTY;
2067         } else {
2068             te->addr_write = address;
2069         }
2070     } else {
2071         te->addr_write = -1;
2072     }
2073 
2074 #ifdef CONFIG_MEMCHECK
2075     /*
2076      * If the memchecker is running, we need to make sure that the page
2077      * cached into the TLB by this operation complies with our requirement
2078      * that __ld/__stx_mmu be called for accesses to pages containing
2079      * memory blocks that require access violation checks.
2080      *
2081      * We only need to ask the memory checker whether to invalidate this
2082      * page if:
2083      *  - Memchecking is enabled.
2084      *  - The cached page belongs to user space.
2085      *  - The request to cache this page didn't come from softmmu. We're
2086      *    covered there, because after the page was cached here we will
2087      *    invalidate it in the __ld/__stx_mmu wrapper.
2088      *  - The cached page belongs to RAM, not an I/O area.
2089      *  - The page is cached for read or write access.
2090      */
2091     if (memcheck_instrument_mmu && mmu_idx == 1 && !is_softmmu &&
2092         (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2093         (prot & (PAGE_READ | PAGE_WRITE)) &&
2094         memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
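        /* flipping the page bits makes the fast-path TLB comparison fail,
           so accesses to this page fall back to the __ld/__stx_mmu helpers
           where the memchecker hooks run */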
2095         if (prot & PAGE_READ) {
2096             te->addr_read ^= TARGET_PAGE_MASK;
2097         }
2098         if (prot & PAGE_WRITE) {
2099             te->addr_write ^= TARGET_PAGE_MASK;
2100         }
2101     }
2102 #endif  // CONFIG_MEMCHECK
2103 
2104     return ret;
2105 }
2106 
2107 #else
2108 
2109 void tlb_flush(CPUState *env, int flush_global)
2110 {
2111 }
2112 
2113 void tlb_flush_page(CPUState *env, target_ulong addr)
2114 {
2115 }
2116 
2117 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2118                       target_phys_addr_t paddr, int prot,
2119                       int mmu_idx, int is_softmmu)
2120 {
2121     return 0;
2122 }
2123 
2124 /*
2125  * Walks guest process memory "regions" one by one
2126  * and calls callback function 'fn' for each region.
2127  */
2128 int walk_memory_regions(void *priv,
2129     int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2130 {
2131     unsigned long start, end;
2132     PageDesc *p = NULL;
2133     int i, j, prot, prot1;
2134     int rc = 0;
2135 
2136     start = end = -1;
2137     prot = 0;
2138 
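    /* walk the two-level page descriptor table: each l1_map[] entry points
       to an array of L2_SIZE PageDesc structures (NULL means nothing mapped) */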
2139     for (i = 0; i <= L1_SIZE; i++) {
2140         p = (i < L1_SIZE) ? l1_map[i] : NULL;
2141         for (j = 0; j < L2_SIZE; j++) {
2142             prot1 = (p == NULL) ? 0 : p[j].flags;
2143             /*
2144              * "region" is one continuous chunk of memory
2145              * that has same protection flags set.
2146              */
2147             if (prot1 != prot) {
2148                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2149                 if (start != -1) {
2150                     rc = (*fn)(priv, start, end, prot);
2151                     /* callback can stop iteration by returning != 0 */
2152                     if (rc != 0)
2153                         return (rc);
2154                 }
2155                 if (prot1 != 0)
2156                     start = end;
2157                 else
2158                     start = -1;
2159                 prot = prot1;
2160             }
2161             if (p == NULL)
2162                 break;
2163         }
2164     }
2165     return (rc);
2166 }
2167 
2168 static int dump_region(void *priv, unsigned long start,
2169     unsigned long end, unsigned long prot)
2170 {
2171     FILE *f = (FILE *)priv;
2172 
2173     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2174         start, end, end - start,
2175         ((prot & PAGE_READ) ? 'r' : '-'),
2176         ((prot & PAGE_WRITE) ? 'w' : '-'),
2177         ((prot & PAGE_EXEC) ? 'x' : '-'));
2178 
2179     return (0);
2180 }
2181 
2182 /* dump memory mappings */
2183 void page_dump(FILE *f)
2184 {
2185     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2186             "start", "end", "size", "prot");
2187     walk_memory_regions(f, dump_region);
2188 }
2189 
2190 int page_get_flags(target_ulong address)
2191 {
2192     PageDesc *p;
2193 
2194     p = page_find(address >> TARGET_PAGE_BITS);
2195     if (!p)
2196         return 0;
2197     return p->flags;
2198 }
2199 
2200 /* modify the flags of a page and invalidate the code if
2201    necessary. The flag PAGE_WRITE_ORG is positioned automatically
2202    depending on PAGE_WRITE */
2203 void page_set_flags(target_ulong start, target_ulong end, int flags)
2204 {
2205     PageDesc *p;
2206     target_ulong addr;
2207 
2208     /* mmap_lock should already be held.  */
2209     start = start & TARGET_PAGE_MASK;
2210     end = TARGET_PAGE_ALIGN(end);
2211     if (flags & PAGE_WRITE)
2212         flags |= PAGE_WRITE_ORG;
2213     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2214         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2215         /* We may be called for host regions that are outside guest
2216            address space.  */
2217         if (!p)
2218             return;
2219         /* if the write protection is set, then we invalidate the code
2220            inside */
2221         if (!(p->flags & PAGE_WRITE) &&
2222             (flags & PAGE_WRITE) &&
2223             p->first_tb) {
2224             tb_invalidate_phys_page(addr, 0, NULL);
2225         }
2226         p->flags = flags;
2227     }
2228 }
2229 
2230 int page_check_range(target_ulong start, target_ulong len, int flags)
2231 {
2232     PageDesc *p;
2233     target_ulong end;
2234     target_ulong addr;
2235 
2236     if (start + len < start)
2237         /* we've wrapped around */
2238         return -1;
2239 
2240     end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2241     start = start & TARGET_PAGE_MASK;
2242 
2243     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2244         p = page_find(addr >> TARGET_PAGE_BITS);
2245         if (!p)
2246             return -1;
2247         if (!(p->flags & PAGE_VALID))
2248             return -1;
2249 
2250         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2251             return -1;
2252         if (flags & PAGE_WRITE) {
2253             if (!(p->flags & PAGE_WRITE_ORG))
2254                 return -1;
2255             /* unprotect the page if it was put read-only because it
2256                contains translated code */
2257             if (!(p->flags & PAGE_WRITE)) {
2258                 if (!page_unprotect(addr, 0, NULL))
2259                     return -1;
2260             }
2262         }
2263     }
2264     return 0;
2265 }
2266 
2267 /* called from signal handler: invalidate the code and unprotect the
2268    page. Return TRUE if the fault was successfully handled. */
2269 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2270 {
2271     unsigned int page_index, prot, pindex;
2272     PageDesc *p, *p1;
2273     target_ulong host_start, host_end, addr;
2274 
2275     /* Technically this isn't safe inside a signal handler.  However we
2276        know this only ever happens in a synchronous SEGV handler, so in
2277        practice it seems to be ok.  */
2278     mmap_lock();
2279 
2280     host_start = address & qemu_host_page_mask;
2281     page_index = host_start >> TARGET_PAGE_BITS;
2282     p1 = page_find(page_index);
2283     if (!p1) {
2284         mmap_unlock();
2285         return 0;
2286     }
2287     host_end = host_start + qemu_host_page_size;
2288     p = p1;
2289     prot = 0;
2290     for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2291         prot |= p->flags;
2292         p++;
2293     }
2294     /* if the page was really writable, then we change its
2295        protection back to writable */
2296     if (prot & PAGE_WRITE_ORG) {
2297         pindex = (address - host_start) >> TARGET_PAGE_BITS;
2298         if (!(p1[pindex].flags & PAGE_WRITE)) {
2299             mprotect((void *)g2h(host_start), qemu_host_page_size,
2300                      (prot & PAGE_BITS) | PAGE_WRITE);
2301             p1[pindex].flags |= PAGE_WRITE;
2302             /* and since the content will be modified, we must invalidate
2303                the corresponding translated code. */
2304             tb_invalidate_phys_page(address, pc, puc);
2305 #ifdef DEBUG_TB_CHECK
2306             tb_invalidate_check(address);
2307 #endif
2308             mmap_unlock();
2309             return 1;
2310         }
2311     }
2312     mmap_unlock();
2313     return 0;
2314 }
2315 
2316 static inline void tlb_set_dirty(CPUState *env,
2317                                  unsigned long addr, target_ulong vaddr)
2318 {
2319 }
2320 #endif /* defined(CONFIG_USER_ONLY) */
2321 
2322 #if !defined(CONFIG_USER_ONLY)
2323 
2324 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2325                              ram_addr_t memory, ram_addr_t region_offset);
2326 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2327                            ram_addr_t orig_memory, ram_addr_t region_offset);
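/* compute the sub-page range [start_addr2, end_addr2] that the registered
   region covers within the page at 'addr', and set need_subpage when the
   region does not span the whole page */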
2328 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2329                       need_subpage)                                     \
2330     do {                                                                \
2331         if (addr > start_addr)                                          \
2332             start_addr2 = 0;                                            \
2333         else {                                                          \
2334             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2335             if (start_addr2 > 0)                                        \
2336                 need_subpage = 1;                                       \
2337         }                                                               \
2338                                                                         \
2339         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2340             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2341         else {                                                          \
2342             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2343             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2344                 need_subpage = 1;                                       \
2345         }                                                               \
2346     } while (0)
2347 
2348 /* register physical memory.
2349    For RAM, 'size' must be a multiple of the target page size.
2350    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2351    io memory page.  The address used when calling the IO function is
2352    the offset from the start of the region, plus region_offset.  Both
2353    start_addr and region_offset are rounded down to a page boundary
2354    before calculating this offset.  This should not be a problem unless
2355    the low bits of start_addr and region_offset differ.  */
2356 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2357                                          ram_addr_t size,
2358                                          ram_addr_t phys_offset,
2359                                          ram_addr_t region_offset)
2360 {
2361     target_phys_addr_t addr, end_addr;
2362     PhysPageDesc *p;
2363     CPUState *env;
2364     ram_addr_t orig_size = size;
2365     void *subpage;
2366 
2367     if (kvm_enabled())
2368         kvm_set_phys_mem(start_addr, size, phys_offset);
2369 
2370     if (phys_offset == IO_MEM_UNASSIGNED) {
2371         region_offset = start_addr;
2372     }
2373     region_offset &= TARGET_PAGE_MASK;
2374     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2375     end_addr = start_addr + (target_phys_addr_t)size;
2376     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2377         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2378         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2379             ram_addr_t orig_memory = p->phys_offset;
2380             target_phys_addr_t start_addr2, end_addr2;
2381             int need_subpage = 0;
2382 
2383             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2384                           need_subpage);
2385             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2386                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2387                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2388                                            &p->phys_offset, orig_memory,
2389                                            p->region_offset);
2390                 } else {
2391                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2392                                             >> IO_MEM_SHIFT];
2393                 }
2394                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2395                                  region_offset);
2396                 p->region_offset = 0;
2397             } else {
2398                 p->phys_offset = phys_offset;
2399                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2400                     (phys_offset & IO_MEM_ROMD))
2401                     phys_offset += TARGET_PAGE_SIZE;
2402             }
2403         } else {
2404             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2405             p->phys_offset = phys_offset;
2406             p->region_offset = region_offset;
2407             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2408                 (phys_offset & IO_MEM_ROMD)) {
2409                 phys_offset += TARGET_PAGE_SIZE;
2410             } else {
2411                 target_phys_addr_t start_addr2, end_addr2;
2412                 int need_subpage = 0;
2413 
2414                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2415                               end_addr2, need_subpage);
2416 
2417                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2418                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2419                                            &p->phys_offset, IO_MEM_UNASSIGNED,
2420                                            addr & TARGET_PAGE_MASK);
2421                     subpage_register(subpage, start_addr2, end_addr2,
2422                                      phys_offset, region_offset);
2423                     p->region_offset = 0;
2424                 }
2425             }
2426         }
2427         region_offset += TARGET_PAGE_SIZE;
2428     }
2429 
2430     /* since each CPU stores ram addresses in its TLB cache, we must
2431        reset the modified entries */
2432     /* XXX: slow ! */
2433     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2434         tlb_flush(env, 1);
2435     }
2436 }
2437 
2438 /* XXX: temporary until new memory mapping API */
2439 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2440 {
2441     PhysPageDesc *p;
2442 
2443     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2444     if (!p)
2445         return IO_MEM_UNASSIGNED;
2446     return p->phys_offset;
2447 }
2448 
2449 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2450 {
2451     if (kvm_enabled())
2452         kvm_coalesce_mmio_region(addr, size);
2453 }
2454 
2455 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2456 {
2457     if (kvm_enabled())
2458         kvm_uncoalesce_mmio_region(addr, size);
2459 }
2460 
2461 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2462 {
2463     RAMBlock *new_block;
2464 
2465     size = TARGET_PAGE_ALIGN(size);
2466     new_block = qemu_malloc(sizeof(*new_block));
2467 
2468 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2469     /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2470     new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2471                            MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2472 #else
2473     new_block->host = qemu_vmalloc(size);
2474 #endif
2475 #ifdef MADV_MERGEABLE
2476     madvise(new_block->host, size, MADV_MERGEABLE);
2477 #endif
2478     new_block->offset = last_ram_offset;
2479     new_block->length = size;
2480 
2481     new_block->next = ram_blocks;
2482     ram_blocks = new_block;
2483 
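    /* grow the dirty bitmap and mark the newly allocated pages as dirty
       (all dirty flags set) */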
2484     phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2485         (last_ram_offset + size) >> TARGET_PAGE_BITS);
2486     memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2487            0xff, size >> TARGET_PAGE_BITS);
2488 
2489     last_ram_offset += size;
2490 
2491     if (kvm_enabled())
2492         kvm_setup_guest_memory(new_block->host, size);
2493 
2494     return new_block->offset;
2495 }
2496 
2497 void qemu_ram_free(ram_addr_t addr)
2498 {
2499     /* TODO: implement this.  */
2500 }
2501 
2502 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2503    With the exception of the softmmu code in this file, this should
2504    only be used for local memory (e.g. video ram) that the device owns,
2505    and knows it isn't going to access beyond the end of the block.
2506 
2507    It should not be used for general purpose DMA.
2508    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2509  */
2510 void *qemu_get_ram_ptr(ram_addr_t addr)
2511 {
2512     RAMBlock *prev;
2513     RAMBlock **prevp;
2514     RAMBlock *block;
2515 
2516     prev = NULL;
2517     prevp = &ram_blocks;
2518     block = ram_blocks;
2519     while (block && (block->offset > addr
2520                      || block->offset + block->length <= addr)) {
2521         if (prev)
2522           prevp = &prev->next;
2523         prev = block;
2524         block = block->next;
2525     }
2526     if (!block) {
2527         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2528         abort();
2529     }
2530     /* Move this entry to the start of the list.  */
2531     if (prev) {
2532         prev->next = block->next;
2533         block->next = *prevp;
2534         *prevp = block;
2535     }
2536     return block->host + (addr - block->offset);
2537 }
2538 
2539 /* Some of the softmmu routines need to translate from a host pointer
2540    (typically a TLB entry) back to a ram offset.  */
2541 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2542 {
2543     RAMBlock *prev;
2544     RAMBlock **prevp;
2545     RAMBlock *block;
2546     uint8_t *host = ptr;
2547 
2548     prev = NULL;
2549     prevp = &ram_blocks;
2550     block = ram_blocks;
2551     while (block && (block->host > host
2552                      || block->host + block->length <= host)) {
2553         if (prev)
2554           prevp = &prev->next;
2555         prev = block;
2556         block = block->next;
2557     }
2558     if (!block) {
2559         fprintf(stderr, "Bad ram pointer %p\n", ptr);
2560         abort();
2561     }
2562     return block->offset + (host - block->host);
2563 }
2564 
2565 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2566 {
2567 #ifdef DEBUG_UNASSIGNED
2568     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2569 #endif
2570 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2571     do_unassigned_access(addr, 0, 0, 0, 1);
2572 #endif
2573     return 0;
2574 }
2575 
2576 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2577 {
2578 #ifdef DEBUG_UNASSIGNED
2579     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2580 #endif
2581 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2582     do_unassigned_access(addr, 0, 0, 0, 2);
2583 #endif
2584     return 0;
2585 }
2586 
2587 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2588 {
2589 #ifdef DEBUG_UNASSIGNED
2590     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2591 #endif
2592 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2593     do_unassigned_access(addr, 0, 0, 0, 4);
2594 #endif
2595     return 0;
2596 }
2597 
2598 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2599 {
2600 #ifdef DEBUG_UNASSIGNED
2601     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2602 #endif
2603 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2604     do_unassigned_access(addr, 1, 0, 0, 1);
2605 #endif
2606 }
2607 
2608 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2609 {
2610 #ifdef DEBUG_UNASSIGNED
2611     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2612 #endif
2613 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2614     do_unassigned_access(addr, 1, 0, 0, 2);
2615 #endif
2616 }
2617 
2618 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2619 {
2620 #ifdef DEBUG_UNASSIGNED
2621     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2622 #endif
2623 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2624     do_unassigned_access(addr, 1, 0, 0, 4);
2625 #endif
2626 }
2627 
2628 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2629     unassigned_mem_readb,
2630     unassigned_mem_readw,
2631     unassigned_mem_readl,
2632 };
2633 
2634 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2635     unassigned_mem_writeb,
2636     unassigned_mem_writew,
2637     unassigned_mem_writel,
2638 };
2639 
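/* handlers for the IO_MEM_NOTDIRTY slow path: writes to RAM pages that
   contain translated code land here so the affected TBs can be invalidated
   before the dirty bits are set */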
2640 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2641                                 uint32_t val)
2642 {
2643     int dirty_flags;
2644     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2645     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2646 #if !defined(CONFIG_USER_ONLY)
2647         tb_invalidate_phys_page_fast(ram_addr, 1);
2648         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2649 #endif
2650     }
2651     stb_p(qemu_get_ram_ptr(ram_addr), val);
2652     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2653     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2654     /* we remove the notdirty callback only if the code has been
2655        flushed */
2656     if (dirty_flags == 0xff)
2657         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2658 }
2659 
2660 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2661                                 uint32_t val)
2662 {
2663     int dirty_flags;
2664     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2665     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2666 #if !defined(CONFIG_USER_ONLY)
2667         tb_invalidate_phys_page_fast(ram_addr, 2);
2668         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2669 #endif
2670     }
2671     stw_p(qemu_get_ram_ptr(ram_addr), val);
2672     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2673     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2674     /* we remove the notdirty callback only if the code has been
2675        flushed */
2676     if (dirty_flags == 0xff)
2677         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2678 }
2679 
2680 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2681                                 uint32_t val)
2682 {
2683     int dirty_flags;
2684     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2685     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2686 #if !defined(CONFIG_USER_ONLY)
2687         tb_invalidate_phys_page_fast(ram_addr, 4);
2688         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2689 #endif
2690     }
2691     stl_p(qemu_get_ram_ptr(ram_addr), val);
2692     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2693     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2694     /* we remove the notdirty callback only if the code has been
2695        flushed */
2696     if (dirty_flags == 0xff)
2697         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2698 }
2699 
2700 static CPUReadMemoryFunc *error_mem_read[3] = {
2701     NULL, /* never used */
2702     NULL, /* never used */
2703     NULL, /* never used */
2704 };
2705 
2706 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2707     notdirty_mem_writeb,
2708     notdirty_mem_writew,
2709     notdirty_mem_writel,
2710 };
2711 
2712 /* Generate a debug exception if a watchpoint has been hit.  */
2713 static void check_watchpoint(int offset, int len_mask, int flags)
2714 {
2715     CPUState *env = cpu_single_env;
2716     target_ulong pc, cs_base;
2717     TranslationBlock *tb;
2718     target_ulong vaddr;
2719     CPUWatchpoint *wp;
2720     int cpu_flags;
2721 
2722     if (env->watchpoint_hit) {
2723         /* We re-entered the check after replacing the TB. Now raise
2724          * the debug interrupt so that it will trigger after the
2725          * current instruction. */
2726         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2727         return;
2728     }
2729     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2730     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2731         if ((vaddr == (wp->vaddr & len_mask) ||
2732              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2733             wp->flags |= BP_WATCHPOINT_HIT;
2734             if (!env->watchpoint_hit) {
2735                 env->watchpoint_hit = wp;
2736                 tb = tb_find_pc(env->mem_io_pc);
2737                 if (!tb) {
2738                     cpu_abort(env, "check_watchpoint: could not find TB for "
2739                               "pc=%p", (void *)env->mem_io_pc);
2740                 }
2741                 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2742                 tb_phys_invalidate(tb, -1);
2743                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2744                     env->exception_index = EXCP_DEBUG;
2745                 } else {
2746                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2747                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2748                 }
2749                 cpu_resume_from_signal(env, NULL);
2750             }
2751         } else {
2752             wp->flags &= ~BP_WATCHPOINT_HIT;
2753         }
2754     }
2755 }
2756 
2757 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2758    so these check for a hit then pass through to the normal out-of-line
2759    phys routines.  */
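/* the mask passed to check_watchpoint() clears the low address bits of the
   access: ~0x0 for byte, ~0x1 for 16-bit and ~0x3 for 32-bit accesses */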
2760 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2761 {
2762     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2763     return ldub_phys(addr);
2764 }
2765 
2766 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2767 {
2768     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2769     return lduw_phys(addr);
2770 }
2771 
2772 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2773 {
2774     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2775     return ldl_phys(addr);
2776 }
2777 
2778 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2779                              uint32_t val)
2780 {
2781     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2782     stb_phys(addr, val);
2783 }
2784 
2785 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2786                              uint32_t val)
2787 {
2788     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2789     stw_phys(addr, val);
2790 }
2791 
2792 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2793                              uint32_t val)
2794 {
2795     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2796     stl_phys(addr, val);
2797 }
2798 
2799 static CPUReadMemoryFunc *watch_mem_read[3] = {
2800     watch_mem_readb,
2801     watch_mem_readw,
2802     watch_mem_readl,
2803 };
2804 
2805 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2806     watch_mem_writeb,
2807     watch_mem_writew,
2808     watch_mem_writel,
2809 };
2810 
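/* 'len' below is the log2 of the access size: 0 = byte, 1 = 16-bit, 2 = 32-bit */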
2811 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2812                                  unsigned int len)
2813 {
2814     uint32_t ret;
2815     unsigned int idx;
2816 
2817     idx = SUBPAGE_IDX(addr);
2818 #if defined(DEBUG_SUBPAGE)
2819     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2820            mmio, len, addr, idx);
2821 #endif
2822     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2823                                        addr + mmio->region_offset[idx][0][len]);
2824 
2825     return ret;
2826 }
2827 
2828 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2829                               uint32_t value, unsigned int len)
2830 {
2831     unsigned int idx;
2832 
2833     idx = SUBPAGE_IDX(addr);
2834 #if defined(DEBUG_SUBPAGE)
2835     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2836            mmio, len, addr, idx, value);
2837 #endif
2838     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2839                                   addr + mmio->region_offset[idx][1][len],
2840                                   value);
2841 }
2842 
2843 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2844 {
2845 #if defined(DEBUG_SUBPAGE)
2846     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2847 #endif
2848 
2849     return subpage_readlen(opaque, addr, 0);
2850 }
2851 
2852 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2853                             uint32_t value)
2854 {
2855 #if defined(DEBUG_SUBPAGE)
2856     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2857 #endif
2858     subpage_writelen(opaque, addr, value, 0);
2859 }
2860 
2861 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2862 {
2863 #if defined(DEBUG_SUBPAGE)
2864     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2865 #endif
2866 
2867     return subpage_readlen(opaque, addr, 1);
2868 }
2869 
2870 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2871                             uint32_t value)
2872 {
2873 #if defined(DEBUG_SUBPAGE)
2874     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2875 #endif
2876     subpage_writelen(opaque, addr, value, 1);
2877 }
2878 
2879 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2880 {
2881 #if defined(DEBUG_SUBPAGE)
2882     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2883 #endif
2884 
2885     return subpage_readlen(opaque, addr, 2);
2886 }
2887 
2888 static void subpage_writel (void *opaque,
2889                          target_phys_addr_t addr, uint32_t value)
2890 {
2891 #if defined(DEBUG_SUBPAGE)
2892     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2893 #endif
2894     subpage_writelen(opaque, addr, value, 2);
2895 }
2896 
2897 static CPUReadMemoryFunc *subpage_read[] = {
2898     &subpage_readb,
2899     &subpage_readw,
2900     &subpage_readl,
2901 };
2902 
2903 static CPUWriteMemoryFunc *subpage_write[] = {
2904     &subpage_writeb,
2905     &subpage_writew,
2906     &subpage_writel,
2907 };
2908 
2909 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2910                              ram_addr_t memory, ram_addr_t region_offset)
2911 {
2912     int idx, eidx;
2913     unsigned int i;
2914 
2915     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2916         return -1;
2917     idx = SUBPAGE_IDX(start);
2918     eidx = SUBPAGE_IDX(end);
2919 #if defined(DEBUG_SUBPAGE)
2920     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2921            mmio, start, end, idx, eidx, memory);
2922 #endif
2923     memory >>= IO_MEM_SHIFT;
2924     for (; idx <= eidx; idx++) {
2925         for (i = 0; i < 4; i++) {
2926             if (io_mem_read[memory][i]) {
2927                 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2928                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2929                 mmio->region_offset[idx][0][i] = region_offset;
2930             }
2931             if (io_mem_write[memory][i]) {
2932                 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2933                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2934                 mmio->region_offset[idx][1][i] = region_offset;
2935             }
2936         }
2937     }
2938 
2939     return 0;
2940 }
2941 
2942 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2943                            ram_addr_t orig_memory, ram_addr_t region_offset)
2944 {
2945     subpage_t *mmio;
2946     int subpage_memory;
2947 
2948     mmio = qemu_mallocz(sizeof(subpage_t));
2949 
2950     mmio->base = base;
2951     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2952 #if defined(DEBUG_SUBPAGE)
2953     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2954            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2955 #endif
2956     *phys = subpage_memory | IO_MEM_SUBPAGE;
2957     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2958                          region_offset);
2959 
2960     return mmio;
2961 }
2962 
2963 static int get_free_io_mem_idx(void)
2964 {
2965     int i;
2966 
2967     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2968         if (!io_mem_used[i]) {
2969             io_mem_used[i] = 1;
2970             return i;
2971         }
2972     fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
2973     return -1;
2974 }
2975 
2976 /* mem_read and mem_write are arrays of functions containing the
2977    function to access byte (index 0), word (index 1) and dword (index
2978    2). Functions can be omitted with a NULL function pointer.
2979    If io_index is non zero, the corresponding io zone is
2980    modified. If it is zero, a new io zone is allocated. The return
2981    value can be used with cpu_register_physical_memory(). (-1) is
2982    returned if error. */
2983 static int cpu_register_io_memory_fixed(int io_index,
2984                                         CPUReadMemoryFunc **mem_read,
2985                                         CPUWriteMemoryFunc **mem_write,
2986                                         void *opaque)
2987 {
2988     int i, subwidth = 0;
2989 
2990     if (io_index <= 0) {
2991         io_index = get_free_io_mem_idx();
2992         if (io_index == -1)
2993             return io_index;
2994     } else {
2995         io_index >>= IO_MEM_SHIFT;
2996         if (io_index >= IO_MEM_NB_ENTRIES)
2997             return -1;
2998     }
2999 
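    /* if a handler is missing for some access size, flag the region as
       sub-width so that accesses to it are routed through subpages */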
3000     for(i = 0;i < 3; i++) {
3001         if (!mem_read[i] || !mem_write[i])
3002             subwidth = IO_MEM_SUBWIDTH;
3003         io_mem_read[io_index][i] = mem_read[i];
3004         io_mem_write[io_index][i] = mem_write[i];
3005     }
3006     io_mem_opaque[io_index] = opaque;
3007     return (io_index << IO_MEM_SHIFT) | subwidth;
3008 }
3009 
3010 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3011                            CPUWriteMemoryFunc **mem_write,
3012                            void *opaque)
3013 {
3014     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3015 }
3016 
3017 void cpu_unregister_io_memory(int io_table_address)
3018 {
3019     int i;
3020     int io_index = io_table_address >> IO_MEM_SHIFT;
3021 
3022     for (i=0;i < 3; i++) {
3023         io_mem_read[io_index][i] = unassigned_mem_read[i];
3024         io_mem_write[io_index][i] = unassigned_mem_write[i];
3025     }
3026     io_mem_opaque[io_index] = NULL;
3027     io_mem_used[io_index] = 0;
3028 }
3029 
3030 static void io_mem_init(void)
3031 {
3032     int i;
3033 
3034     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3035     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3036     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
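    /* mark the low slots used by the fixed handlers above as allocated so
       that dynamically registered regions start after them */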
3037     for (i=0; i<5; i++)
3038         io_mem_used[i] = 1;
3039 
3040     io_mem_watch = cpu_register_io_memory(watch_mem_read,
3041                                           watch_mem_write, NULL);
3042 }
3043 
3044 #endif /* !defined(CONFIG_USER_ONLY) */
3045 
3046 /* physical memory access (slow version, mainly for debug) */
3047 #if defined(CONFIG_USER_ONLY)
3048 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3049                             int len, int is_write)
3050 {
3051     int l, flags;
3052     target_ulong page;
3053     void * p;
3054 
3055     while (len > 0) {
3056         page = addr & TARGET_PAGE_MASK;
3057         l = (page + TARGET_PAGE_SIZE) - addr;
3058         if (l > len)
3059             l = len;
3060         flags = page_get_flags(page);
3061         if (!(flags & PAGE_VALID))
3062             return;
3063         if (is_write) {
3064             if (!(flags & PAGE_WRITE))
3065                 return;
3066             /* XXX: this code should not depend on lock_user */
3067             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3068                 /* FIXME - should this return an error rather than just fail? */
3069                 return;
3070             memcpy(p, buf, l);
3071             unlock_user(p, addr, l);
3072         } else {
3073             if (!(flags & PAGE_READ))
3074                 return;
3075             /* XXX: this code should not depend on lock_user */
3076             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3077                 /* FIXME - should this return an error rather than just fail? */
3078                 return;
3079             memcpy(buf, p, l);
3080             unlock_user(p, addr, 0);
3081         }
3082         len -= l;
3083         buf += l;
3084         addr += l;
3085     }
3086 }
3087 
3088 #else
3089 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3090                             int len, int is_write)
3091 {
3092     int l, io_index;
3093     uint8_t *ptr;
3094     uint32_t val;
3095     target_phys_addr_t page;
3096     unsigned long pd;
3097     PhysPageDesc *p;
3098 
3099     while (len > 0) {
3100         page = addr & TARGET_PAGE_MASK;
3101         l = (page + TARGET_PAGE_SIZE) - addr;
3102         if (l > len)
3103             l = len;
3104         p = phys_page_find(page >> TARGET_PAGE_BITS);
3105         if (!p) {
3106             pd = IO_MEM_UNASSIGNED;
3107         } else {
3108             pd = p->phys_offset;
3109         }
3110 
3111         if (is_write) {
3112             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3113                 target_phys_addr_t addr1 = addr;
3114                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3115                 if (p)
3116                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3117                 /* XXX: could force cpu_single_env to NULL to avoid
3118                    potential bugs */
3119                 if (l >= 4 && ((addr1 & 3) == 0)) {
3120                     /* 32 bit write access */
3121                     val = ldl_p(buf);
3122                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3123                     l = 4;
3124                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3125                     /* 16 bit write access */
3126                     val = lduw_p(buf);
3127                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3128                     l = 2;
3129                 } else {
3130                     /* 8 bit write access */
3131                     val = ldub_p(buf);
3132                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3133                     l = 1;
3134                 }
3135             } else {
3136                 unsigned long addr1;
3137                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3138                 /* RAM case */
3139                 ptr = qemu_get_ram_ptr(addr1);
3140                 memcpy(ptr, buf, l);
3141                 if (!cpu_physical_memory_is_dirty(addr1)) {
3142                     /* invalidate code */
3143                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3144                     /* set dirty bit */
3145                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3146                         (0xff & ~CODE_DIRTY_FLAG);
3147                 }
3148             }
3149         } else {
3150             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3151                 !(pd & IO_MEM_ROMD)) {
3152                 target_phys_addr_t addr1 = addr;
3153                 /* I/O case */
3154                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3155                 if (p)
3156                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3157                 if (l >= 4 && ((addr1 & 3) == 0)) {
3158                     /* 32 bit read access */
3159                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3160                     stl_p(buf, val);
3161                     l = 4;
3162                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3163                     /* 16 bit read access */
3164                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3165                     stw_p(buf, val);
3166                     l = 2;
3167                 } else {
3168                     /* 8 bit read access */
3169                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3170                     stb_p(buf, val);
3171                     l = 1;
3172                 }
3173             } else {
3174                 /* RAM case */
3175                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3176                     (addr & ~TARGET_PAGE_MASK);
3177                 memcpy(buf, ptr, l);
3178             }
3179         }
3180         len -= l;
3181         buf += l;
3182         addr += l;
3183     }
3184 }
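
/* Illustrative sketch (not part of the original file): a direct caller of
 * cpu_physical_memory_rw().  The guest-physical address and buffer are made
 * up; only the calling convention comes from the function above. */
#if 0
static void copy_guest_phys_example(void)
{
    uint8_t buf[16];
    target_phys_addr_t addr = 0x10000000;       /* hypothetical guest-physical address */

    /* read 16 bytes of guest physical memory into buf */
    cpu_physical_memory_rw(addr, buf, sizeof(buf), 0);

    /* write them back 16 bytes further on; RAM pages get their dirty
       bits set and any translated code there is invalidated */
    cpu_physical_memory_rw(addr + 16, buf, sizeof(buf), 1);
}
#endif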
3185 
3186 /* used for ROM loading: can write in RAM and ROM */
3187 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3188                                    const uint8_t *buf, int len)
3189 {
3190     int l;
3191     uint8_t *ptr;
3192     target_phys_addr_t page;
3193     unsigned long pd;
3194     PhysPageDesc *p;
3195 
3196     while (len > 0) {
3197         page = addr & TARGET_PAGE_MASK;
3198         l = (page + TARGET_PAGE_SIZE) - addr;
3199         if (l > len)
3200             l = len;
3201         p = phys_page_find(page >> TARGET_PAGE_BITS);
3202         if (!p) {
3203             pd = IO_MEM_UNASSIGNED;
3204         } else {
3205             pd = p->phys_offset;
3206         }
3207 
3208         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3209             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3210             !(pd & IO_MEM_ROMD)) {
3211             /* do nothing */
3212         } else {
3213             unsigned long addr1;
3214             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3215             /* ROM/RAM case */
3216             ptr = qemu_get_ram_ptr(addr1);
3217             memcpy(ptr, buf, l);
3218         }
3219         len -= l;
3220         buf += l;
3221         addr += l;
3222     }
3223 }
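
/* Illustrative sketch (not part of the original file): loading a firmware
 * blob with the ROM-capable writer above.  blob and rom_base are assumptions. */
#if 0
static void load_blob_example(const uint8_t *blob, int size,
                              target_phys_addr_t rom_base)
{
    /* unlike cpu_physical_memory_rw(), this also stores into ROM/ROMD pages */
    cpu_physical_memory_write_rom(rom_base, blob, size);
}
#endif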
3224 
3225 typedef struct {
3226     void *buffer;
3227     target_phys_addr_t addr;
3228     target_phys_addr_t len;
3229 } BounceBuffer;
3230 
3231 static BounceBuffer bounce;
3232 
3233 typedef struct MapClient {
3234     void *opaque;
3235     void (*callback)(void *opaque);
3236     QLIST_ENTRY(MapClient) link;
3237 } MapClient;
3238 
3239 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3240     = QLIST_HEAD_INITIALIZER(map_client_list);
3241 
3242 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3243 {
3244     MapClient *client = qemu_malloc(sizeof(*client));
3245 
3246     client->opaque = opaque;
3247     client->callback = callback;
3248     QLIST_INSERT_HEAD(&map_client_list, client, link);
3249     return client;
3250 }
3251 
3252 void cpu_unregister_map_client(void *_client)
3253 {
3254     MapClient *client = (MapClient *)_client;
3255 
3256     QLIST_REMOVE(client, link);
3257     qemu_free(client);
3258 }
3259 
3260 static void cpu_notify_map_clients(void)
3261 {
3262     MapClient *client;
3263 
3264     while (!QLIST_EMPTY(&map_client_list)) {
3265         client = QLIST_FIRST(&map_client_list);
3266         client->callback(client->opaque);
3267         QLIST_REMOVE(client, link);
3268     }
3269 }
3270 
3271 /* Map a physical memory region into a host virtual address.
3272  * May map a subset of the requested range, given by and returned in *plen.
3273  * May return NULL if resources needed to perform the mapping are exhausted.
3274  * Use only for reads OR writes - not for read-modify-write operations.
3275  * Use cpu_register_map_client() to know when retrying the map operation is
3276  * likely to succeed.
3277  */
3278 void *cpu_physical_memory_map(target_phys_addr_t addr,
3279                               target_phys_addr_t *plen,
3280                               int is_write)
3281 {
3282     target_phys_addr_t len = *plen;
3283     target_phys_addr_t done = 0;
3284     int l;
3285     uint8_t *ret = NULL;
3286     uint8_t *ptr;
3287     target_phys_addr_t page;
3288     unsigned long pd;
3289     PhysPageDesc *p;
3290     unsigned long addr1;
3291 
3292     while (len > 0) {
3293         page = addr & TARGET_PAGE_MASK;
3294         l = (page + TARGET_PAGE_SIZE) - addr;
3295         if (l > len)
3296             l = len;
3297         p = phys_page_find(page >> TARGET_PAGE_BITS);
3298         if (!p) {
3299             pd = IO_MEM_UNASSIGNED;
3300         } else {
3301             pd = p->phys_offset;
3302         }
3303 
3304         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3305             if (done || bounce.buffer) {
3306                 break;
3307             }
3308             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3309             bounce.addr = addr;
3310             bounce.len = l;
3311             if (!is_write) {
3312                 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3313             }
3314             ptr = bounce.buffer;
3315         } else {
3316             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3317             ptr = qemu_get_ram_ptr(addr1);
3318         }
3319         if (!done) {
3320             ret = ptr;
3321         } else if (ret + done != ptr) {
3322             break;
3323         }
3324 
3325         len -= l;
3326         addr += l;
3327         done += l;
3328     }
3329     *plen = done;
3330     return ret;
3331 }
3332 
3333 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3334  * Will also mark the memory as dirty if is_write == 1.  access_len gives
3335  * the amount of memory that was actually read or written by the caller.
3336  */
3337 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3338                                int is_write, target_phys_addr_t access_len)
3339 {
3340     if (buffer != bounce.buffer) {
3341         if (is_write) {
3342             ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3343             while (access_len) {
3344                 unsigned l;
3345                 l = TARGET_PAGE_SIZE;
3346                 if (l > access_len)
3347                     l = access_len;
3348                 if (!cpu_physical_memory_is_dirty(addr1)) {
3349                     /* invalidate code */
3350                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3351                     /* set dirty bit */
3352                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3353                         (0xff & ~CODE_DIRTY_FLAG);
3354                 }
3355                 addr1 += l;
3356                 access_len -= l;
3357             }
3358         }
3359         return;
3360     }
3361     if (is_write) {
3362         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3363     }
3364     qemu_free(bounce.buffer);
3365     bounce.buffer = NULL;
3366     cpu_notify_map_clients();
3367 }
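
/* Illustrative sketch (not part of the original file): the map/unmap pattern
 * used for zero-copy DMA.  do_dma_example and dma_retry_cb are hypothetical;
 * the mapping, retry-notification and unmapping calls are the ones defined
 * above. */
#if 0
static void dma_retry_cb(void *opaque)
{
    /* in a real device this would restart the pending transfer */
}

static void do_dma_example(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* bounce buffer busy: ask to be notified when mapping may succeed
           (the returned handle could be kept for cpu_unregister_map_client()) */
        cpu_register_map_client(NULL, dma_retry_cb);
        return;
    }
    /* plen may be smaller than len; only [host, host + plen) is valid */
    memset(host, 0, plen);
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}
#endif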
3368 
3369 /* warning: addr must be aligned */
3370 uint32_t ldl_phys(target_phys_addr_t addr)
3371 {
3372     int io_index;
3373     uint8_t *ptr;
3374     uint32_t val;
3375     unsigned long pd;
3376     PhysPageDesc *p;
3377 
3378     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3379     if (!p) {
3380         pd = IO_MEM_UNASSIGNED;
3381     } else {
3382         pd = p->phys_offset;
3383     }
3384 
3385     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3386         !(pd & IO_MEM_ROMD)) {
3387         /* I/O case */
3388         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3389         if (p)
3390             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3391         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3392     } else {
3393         /* RAM case */
3394         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3395             (addr & ~TARGET_PAGE_MASK);
3396         val = ldl_p(ptr);
3397     }
3398     return val;
3399 }
3400 
3401 /* warning: addr must be aligned */
3402 uint64_t ldq_phys(target_phys_addr_t addr)
3403 {
3404     int io_index;
3405     uint8_t *ptr;
3406     uint64_t val;
3407     unsigned long pd;
3408     PhysPageDesc *p;
3409 
3410     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3411     if (!p) {
3412         pd = IO_MEM_UNASSIGNED;
3413     } else {
3414         pd = p->phys_offset;
3415     }
3416 
3417     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3418         !(pd & IO_MEM_ROMD)) {
3419         /* I/O case */
3420         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3421         if (p)
3422             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3423 #ifdef TARGET_WORDS_BIGENDIAN
3424         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3425         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3426 #else
3427         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3428         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3429 #endif
3430     } else {
3431         /* RAM case */
3432         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3433             (addr & ~TARGET_PAGE_MASK);
3434         val = ldq_p(ptr);
3435     }
3436     return val;
3437 }
3438 
3439 /* XXX: optimize */
3440 uint32_t ldub_phys(target_phys_addr_t addr)
3441 {
3442     uint8_t val;
3443     cpu_physical_memory_read(addr, &val, 1);
3444     return val;
3445 }
3446 
3447 /* XXX: optimize */
3448 uint32_t lduw_phys(target_phys_addr_t addr)
3449 {
3450     uint16_t val;
3451     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3452     return tswap16(val);
3453 }
3454 
3455 /* warning: addr must be aligned. The ram page is not marked as dirty
3456    and the code inside is not invalidated. It is useful if the dirty
3457    bits are used to track modified PTEs */
3458 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3459 {
3460     int io_index;
3461     uint8_t *ptr;
3462     unsigned long pd;
3463     PhysPageDesc *p;
3464 
3465     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3466     if (!p) {
3467         pd = IO_MEM_UNASSIGNED;
3468     } else {
3469         pd = p->phys_offset;
3470     }
3471 
3472     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3473         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3474         if (p)
3475             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3476         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3477     } else {
3478         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3479         ptr = qemu_get_ram_ptr(addr1);
3480         stl_p(ptr, val);
3481 
3482         if (unlikely(in_migration)) {
3483             if (!cpu_physical_memory_is_dirty(addr1)) {
3484                 /* invalidate code */
3485                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3486                 /* set dirty bit */
3487                 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3488                     (0xff & ~CODE_DIRTY_FLAG);
3489             }
3490         }
3491     }
3492 }
3493 
3494 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3495 {
3496     int io_index;
3497     uint8_t *ptr;
3498     unsigned long pd;
3499     PhysPageDesc *p;
3500 
3501     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3502     if (!p) {
3503         pd = IO_MEM_UNASSIGNED;
3504     } else {
3505         pd = p->phys_offset;
3506     }
3507 
3508     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3509         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3510         if (p)
3511             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3512 #ifdef TARGET_WORDS_BIGENDIAN
3513         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3514         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3515 #else
3516         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3517         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3518 #endif
3519     } else {
3520         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3521             (addr & ~TARGET_PAGE_MASK);
3522         stq_p(ptr, val);
3523     }
3524 }
3525 
3526 /* warning: addr must be aligned */
3527 void stl_phys(target_phys_addr_t addr, uint32_t val)
3528 {
3529     int io_index;
3530     uint8_t *ptr;
3531     unsigned long pd;
3532     PhysPageDesc *p;
3533 
3534     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3535     if (!p) {
3536         pd = IO_MEM_UNASSIGNED;
3537     } else {
3538         pd = p->phys_offset;
3539     }
3540 
3541     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3542         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3543         if (p)
3544             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3545         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3546     } else {
3547         unsigned long addr1;
3548         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3549         /* RAM case */
3550         ptr = qemu_get_ram_ptr(addr1);
3551         stl_p(ptr, val);
3552         if (!cpu_physical_memory_is_dirty(addr1)) {
3553             /* invalidate code */
3554             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3555             /* set dirty bit */
3556             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3557                 (0xff & ~CODE_DIRTY_FLAG);
3558         }
3559     }
3560 }
3561 
3562 /* XXX: optimize */
3563 void stb_phys(target_phys_addr_t addr, uint32_t val)
3564 {
3565     uint8_t v = val;
3566     cpu_physical_memory_write(addr, &v, 1);
3567 }
3568 
3569 /* XXX: optimize */
3570 void stw_phys(target_phys_addr_t addr, uint32_t val)
3571 {
3572     uint16_t v = tswap16(val);
3573     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3574 }
3575 
3576 /* XXX: optimize */
3577 void stq_phys(target_phys_addr_t addr, uint64_t val)
3578 {
3579     val = tswap64(val);
3580     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3581 }
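
/* Illustrative sketch (not part of the original file): aligned word access to
 * a guest-physical location with the ldl_phys()/stl_phys() helpers above.
 * The register address is an assumption. */
#if 0
static void poke_guest_word_example(void)
{
    target_phys_addr_t reg = 0x10000004;        /* hypothetical, 4-byte aligned */
    uint32_t v = ldl_phys(reg);                 /* I/O read or RAM load */
    stl_phys(reg, v | 1);                       /* write back; RAM page marked dirty */
}
#endif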
3582 
3583 #endif
3584 
3585 /* virtual memory access for debug (includes writing to ROM) */
3586 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3587                         uint8_t *buf, int len, int is_write)
3588 {
3589     int l;
3590     target_phys_addr_t phys_addr;
3591     target_ulong page;
3592 
3593     while (len > 0) {
3594         page = addr & TARGET_PAGE_MASK;
3595         phys_addr = cpu_get_phys_page_debug(env, page);
3596         /* if no physical page mapped, return an error */
3597         if (phys_addr == -1)
3598             return -1;
3599         l = (page + TARGET_PAGE_SIZE) - addr;
3600         if (l > len)
3601             l = len;
3602         phys_addr += (addr & ~TARGET_PAGE_MASK);
3603 #if !defined(CONFIG_USER_ONLY)
3604         if (is_write)
3605             cpu_physical_memory_write_rom(phys_addr, buf, l);
3606         else
3607 #endif
3608             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3609         len -= l;
3610         buf += l;
3611         addr += l;
3612     }
3613     return 0;
3614 }
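
/* Illustrative sketch (not part of the original file): a gdbstub-style read of
 * guest virtual memory through cpu_memory_rw_debug().  The wrapper and its
 * arguments are assumptions; only the call itself is defined above. */
#if 0
static int read_guest_virt_example(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* returns -1 if any page in the range has no physical mapping */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif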
3615 
3616 /* in deterministic execution mode, instructions doing device I/Os
3617    must be at the end of the TB */
3618 void cpu_io_recompile(CPUState *env, void *retaddr)
3619 {
3620     TranslationBlock *tb;
3621     uint32_t n, cflags;
3622     target_ulong pc, cs_base;
3623     uint64_t flags;
3624 
3625     tb = tb_find_pc((unsigned long)retaddr);
3626     if (!tb) {
3627         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3628                   retaddr);
3629     }
3630     n = env->icount_decr.u16.low + tb->icount;
3631     cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3632     /* Calculate how many instructions had been executed before the fault
3633        occurred.  */
3634     n = n - env->icount_decr.u16.low;
3635     /* Generate a new TB ending on the I/O insn.  */
3636     n++;
3637     /* On MIPS and SH, delay slot instructions can only be restarted if
3638        they were already the first instruction in the TB.  If this is not
3639        the first instruction in a TB then re-execute the preceding
3640        branch.  */
3641 #if defined(TARGET_MIPS)
3642     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3643         env->active_tc.PC -= 4;
3644         env->icount_decr.u16.low++;
3645         env->hflags &= ~MIPS_HFLAG_BMASK;
3646     }
3647 #elif defined(TARGET_SH4)
3648     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3649             && n > 1) {
3650         env->pc -= 2;
3651         env->icount_decr.u16.low++;
3652         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3653     }
3654 #endif
3655     /* This should never happen.  */
3656     if (n > CF_COUNT_MASK)
3657         cpu_abort(env, "TB too big during recompile");
3658 
3659     cflags = n | CF_LAST_IO;
3660     pc = tb->pc;
3661     cs_base = tb->cs_base;
3662     flags = tb->flags;
3663     tb_phys_invalidate(tb, -1);
3664     /* FIXME: In theory this could raise an exception.  In practice
3665        we have already translated the block once so it's probably ok.  */
3666     tb_gen_code(env, pc, cs_base, flags, cflags);
3667     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3668        the first in the TB) then we end up generating a whole new TB and
3669        repeating the fault, which is horribly inefficient.
3670        Better would be to execute just this insn uncached, or generate a
3671        second new TB.  */
3672     cpu_resume_from_signal(env, NULL);
3673 }
3674 
3675 void dump_exec_info(FILE *f,
3676                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3677 {
3678     int i, target_code_size, max_target_code_size;
3679     int direct_jmp_count, direct_jmp2_count, cross_page;
3680     TranslationBlock *tb;
3681 
3682     target_code_size = 0;
3683     max_target_code_size = 0;
3684     cross_page = 0;
3685     direct_jmp_count = 0;
3686     direct_jmp2_count = 0;
3687     for(i = 0; i < nb_tbs; i++) {
3688         tb = &tbs[i];
3689         target_code_size += tb->size;
3690         if (tb->size > max_target_code_size)
3691             max_target_code_size = tb->size;
3692         if (tb->page_addr[1] != -1)
3693             cross_page++;
3694         if (tb->tb_next_offset[0] != 0xffff) {
3695             direct_jmp_count++;
3696             if (tb->tb_next_offset[1] != 0xffff) {
3697                 direct_jmp2_count++;
3698             }
3699         }
3700     }
3701     /* XXX: avoid using doubles ? */
3702     cpu_fprintf(f, "Translation buffer state:\n");
3703     cpu_fprintf(f, "gen code size       %ld/%ld\n",
3704                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3705     cpu_fprintf(f, "TB count            %d/%d\n",
3706                 nb_tbs, code_gen_max_blocks);
3707     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3708                 nb_tbs ? target_code_size / nb_tbs : 0,
3709                 max_target_code_size);
3710     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3711                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3712                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3713     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3714             cross_page,
3715             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3716     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3717                 direct_jmp_count,
3718                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3719                 direct_jmp2_count,
3720                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3721     cpu_fprintf(f, "\nStatistics:\n");
3722     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3723     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3724     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3725     tcg_dump_info(f, cpu_fprintf);
3726 }
3727 
3728 #if !defined(CONFIG_USER_ONLY)
3729 
3730 #define MMUSUFFIX _cmmu
3731 #define GETPC() NULL
3732 #define env cpu_single_env
3733 #define SOFTMMU_CODE_ACCESS
3734 
3735 #define SHIFT 0
3736 #include "softmmu_template.h"
3737 
3738 #define SHIFT 1
3739 #include "softmmu_template.h"
3740 
3741 #define SHIFT 2
3742 #include "softmmu_template.h"
3743 
3744 #define SHIFT 3
3745 #include "softmmu_template.h"
3746 
3747 #undef env
3748 
3749 #endif
3750