1 /*
2  *  virtual page mapping and translated block handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19  */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35 
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #include "osdep.h"
42 #include "kvm.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
46 
47 //#define DEBUG_TB_INVALIDATE
48 //#define DEBUG_FLUSH
49 //#define DEBUG_TLB
50 //#define DEBUG_UNASSIGNED
51 
52 /* make various TB consistency checks */
53 //#define DEBUG_TB_CHECK
54 //#define DEBUG_TLB_CHECK
55 
56 //#define DEBUG_IOPORT
57 //#define DEBUG_SUBPAGE
58 
59 #if !defined(CONFIG_USER_ONLY)
60 /* TB consistency checks only implemented for usermode emulation.  */
61 #undef DEBUG_TB_CHECK
62 #endif
63 
64 #define SMC_BITMAP_USE_THRESHOLD 10
65 
66 #if defined(TARGET_SPARC64)
67 #define TARGET_PHYS_ADDR_SPACE_BITS 41
68 #elif defined(TARGET_SPARC)
69 #define TARGET_PHYS_ADDR_SPACE_BITS 36
70 #elif defined(TARGET_ALPHA)
71 #define TARGET_PHYS_ADDR_SPACE_BITS 42
72 #define TARGET_VIRT_ADDR_SPACE_BITS 42
73 #elif defined(TARGET_PPC64)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
76 #define TARGET_PHYS_ADDR_SPACE_BITS 42
77 #elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
78 #define TARGET_PHYS_ADDR_SPACE_BITS 36
79 #else
80 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81 #define TARGET_PHYS_ADDR_SPACE_BITS 32
82 #endif
83 
84 static TranslationBlock *tbs;
85 int code_gen_max_blocks;
86 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87 static int nb_tbs;
88 /* any access to the tbs or the page table must use this lock */
89 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90 
91 #if defined(__arm__) || defined(__sparc_v9__)
92 /* The prologue must be reachable with a direct jump. ARM and Sparc64
93  have limited branch ranges (possibly also PPC) so place it in a
94  section close to the code segment. */
95 #define code_gen_section                                \
96     __attribute__((__section__(".gen_code")))           \
97     __attribute__((aligned (32)))
98 #else
99 #define code_gen_section                                \
100     __attribute__((aligned (32)))
101 #endif
102 
103 uint8_t code_gen_prologue[1024] code_gen_section;
104 static uint8_t *code_gen_buffer;
105 static unsigned long code_gen_buffer_size;
106 /* threshold to flush the translated code buffer */
107 static unsigned long code_gen_buffer_max_size;
108 uint8_t *code_gen_ptr;
109 
110 #if !defined(CONFIG_USER_ONLY)
111 int phys_ram_fd;
112 uint8_t *phys_ram_dirty;
113 static int in_migration;
114 
115 typedef struct RAMBlock {
116     uint8_t *host;
117     ram_addr_t offset;
118     ram_addr_t length;
119     struct RAMBlock *next;
120 } RAMBlock;
121 
122 static RAMBlock *ram_blocks;
123 /* TODO: once we implement (and use) RAM deallocation (e.g. for hotplug),
124    we can no longer assume contiguous RAM offsets, and external uses
125    of this variable will break.  */
126 ram_addr_t last_ram_offset;
127 #endif
128 
129 CPUState *first_cpu;
130 /* current CPU in the current thread. It is only valid inside
131    cpu_exec() */
132 CPUState *cpu_single_env;
133 /* 0 = Do not count executed instructions.
134    1 = Precise instruction counting.
135    2 = Adaptive rate instruction counting.  */
136 int use_icount = 0;
137 /* Current instruction counter.  While executing translated code this may
138    include some instructions that have not yet been executed.  */
139 int64_t qemu_icount;
140 
141 typedef struct PageDesc {
142     /* list of TBs intersecting this ram page */
143     TranslationBlock *first_tb;
144     /* in order to optimize self modifying code, we count the number
145        of lookups we do to a given page to use a bitmap */
146     unsigned int code_write_count;
147     uint8_t *code_bitmap;
148 #if defined(CONFIG_USER_ONLY)
149     unsigned long flags;
150 #endif
151 } PageDesc;
152 
153 typedef struct PhysPageDesc {
154     /* offset in host memory of the page + io_index in the low bits */
155     ram_addr_t phys_offset;
156     ram_addr_t region_offset;
157 } PhysPageDesc;
158 
159 #define L2_BITS 10
160 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
161 /* XXX: this is a temporary hack for the alpha target.
162  *      In the future, this is to be replaced by a multi-level table
163  *      to actually be able to handle the complete 64-bit address space.
164  */
165 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
166 #else
167 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
168 #endif
169 
170 #define L1_SIZE (1 << L1_BITS)
171 #define L2_SIZE (1 << L2_BITS)
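/* Editor's note: the guest page maps here are two-level.  A page index is
   split as (index >> L2_BITS) to pick an l1_map slot and
   (index & (L2_SIZE - 1)) to pick the PageDesc inside that leaf, as done in
   page_l1_map()/page_find_alloc() below.  Illustrative: with the 32-bit
   layout in the #else branch and a 4 KB target page (TARGET_PAGE_BITS == 12),
   an address splits into 10 L1 bits, 10 L2 bits and 12 page-offset bits. */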
172 
173 unsigned long qemu_real_host_page_size;
174 unsigned long qemu_host_page_bits;
175 unsigned long qemu_host_page_size;
176 unsigned long qemu_host_page_mask;
177 
178 /* XXX: for system emulation, it could just be an array */
179 static PageDesc *l1_map[L1_SIZE];
180 static PhysPageDesc **l1_phys_map;
181 
182 #if !defined(CONFIG_USER_ONLY)
183 static void io_mem_init(void);
184 
185 /* io memory support */
186 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
187 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
188 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
189 static char io_mem_used[IO_MEM_NB_ENTRIES];
190 static int io_mem_watch;
191 #endif
192 
193 /* log support */
194 static const char *logfilename = "/tmp/qemu.log";
195 FILE *logfile;
196 int loglevel;
197 static int log_append = 0;
198 
199 /* statistics */
200 static int tlb_flush_count;
201 static int tb_flush_count;
202 static int tb_phys_invalidate_count;
203 
204 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
205 typedef struct subpage_t {
206     target_phys_addr_t base;
207     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
208     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
209     void *opaque[TARGET_PAGE_SIZE][2][4];
210     ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
211 } subpage_t;
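/* Editor's note: a subpage_t lets one target page be shared between several
   I/O handlers.  SUBPAGE_IDX(addr) is simply the offset of 'addr' within its
   page; the tables above are indexed by that offset and (matching the
   io_mem_read/io_mem_write tables earlier) by an access-size slot. */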
212 
213 #ifdef _WIN32
214 static void map_exec(void *addr, long size)
215 {
216     DWORD old_protect;
217     VirtualProtect(addr, size,
218                    PAGE_EXECUTE_READWRITE, &old_protect);
219 
220 }
221 #else
222 static void map_exec(void *addr, long size)
223 {
224     unsigned long start, end, page_size;
225 
226     page_size = getpagesize();
227     start = (unsigned long)addr;
228     start &= ~(page_size - 1);
229 
230     end = (unsigned long)addr + size;
231     end += page_size - 1;
232     end &= ~(page_size - 1);
233 
234     mprotect((void *)start, end - start,
235              PROT_READ | PROT_WRITE | PROT_EXEC);
236 }
237 #endif
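/* Editor's note (illustrative): map_exec() rounds the requested range out to
   host page boundaries before changing protections.  E.g. with 4 KB host
   pages, map_exec((void *)0x1234, 0x100) ends up mprotect()ing the single
   page 0x1000..0x1fff as read/write/execute. */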
238 
239 static void page_init(void)
240 {
241     /* NOTE: we can always suppose that qemu_host_page_size >=
242        TARGET_PAGE_SIZE */
243 #ifdef _WIN32
244     {
245         SYSTEM_INFO system_info;
246 
247         GetSystemInfo(&system_info);
248         qemu_real_host_page_size = system_info.dwPageSize;
249     }
250 #else
251     qemu_real_host_page_size = getpagesize();
252 #endif
253     if (qemu_host_page_size == 0)
254         qemu_host_page_size = qemu_real_host_page_size;
255     if (qemu_host_page_size < TARGET_PAGE_SIZE)
256         qemu_host_page_size = TARGET_PAGE_SIZE;
257     qemu_host_page_bits = 0;
258     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
259         qemu_host_page_bits++;
260     qemu_host_page_mask = ~(qemu_host_page_size - 1);
261     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
262     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
263 
264 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
265     {
266         long long startaddr, endaddr;
267         FILE *f;
268         int n;
269 
270         mmap_lock();
271         last_brk = (unsigned long)sbrk(0);
272         f = fopen("/proc/self/maps", "r");
273         if (f) {
274             do {
275                 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
276                 if (n == 2) {
277                     startaddr = MIN(startaddr,
278                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
279                     endaddr = MIN(endaddr,
280                                     (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
281                     page_set_flags(startaddr & TARGET_PAGE_MASK,
282                                    TARGET_PAGE_ALIGN(endaddr),
283                                    PAGE_RESERVED);
284                 }
285             } while (!feof(f));
286             fclose(f);
287         }
288         mmap_unlock();
289     }
290 #endif
291 }
292 
293 static inline PageDesc **page_l1_map(target_ulong index)
294 {
295 #if TARGET_LONG_BITS > 32
296     /* Host memory outside guest VM.  For 32-bit targets we have already
297        excluded high addresses.  */
298     if (index > ((target_ulong)L2_SIZE * L1_SIZE))
299         return NULL;
300 #endif
301     return &l1_map[index >> L2_BITS];
302 }
303 
304 static inline PageDesc *page_find_alloc(target_ulong index)
305 {
306     PageDesc **lp, *p;
307     lp = page_l1_map(index);
308     if (!lp)
309         return NULL;
310 
311     p = *lp;
312     if (!p) {
313         /* allocate if not found */
314 #if defined(CONFIG_USER_ONLY)
315         size_t len = sizeof(PageDesc) * L2_SIZE;
316         /* Don't use qemu_malloc because it may recurse.  */
317         p = mmap(0, len, PROT_READ | PROT_WRITE,
318                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
319         *lp = p;
320         if (h2g_valid(p)) {
321             unsigned long addr = h2g(p);
322             page_set_flags(addr & TARGET_PAGE_MASK,
323                            TARGET_PAGE_ALIGN(addr + len),
324                            PAGE_RESERVED);
325         }
326 #else
327         p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
328         *lp = p;
329 #endif
330     }
331     return p + (index & (L2_SIZE - 1));
332 }
333 
334 static inline PageDesc *page_find(target_ulong index)
335 {
336     PageDesc **lp, *p;
337     lp = page_l1_map(index);
338     if (!lp)
339         return NULL;
340 
341     p = *lp;
342     if (!p)
343         return NULL;
344     return p + (index & (L2_SIZE - 1));
345 }
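/* Editor's note: callers index these helpers by page number, e.g.
   page_find(addr >> TARGET_PAGE_BITS), and get back the PageDesc for that
   guest page, or NULL when nothing has been allocated for it yet. */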
346 
347 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
348 {
349     void **lp, **p;
350     PhysPageDesc *pd;
351 
352     p = (void **)l1_phys_map;
353 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
354 
355 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
356 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
357 #endif
358     lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
359     p = *lp;
360     if (!p) {
361         /* allocate if not found */
362         if (!alloc)
363             return NULL;
364         p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
365         memset(p, 0, sizeof(void *) * L1_SIZE);
366         *lp = p;
367     }
368 #endif
369     lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
370     pd = *lp;
371     if (!pd) {
372         int i;
373         /* allocate if not found */
374         if (!alloc)
375             return NULL;
376         pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
377         *lp = pd;
378         for (i = 0; i < L2_SIZE; i++) {
379           pd[i].phys_offset = IO_MEM_UNASSIGNED;
380           pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
381         }
382     }
383     return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
384 }
385 
386 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
387 {
388     return phys_page_find_alloc(index, 0);
389 }
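/* Editor's note: as the PhysPageDesc comment above says, phys_offset packs
   the host/RAM offset of the page with an io_index in its low bits; pages
   that were never registered read back as IO_MEM_UNASSIGNED, which is what
   the allocation loop in phys_page_find_alloc() initializes them to. */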
390 
391 #if !defined(CONFIG_USER_ONLY)
392 static void tlb_protect_code(ram_addr_t ram_addr);
393 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
394                                     target_ulong vaddr);
395 #define mmap_lock() do { } while(0)
396 #define mmap_unlock() do { } while(0)
397 #endif
398 
399 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
400 
401 #if defined(CONFIG_USER_ONLY)
402 /* Currently it is not recommended to allocate big chunks of data in
403    user mode. This will change once a dedicated libc is used. */
404 #define USE_STATIC_CODE_GEN_BUFFER
405 #endif
406 
407 #ifdef USE_STATIC_CODE_GEN_BUFFER
408 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
409 #endif
410 
411 static void code_gen_alloc(unsigned long tb_size)
412 {
413 #ifdef USE_STATIC_CODE_GEN_BUFFER
414     code_gen_buffer = static_code_gen_buffer;
415     code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416     map_exec(code_gen_buffer, code_gen_buffer_size);
417 #else
418     code_gen_buffer_size = tb_size;
419     if (code_gen_buffer_size == 0) {
420 #if defined(CONFIG_USER_ONLY)
421         /* in user mode, phys_ram_size is not meaningful */
422         code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423 #else
424         /* XXX: needs adjustments */
425         code_gen_buffer_size = (unsigned long)(ram_size / 4);
426 #endif
427     }
428     if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
429         code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
430     /* The code gen buffer location may have constraints depending on
431        the host cpu and OS */
432 #if defined(__linux__)
433     {
434         int flags;
435         void *start = NULL;
436 
437         flags = MAP_PRIVATE | MAP_ANONYMOUS;
438 #if defined(__x86_64__)
439         flags |= MAP_32BIT;
440         /* Cannot map more than that */
441         if (code_gen_buffer_size > (800 * 1024 * 1024))
442             code_gen_buffer_size = (800 * 1024 * 1024);
443 #elif defined(__sparc_v9__)
444         // Map the buffer below 2G, so we can use direct calls and branches
445         flags |= MAP_FIXED;
446         start = (void *) 0x60000000UL;
447         if (code_gen_buffer_size > (512 * 1024 * 1024))
448             code_gen_buffer_size = (512 * 1024 * 1024);
449 #elif defined(__arm__)
450         /* Map the buffer below 32M, so we can use direct calls and branches */
451         flags |= MAP_FIXED;
452         start = (void *) 0x01000000UL;
453         if (code_gen_buffer_size > 16 * 1024 * 1024)
454             code_gen_buffer_size = 16 * 1024 * 1024;
455 #endif
456         code_gen_buffer = mmap(start, code_gen_buffer_size,
457                                PROT_WRITE | PROT_READ | PROT_EXEC,
458                                flags, -1, 0);
459         if (code_gen_buffer == MAP_FAILED) {
460             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
461             exit(1);
462         }
463     }
464 #elif defined(__FreeBSD__) || defined(__DragonFly__)
465     {
466         int flags;
467         void *addr = NULL;
468         flags = MAP_PRIVATE | MAP_ANONYMOUS;
469 #if defined(__x86_64__)
470         /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
471          * 0x40000000 is free */
472         flags |= MAP_FIXED;
473         addr = (void *)0x40000000;
474         /* Cannot map more than that */
475         if (code_gen_buffer_size > (800 * 1024 * 1024))
476             code_gen_buffer_size = (800 * 1024 * 1024);
477 #endif
478         code_gen_buffer = mmap(addr, code_gen_buffer_size,
479                                PROT_WRITE | PROT_READ | PROT_EXEC,
480                                flags, -1, 0);
481         if (code_gen_buffer == MAP_FAILED) {
482             fprintf(stderr, "Could not allocate dynamic translator buffer\n");
483             exit(1);
484         }
485     }
486 #else
487     code_gen_buffer = qemu_malloc(code_gen_buffer_size);
488     map_exec(code_gen_buffer, code_gen_buffer_size);
489 #endif
490 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
491     map_exec(code_gen_prologue, sizeof(code_gen_prologue));
492     code_gen_buffer_max_size = code_gen_buffer_size -
493         code_gen_max_block_size();
494     code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
495     tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
496 }
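/* Editor's note (illustrative): in the user-mode (static buffer) case this
   reserves the 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE; in every case it then
   sets code_gen_buffer_max_size so one maximum-sized block still fits at the
   end of the buffer, and allocates
   code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE TranslationBlock
   descriptors in tbs[]. */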
497 
498 /* Must be called before using the QEMU cpus. 'tb_size' is the size
499    (in bytes) allocated to the translation buffer. Zero means default
500    size. */
501 void cpu_exec_init_all(unsigned long tb_size)
502 {
503     cpu_gen_init();
504     code_gen_alloc(tb_size);
505     code_gen_ptr = code_gen_buffer;
506     page_init();
507 #if !defined(CONFIG_USER_ONLY)
508     io_mem_init();
509 #endif
510 }
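/* Editor's note (illustrative): passing tb_size == 0, e.g.
   cpu_exec_init_all(0), selects the default translation-buffer size
   described in the comment above. */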
511 
512 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
513 
514 #define CPU_COMMON_SAVE_VERSION 1
515 
516 static void cpu_common_save(QEMUFile *f, void *opaque)
517 {
518     CPUState *env = opaque;
519 
520     cpu_synchronize_state(env, 0);
521 
522     qemu_put_be32s(f, &env->halted);
523     qemu_put_be32s(f, &env->interrupt_request);
524 }
525 
526 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
527 {
528     CPUState *env = opaque;
529 
530     if (version_id != CPU_COMMON_SAVE_VERSION)
531         return -EINVAL;
532 
533     qemu_get_be32s(f, &env->halted);
534     qemu_get_be32s(f, &env->interrupt_request);
535     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
536        version_id is increased. */
537     env->interrupt_request &= ~0x01;
538     tlb_flush(env, 1);
539     cpu_synchronize_state(env, 1);
540 
541     return 0;
542 }
543 #endif
544 
545 CPUState *qemu_get_cpu(int cpu)
546 {
547     CPUState *env = first_cpu;
548 
549     while (env) {
550         if (env->cpu_index == cpu)
551             break;
552         env = env->next_cpu;
553     }
554 
555     return env;
556 }
557 
558 void cpu_exec_init(CPUState *env)
559 {
560     CPUState **penv;
561     int cpu_index;
562 
563 #if defined(CONFIG_USER_ONLY)
564     cpu_list_lock();
565 #endif
566     env->next_cpu = NULL;
567     penv = &first_cpu;
568     cpu_index = 0;
569     while (*penv != NULL) {
570         penv = &(*penv)->next_cpu;
571         cpu_index++;
572     }
573     env->cpu_index = cpu_index;
574     env->numa_node = 0;
575     TAILQ_INIT(&env->breakpoints);
576     TAILQ_INIT(&env->watchpoints);
577     *penv = env;
578 #if defined(CONFIG_USER_ONLY)
579     cpu_list_unlock();
580 #endif
581 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
582     register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
583                     cpu_common_save, cpu_common_load, env);
584     register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
585                     cpu_save, cpu_load, env);
586 #endif
587 }
588 
589 static inline void invalidate_page_bitmap(PageDesc *p)
590 {
591     if (p->code_bitmap) {
592         qemu_free(p->code_bitmap);
593         p->code_bitmap = NULL;
594     }
595     p->code_write_count = 0;
596 }
597 
598 /* set to NULL all the 'first_tb' fields in all PageDescs */
599 static void page_flush_tb(void)
600 {
601     int i, j;
602     PageDesc *p;
603 
604     for(i = 0; i < L1_SIZE; i++) {
605         p = l1_map[i];
606         if (p) {
607             for(j = 0; j < L2_SIZE; j++) {
608                 p->first_tb = NULL;
609                 invalidate_page_bitmap(p);
610                 p++;
611             }
612         }
613     }
614 }
615 
616 /* flush all the translation blocks */
617 /* XXX: tb_flush is currently not thread safe */
618 void tb_flush(CPUState *env1)
619 {
620     CPUState *env;
621 #if defined(DEBUG_FLUSH)
622     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
623            (unsigned long)(code_gen_ptr - code_gen_buffer),
624            nb_tbs, nb_tbs > 0 ?
625            ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
626 #endif
627     if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
628         cpu_abort(env1, "Internal error: code buffer overflow\n");
629 
630     nb_tbs = 0;
631 
632     for(env = first_cpu; env != NULL; env = env->next_cpu) {
633         memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
634     }
635 
636     memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
637     page_flush_tb();
638 
639     code_gen_ptr = code_gen_buffer;
640     /* XXX: flush processor icache at this point if cache flush is
641        expensive */
642     tb_flush_count++;
643 }
644 
645 #ifdef DEBUG_TB_CHECK
646 
647 static void tb_invalidate_check(target_ulong address)
648 {
649     TranslationBlock *tb;
650     int i;
651     address &= TARGET_PAGE_MASK;
652     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
653         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
654             if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
655                   address >= tb->pc + tb->size)) {
656                 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
657                        address, (long)tb->pc, tb->size);
658             }
659         }
660     }
661 }
662 
663 /* verify that all the pages have correct rights for code */
664 static void tb_page_check(void)
665 {
666     TranslationBlock *tb;
667     int i, flags1, flags2;
668 
669     for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
670         for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
671             flags1 = page_get_flags(tb->pc);
672             flags2 = page_get_flags(tb->pc + tb->size - 1);
673             if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
674                 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
675                        (long)tb->pc, tb->size, flags1, flags2);
676             }
677         }
678     }
679 }
680 
681 static void tb_jmp_check(TranslationBlock *tb)
682 {
683     TranslationBlock *tb1;
684     unsigned int n1;
685 
686     /* suppress any remaining jumps to this TB */
687     tb1 = tb->jmp_first;
688     for(;;) {
689         n1 = (long)tb1 & 3;
690         tb1 = (TranslationBlock *)((long)tb1 & ~3);
691         if (n1 == 2)
692             break;
693         tb1 = tb1->jmp_next[n1];
694     }
695     /* check end of list */
696     if (tb1 != tb) {
697         printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
698     }
699 }
700 
701 #endif
702 
703 /* invalidate one TB */
704 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
705                              int next_offset)
706 {
707     TranslationBlock *tb1;
708     for(;;) {
709         tb1 = *ptb;
710         if (tb1 == tb) {
711             *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
712             break;
713         }
714         ptb = (TranslationBlock **)((char *)tb1 + next_offset);
715     }
716 }
717 
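/* Editor's note: the per-page and jump lists below tag the low two bits of
   each TranslationBlock pointer.  For page lists the tag (0 or 1) records
   which of the TB's two page slots the link belongs to; for jump lists the
   value 2 marks the circular list head (tb->jmp_first = tb | 2), which is
   why the loops mask with ~3 and stop when n1 == 2. */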
718 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
719 {
720     TranslationBlock *tb1;
721     unsigned int n1;
722 
723     for(;;) {
724         tb1 = *ptb;
725         n1 = (long)tb1 & 3;
726         tb1 = (TranslationBlock *)((long)tb1 & ~3);
727         if (tb1 == tb) {
728             *ptb = tb1->page_next[n1];
729             break;
730         }
731         ptb = &tb1->page_next[n1];
732     }
733 }
734 
735 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
736 {
737     TranslationBlock *tb1, **ptb;
738     unsigned int n1;
739 
740     ptb = &tb->jmp_next[n];
741     tb1 = *ptb;
742     if (tb1) {
743         /* find tb(n) in circular list */
744         for(;;) {
745             tb1 = *ptb;
746             n1 = (long)tb1 & 3;
747             tb1 = (TranslationBlock *)((long)tb1 & ~3);
748             if (n1 == n && tb1 == tb)
749                 break;
750             if (n1 == 2) {
751                 ptb = &tb1->jmp_first;
752             } else {
753                 ptb = &tb1->jmp_next[n1];
754             }
755         }
756         /* now we can suppress tb(n) from the list */
757         *ptb = tb->jmp_next[n];
758 
759         tb->jmp_next[n] = NULL;
760     }
761 }
762 
763 /* reset the jump entry 'n' of a TB so that it is not chained to
764    another TB */
765 static inline void tb_reset_jump(TranslationBlock *tb, int n)
766 {
767     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
768 }
769 
770 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
771 {
772     CPUState *env;
773     PageDesc *p;
774     unsigned int h, n1;
775     target_phys_addr_t phys_pc;
776     TranslationBlock *tb1, *tb2;
777 
778     /* remove the TB from the hash list */
779     phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
780     h = tb_phys_hash_func(phys_pc);
781     tb_remove(&tb_phys_hash[h], tb,
782               offsetof(TranslationBlock, phys_hash_next));
783 
784     /* remove the TB from the page list */
785     if (tb->page_addr[0] != page_addr) {
786         p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
787         tb_page_remove(&p->first_tb, tb);
788         invalidate_page_bitmap(p);
789     }
790     if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
791         p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
792         tb_page_remove(&p->first_tb, tb);
793         invalidate_page_bitmap(p);
794     }
795 
796     tb_invalidated_flag = 1;
797 
798     /* remove the TB from the hash list */
799     h = tb_jmp_cache_hash_func(tb->pc);
800     for(env = first_cpu; env != NULL; env = env->next_cpu) {
801         if (env->tb_jmp_cache[h] == tb)
802             env->tb_jmp_cache[h] = NULL;
803     }
804 
805     /* suppress this TB from the two jump lists */
806     tb_jmp_remove(tb, 0);
807     tb_jmp_remove(tb, 1);
808 
809     /* suppress any remaining jumps to this TB */
810     tb1 = tb->jmp_first;
811     for(;;) {
812         n1 = (long)tb1 & 3;
813         if (n1 == 2)
814             break;
815         tb1 = (TranslationBlock *)((long)tb1 & ~3);
816         tb2 = tb1->jmp_next[n1];
817         tb_reset_jump(tb1, n1);
818         tb1->jmp_next[n1] = NULL;
819         tb1 = tb2;
820     }
821     tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
822 
823     tb_phys_invalidate_count++;
824 }
825 
826 static inline void set_bits(uint8_t *tab, int start, int len)
827 {
828     int end, mask, end1;
829 
830     end = start + len;
831     tab += start >> 3;
832     mask = 0xff << (start & 7);
833     if ((start & ~7) == (end & ~7)) {
834         if (start < end) {
835             mask &= ~(0xff << (end & 7));
836             *tab |= mask;
837         }
838     } else {
839         *tab++ |= mask;
840         start = (start + 8) & ~7;
841         end1 = end & ~7;
842         while (start < end1) {
843             *tab++ = 0xff;
844             start += 8;
845         }
846         if (start < end) {
847             mask = ~(0xff << (end & 7));
848             *tab |= mask;
849         }
850     }
851 }
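/* Editor's note (illustrative): set_bits(tab, 3, 10) marks bits 3..12, i.e.
   tab[0] |= 0xf8 and tab[1] |= 0x1f; build_page_bitmap() below uses this to
   record which bytes of a page are covered by translated code. */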
852 
853 static void build_page_bitmap(PageDesc *p)
854 {
855     int n, tb_start, tb_end;
856     TranslationBlock *tb;
857 
858     p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
859 
860     tb = p->first_tb;
861     while (tb != NULL) {
862         n = (long)tb & 3;
863         tb = (TranslationBlock *)((long)tb & ~3);
864         /* NOTE: this is subtle as a TB may span two physical pages */
865         if (n == 0) {
866             /* NOTE: tb_end may be after the end of the page, but
867                it is not a problem */
868             tb_start = tb->pc & ~TARGET_PAGE_MASK;
869             tb_end = tb_start + tb->size;
870             if (tb_end > TARGET_PAGE_SIZE)
871                 tb_end = TARGET_PAGE_SIZE;
872         } else {
873             tb_start = 0;
874             tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
875         }
876         set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
877         tb = tb->page_next[n];
878     }
879 }
880 
881 TranslationBlock *tb_gen_code(CPUState *env,
882                               target_ulong pc, target_ulong cs_base,
883                               int flags, int cflags)
884 {
885     TranslationBlock *tb;
886     uint8_t *tc_ptr;
887     target_ulong phys_pc, phys_page2, virt_page2;
888     int code_gen_size;
889 
890     phys_pc = get_phys_addr_code(env, pc);
891     tb = tb_alloc(pc);
892     if (!tb) {
893         /* flush must be done */
894         tb_flush(env);
895         /* cannot fail at this point */
896         tb = tb_alloc(pc);
897         /* Don't forget to invalidate previous TB info.  */
898         tb_invalidated_flag = 1;
899     }
900     tc_ptr = code_gen_ptr;
901     tb->tc_ptr = tc_ptr;
902     tb->cs_base = cs_base;
903     tb->flags = flags;
904     tb->cflags = cflags;
905 #ifdef CONFIG_TRACE
906     tb->bb_rec = NULL;
907     tb->prev_time = 0;
908 #endif
909     cpu_gen_code(env, tb, &code_gen_size);
910     code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
911 
912     /* check next page if needed */
913     virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
914     phys_page2 = -1;
915     if ((pc & TARGET_PAGE_MASK) != virt_page2) {
916         phys_page2 = get_phys_addr_code(env, virt_page2);
917     }
918     tb_link_phys(tb, phys_pc, phys_page2);
919     return tb;
920 }
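/* Editor's note: a TB whose code crosses a target page boundary gets a
   second physical page (phys_page2 above) and is linked into both pages by
   tb_link_phys(), so invalidating either page also invalidates the TB. */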
921 
922 /* invalidate all TBs which intersect with the target physical page
923    starting in range [start;end[. NOTE: start and end must refer to
924    the same physical page. 'is_cpu_write_access' should be true if called
925    from a real cpu write access: the virtual CPU will exit the current
926    TB if code is modified inside this TB. */
927 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
928                                    int is_cpu_write_access)
929 {
930     TranslationBlock *tb, *tb_next, *saved_tb;
931     CPUState *env = cpu_single_env;
932     target_ulong tb_start, tb_end;
933     PageDesc *p;
934     int n;
935 #ifdef TARGET_HAS_PRECISE_SMC
936     int current_tb_not_found = is_cpu_write_access;
937     TranslationBlock *current_tb = NULL;
938     int current_tb_modified = 0;
939     target_ulong current_pc = 0;
940     target_ulong current_cs_base = 0;
941     int current_flags = 0;
942 #endif /* TARGET_HAS_PRECISE_SMC */
943 
944     p = page_find(start >> TARGET_PAGE_BITS);
945     if (!p)
946         return;
947     if (!p->code_bitmap &&
948         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
949         is_cpu_write_access) {
950         /* build code bitmap */
951         build_page_bitmap(p);
952     }
953 
954     /* we remove all the TBs in the range [start, end[ */
955     /* XXX: see if in some cases it could be faster to invalidate all the code */
956     tb = p->first_tb;
957     while (tb != NULL) {
958         n = (long)tb & 3;
959         tb = (TranslationBlock *)((long)tb & ~3);
960         tb_next = tb->page_next[n];
961         /* NOTE: this is subtle as a TB may span two physical pages */
962         if (n == 0) {
963             /* NOTE: tb_end may be after the end of the page, but
964                it is not a problem */
965             tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
966             tb_end = tb_start + tb->size;
967         } else {
968             tb_start = tb->page_addr[1];
969             tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
970         }
971         if (!(tb_end <= start || tb_start >= end)) {
972 #ifdef TARGET_HAS_PRECISE_SMC
973             if (current_tb_not_found) {
974                 current_tb_not_found = 0;
975                 current_tb = NULL;
976                 if (env->mem_io_pc) {
977                     /* now we have a real cpu fault */
978                     current_tb = tb_find_pc(env->mem_io_pc);
979                 }
980             }
981             if (current_tb == tb &&
982                 (current_tb->cflags & CF_COUNT_MASK) != 1) {
983                 /* If we are modifying the current TB, we must stop
984                 its execution. We could be more precise by checking
985                 that the modification is after the current PC, but it
986                 would require a specialized function to partially
987                 restore the CPU state */
988 
989                 current_tb_modified = 1;
990                 cpu_restore_state(current_tb, env,
991                                   env->mem_io_pc, NULL);
992                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
993                                      &current_flags);
994             }
995 #endif /* TARGET_HAS_PRECISE_SMC */
996             /* we need to do that to handle the case where a signal
997                occurs while doing tb_phys_invalidate() */
998             saved_tb = NULL;
999             if (env) {
1000                 saved_tb = env->current_tb;
1001                 env->current_tb = NULL;
1002             }
1003             tb_phys_invalidate(tb, -1);
1004             if (env) {
1005                 env->current_tb = saved_tb;
1006                 if (env->interrupt_request && env->current_tb)
1007                     cpu_interrupt(env, env->interrupt_request);
1008             }
1009         }
1010         tb = tb_next;
1011     }
1012 #if !defined(CONFIG_USER_ONLY)
1013     /* if no code remaining, no need to continue to use slow writes */
1014     if (!p->first_tb) {
1015         invalidate_page_bitmap(p);
1016         if (is_cpu_write_access) {
1017             tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1018         }
1019     }
1020 #endif
1021 #ifdef TARGET_HAS_PRECISE_SMC
1022     if (current_tb_modified) {
1023         /* we generate a block containing just the instruction
1024            modifying the memory. It will ensure that it cannot modify
1025            itself */
1026         env->current_tb = NULL;
1027         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1028         cpu_resume_from_signal(env, NULL);
1029     }
1030 #endif
1031 }
1032 
1033 /* len must be <= 8 and start must be a multiple of len */
1034 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1035 {
1036     PageDesc *p;
1037     int offset, b;
1038 #if 0
1039     if (1) {
1040         qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1041                   cpu_single_env->mem_io_vaddr, len,
1042                   cpu_single_env->eip,
1043                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1044     }
1045 #endif
1046     p = page_find(start >> TARGET_PAGE_BITS);
1047     if (!p)
1048         return;
1049     if (p->code_bitmap) {
1050         offset = start & ~TARGET_PAGE_MASK;
1051         b = p->code_bitmap[offset >> 3] >> (offset & 7);
1052         if (b & ((1 << len) - 1))
1053             goto do_invalidate;
1054     } else {
1055     do_invalidate:
1056         tb_invalidate_phys_page_range(start, start + len, 1);
1057     }
1058 }
1059 
1060 #if !defined(CONFIG_SOFTMMU)
1061 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1062                                     unsigned long pc, void *puc)
1063 {
1064     TranslationBlock *tb;
1065     PageDesc *p;
1066     int n;
1067 #ifdef TARGET_HAS_PRECISE_SMC
1068     TranslationBlock *current_tb = NULL;
1069     CPUState *env = cpu_single_env;
1070     int current_tb_modified = 0;
1071     target_ulong current_pc = 0;
1072     target_ulong current_cs_base = 0;
1073     int current_flags = 0;
1074 #endif
1075 
1076     addr &= TARGET_PAGE_MASK;
1077     p = page_find(addr >> TARGET_PAGE_BITS);
1078     if (!p)
1079         return;
1080     tb = p->first_tb;
1081 #ifdef TARGET_HAS_PRECISE_SMC
1082     if (tb && pc != 0) {
1083         current_tb = tb_find_pc(pc);
1084     }
1085 #endif
1086     while (tb != NULL) {
1087         n = (long)tb & 3;
1088         tb = (TranslationBlock *)((long)tb & ~3);
1089 #ifdef TARGET_HAS_PRECISE_SMC
1090         if (current_tb == tb &&
1091             (current_tb->cflags & CF_COUNT_MASK) != 1) {
1092                 /* If we are modifying the current TB, we must stop
1093                    its execution. We could be more precise by checking
1094                    that the modification is after the current PC, but it
1095                    would require a specialized function to partially
1096                    restore the CPU state */
1097 
1098             current_tb_modified = 1;
1099             cpu_restore_state(current_tb, env, pc, puc);
1100             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1101                                  &current_flags);
1102         }
1103 #endif /* TARGET_HAS_PRECISE_SMC */
1104         tb_phys_invalidate(tb, addr);
1105         tb = tb->page_next[n];
1106     }
1107     p->first_tb = NULL;
1108 #ifdef TARGET_HAS_PRECISE_SMC
1109     if (current_tb_modified) {
1110         /* we generate a block containing just the instruction
1111            modifying the memory. It will ensure that it cannot modify
1112            itself */
1113         env->current_tb = NULL;
1114         tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1115         cpu_resume_from_signal(env, puc);
1116     }
1117 #endif
1118 }
1119 #endif
1120 
1121 /* add the tb in the target page and protect it if necessary */
1122 static inline void tb_alloc_page(TranslationBlock *tb,
1123                                  unsigned int n, target_ulong page_addr)
1124 {
1125     PageDesc *p;
1126     TranslationBlock *last_first_tb;
1127 
1128     tb->page_addr[n] = page_addr;
1129     p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1130     tb->page_next[n] = p->first_tb;
1131     last_first_tb = p->first_tb;
1132     p->first_tb = (TranslationBlock *)((long)tb | n);
1133     invalidate_page_bitmap(p);
1134 
1135 #if defined(TARGET_HAS_SMC) || 1
1136 
1137 #if defined(CONFIG_USER_ONLY)
1138     if (p->flags & PAGE_WRITE) {
1139         target_ulong addr;
1140         PageDesc *p2;
1141         int prot;
1142 
1143         /* force the host page as non writable (writes will have a
1144            page fault + mprotect overhead) */
1145         page_addr &= qemu_host_page_mask;
1146         prot = 0;
1147         for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1148             addr += TARGET_PAGE_SIZE) {
1149 
1150             p2 = page_find (addr >> TARGET_PAGE_BITS);
1151             if (!p2)
1152                 continue;
1153             prot |= p2->flags;
1154             p2->flags &= ~PAGE_WRITE;
1155             page_get_flags(addr);
1156           }
1157         mprotect(g2h(page_addr), qemu_host_page_size,
1158                  (prot & PAGE_BITS) & ~PAGE_WRITE);
1159 #ifdef DEBUG_TB_INVALIDATE
1160         printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1161                page_addr);
1162 #endif
1163     }
1164 #else
1165     /* if some code is already present, then the pages are already
1166        protected. So we handle the case where only the first TB is
1167        allocated in a physical page */
1168     if (!last_first_tb) {
1169         tlb_protect_code(page_addr);
1170     }
1171 #endif
1172 
1173 #endif /* TARGET_HAS_SMC */
1174 }
1175 
1176 /* Allocate a new translation block. Flush the translation buffer if
1177    too many translation blocks or too much generated code. */
1178 TranslationBlock *tb_alloc(target_ulong pc)
1179 {
1180     TranslationBlock *tb;
1181 
1182     if (nb_tbs >= code_gen_max_blocks ||
1183         (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1184         return NULL;
1185     tb = &tbs[nb_tbs++];
1186     tb->pc = pc;
1187     tb->cflags = 0;
1188     return tb;
1189 }
1190 
1191 void tb_free(TranslationBlock *tb)
1192 {
1193     /* In practice this is mostly used for single-use temporary TBs.
1194        Ignore the hard cases and just back up if this TB happens to
1195        be the last one generated.  */
1196     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1197         code_gen_ptr = tb->tc_ptr;
1198         nb_tbs--;
1199     }
1200 }
1201 
1202 /* add a new TB and link it to the physical page tables. phys_page2 is
1203    (-1) to indicate that only one page contains the TB. */
1204 void tb_link_phys(TranslationBlock *tb,
1205                   target_ulong phys_pc, target_ulong phys_page2)
1206 {
1207     unsigned int h;
1208     TranslationBlock **ptb;
1209 
1210     /* Grab the mmap lock to stop another thread invalidating this TB
1211        before we are done.  */
1212     mmap_lock();
1213     /* add in the physical hash table */
1214     h = tb_phys_hash_func(phys_pc);
1215     ptb = &tb_phys_hash[h];
1216     tb->phys_hash_next = *ptb;
1217     *ptb = tb;
1218 
1219     /* add in the page list */
1220     tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1221     if (phys_page2 != -1)
1222         tb_alloc_page(tb, 1, phys_page2);
1223     else
1224         tb->page_addr[1] = -1;
1225 
1226     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1227     tb->jmp_next[0] = NULL;
1228     tb->jmp_next[1] = NULL;
1229 
1230     /* init original jump addresses */
1231     if (tb->tb_next_offset[0] != 0xffff)
1232         tb_reset_jump(tb, 0);
1233     if (tb->tb_next_offset[1] != 0xffff)
1234         tb_reset_jump(tb, 1);
1235 
1236 #ifdef DEBUG_TB_CHECK
1237     tb_page_check();
1238 #endif
1239     mmap_unlock();
1240 }
1241 
1242 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1243    tb[1].tc_ptr. Return NULL if not found */
1244 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1245 {
1246     int m_min, m_max, m;
1247     unsigned long v;
1248     TranslationBlock *tb;
1249 
1250     if (nb_tbs <= 0)
1251         return NULL;
1252     if (tc_ptr < (unsigned long)code_gen_buffer ||
1253         tc_ptr >= (unsigned long)code_gen_ptr)
1254         return NULL;
1255     /* binary search (cf Knuth) */
1256     m_min = 0;
1257     m_max = nb_tbs - 1;
1258     while (m_min <= m_max) {
1259         m = (m_min + m_max) >> 1;
1260         tb = &tbs[m];
1261         v = (unsigned long)tb->tc_ptr;
1262         if (v == tc_ptr)
1263             return tb;
1264         else if (tc_ptr < v) {
1265             m_max = m - 1;
1266         } else {
1267             m_min = m + 1;
1268         }
1269     }
1270     return &tbs[m_max];
1271 }
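/* Editor's note: the binary search works because TBs are handed out from
   tbs[] in allocation order and their tc_ptr values grow with code_gen_ptr,
   so the array is sorted by tc_ptr.  When tc_ptr falls inside a block the
   loop exits with &tbs[m_max], the block with the greatest tc_ptr below the
   probe, i.e. the TB that contains it. */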
1272 
1273 static void tb_reset_jump_recursive(TranslationBlock *tb);
1274 
1275 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1276 {
1277     TranslationBlock *tb1, *tb_next, **ptb;
1278     unsigned int n1;
1279 
1280     tb1 = tb->jmp_next[n];
1281     if (tb1 != NULL) {
1282         /* find head of list */
1283         for(;;) {
1284             n1 = (long)tb1 & 3;
1285             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1286             if (n1 == 2)
1287                 break;
1288             tb1 = tb1->jmp_next[n1];
1289         }
1290         /* we are now sure that tb jumps to tb1 */
1291         tb_next = tb1;
1292 
1293         /* remove tb from the jmp_first list */
1294         ptb = &tb_next->jmp_first;
1295         for(;;) {
1296             tb1 = *ptb;
1297             n1 = (long)tb1 & 3;
1298             tb1 = (TranslationBlock *)((long)tb1 & ~3);
1299             if (n1 == n && tb1 == tb)
1300                 break;
1301             ptb = &tb1->jmp_next[n1];
1302         }
1303         *ptb = tb->jmp_next[n];
1304         tb->jmp_next[n] = NULL;
1305 
1306         /* suppress the jump to next tb in generated code */
1307         tb_reset_jump(tb, n);
1308 
1309         /* suppress jumps in the tb on which we could have jumped */
1310         tb_reset_jump_recursive(tb_next);
1311     }
1312 }
1313 
1314 static void tb_reset_jump_recursive(TranslationBlock *tb)
1315 {
1316     tb_reset_jump_recursive2(tb, 0);
1317     tb_reset_jump_recursive2(tb, 1);
1318 }
1319 
1320 #if defined(TARGET_HAS_ICE)
1321 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1322 {
1323     target_phys_addr_t addr;
1324     target_ulong pd;
1325     ram_addr_t ram_addr;
1326     PhysPageDesc *p;
1327 
1328     addr = cpu_get_phys_page_debug(env, pc);
1329     p = phys_page_find(addr >> TARGET_PAGE_BITS);
1330     if (!p) {
1331         pd = IO_MEM_UNASSIGNED;
1332     } else {
1333         pd = p->phys_offset;
1334     }
1335     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1336     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1337 }
1338 #endif
1339 
1340 /* Add a watchpoint.  */
1341 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1342                           int flags, CPUWatchpoint **watchpoint)
1343 {
1344     target_ulong len_mask = ~(len - 1);
1345     CPUWatchpoint *wp;
1346 
1347     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1348     if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1349         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1350                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1351         return -EINVAL;
1352     }
1353     wp = qemu_malloc(sizeof(*wp));
1354 
1355     wp->vaddr = addr;
1356     wp->len_mask = len_mask;
1357     wp->flags = flags;
1358 
1359     /* keep all GDB-injected watchpoints in front */
1360     if (flags & BP_GDB)
1361         TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1362     else
1363         TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1364 
1365     tlb_flush_page(env, addr);
1366 
1367     if (watchpoint)
1368         *watchpoint = wp;
1369     return 0;
1370 }
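/* Editor's note (illustrative): the length must be a power of two (1, 2, 4
   or 8) and the address aligned to it, so e.g.
   cpu_watchpoint_insert(env, addr, 4, BP_GDB, NULL) sets a 4-byte GDB-style
   watchpoint, while an unaligned address makes the call return -EINVAL. */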
1371 
1372 /* Remove a specific watchpoint.  */
1373 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1374                           int flags)
1375 {
1376     target_ulong len_mask = ~(len - 1);
1377     CPUWatchpoint *wp;
1378 
1379     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1380         if (addr == wp->vaddr && len_mask == wp->len_mask
1381                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1382             cpu_watchpoint_remove_by_ref(env, wp);
1383             return 0;
1384         }
1385     }
1386     return -ENOENT;
1387 }
1388 
1389 /* Remove a specific watchpoint by reference.  */
1390 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1391 {
1392     TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1393 
1394     tlb_flush_page(env, watchpoint->vaddr);
1395 
1396     qemu_free(watchpoint);
1397 }
1398 
1399 /* Remove all matching watchpoints.  */
1400 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1401 {
1402     CPUWatchpoint *wp, *next;
1403 
1404     TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1405         if (wp->flags & mask)
1406             cpu_watchpoint_remove_by_ref(env, wp);
1407     }
1408 }
1409 
1410 /* Add a breakpoint.  */
1411 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1412                           CPUBreakpoint **breakpoint)
1413 {
1414 #if defined(TARGET_HAS_ICE)
1415     CPUBreakpoint *bp;
1416 
1417     bp = qemu_malloc(sizeof(*bp));
1418 
1419     bp->pc = pc;
1420     bp->flags = flags;
1421 
1422     /* keep all GDB-injected breakpoints in front */
1423     if (flags & BP_GDB)
1424         TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1425     else
1426         TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1427 
1428     breakpoint_invalidate(env, pc);
1429 
1430     if (breakpoint)
1431         *breakpoint = bp;
1432     return 0;
1433 #else
1434     return -ENOSYS;
1435 #endif
1436 }
1437 
1438 /* Remove a specific breakpoint.  */
1439 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1440 {
1441 #if defined(TARGET_HAS_ICE)
1442     CPUBreakpoint *bp;
1443 
1444     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1445         if (bp->pc == pc && bp->flags == flags) {
1446             cpu_breakpoint_remove_by_ref(env, bp);
1447             return 0;
1448         }
1449     }
1450     return -ENOENT;
1451 #else
1452     return -ENOSYS;
1453 #endif
1454 }
1455 
1456 /* Remove a specific breakpoint by reference.  */
1457 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1458 {
1459 #if defined(TARGET_HAS_ICE)
1460     TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1461 
1462     breakpoint_invalidate(env, breakpoint->pc);
1463 
1464     qemu_free(breakpoint);
1465 #endif
1466 }
1467 
1468 /* Remove all matching breakpoints. */
1469 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1470 {
1471 #if defined(TARGET_HAS_ICE)
1472     CPUBreakpoint *bp, *next;
1473 
1474     TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1475         if (bp->flags & mask)
1476             cpu_breakpoint_remove_by_ref(env, bp);
1477     }
1478 #endif
1479 }
1480 
1481 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1482    CPU loop after each instruction */
1483 void cpu_single_step(CPUState *env, int enabled)
1484 {
1485 #if defined(TARGET_HAS_ICE)
1486     if (env->singlestep_enabled != enabled) {
1487         env->singlestep_enabled = enabled;
1488         if (kvm_enabled())
1489             kvm_update_guest_debug(env, 0);
1490         else {
1491             /* must flush all the translated code to avoid inconsistencies */
1492             /* XXX: only flush what is necessary */
1493             tb_flush(env);
1494         }
1495     }
1496 #endif
1497 }
1498 
1499 /* enable or disable low-level logging */
1500 void cpu_set_log(int log_flags)
1501 {
1502     loglevel = log_flags;
1503     if (loglevel && !logfile) {
1504         logfile = fopen(logfilename, log_append ? "a" : "w");
1505         if (!logfile) {
1506             perror(logfilename);
1507             _exit(1);
1508         }
1509 #if !defined(CONFIG_SOFTMMU)
1510         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1511         {
1512             static char logfile_buf[4096];
1513             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1514         }
1515 #else
1516         setvbuf(logfile, NULL, _IOLBF, 0);
1517 #endif
1518         log_append = 1;
1519     }
1520     if (!loglevel && logfile) {
1521         fclose(logfile);
1522         logfile = NULL;
1523     }
1524 }
1525 
1526 void cpu_set_log_filename(const char *filename)
1527 {
1528     logfilename = strdup(filename);
1529     if (logfile) {
1530         fclose(logfile);
1531         logfile = NULL;
1532     }
1533     cpu_set_log(loglevel);
1534 }
1535 
1536 static void cpu_unlink_tb(CPUState *env)
1537 {
1538 #if defined(USE_NPTL)
1539     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1540        problem and hope the cpu will stop of its own accord.  For userspace
1541        emulation this often isn't actually as bad as it sounds.  Often
1542        signals are used primarily to interrupt blocking syscalls.  */
1543 #else
1544     TranslationBlock *tb;
1545     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1546 
1547     tb = env->current_tb;
1548     /* if the cpu is currently executing code, we must unlink it and
1549        all the potentially executing TB */
1550     if (tb && !testandset(&interrupt_lock)) {
1551         env->current_tb = NULL;
1552         tb_reset_jump_recursive(tb);
1553         resetlock(&interrupt_lock);
1554     }
1555 #endif
1556 }
1557 
1558 /* mask must never be zero, except for A20 change call */
1559 void cpu_interrupt(CPUState *env, int mask)
1560 {
1561     int old_mask;
1562 
1563     old_mask = env->interrupt_request;
1564     env->interrupt_request |= mask;
1565 
1566 #ifndef CONFIG_USER_ONLY
1567     /*
1568      * If called from iothread context, wake the target cpu in
1569      * case it's halted.
1570      */
1571     if (!qemu_cpu_self(env)) {
1572         qemu_cpu_kick(env);
1573         return;
1574     }
1575 #endif
1576 
1577     if (use_icount) {
1578         env->icount_decr.u16.high = 0xffff;
1579 #ifndef CONFIG_USER_ONLY
1580         if (!can_do_io(env)
1581             && (mask & ~old_mask) != 0) {
1582             cpu_abort(env, "Raised interrupt while not in I/O function");
1583         }
1584 #endif
1585     } else {
1586         cpu_unlink_tb(env);
1587     }
1588 }
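/* Editor's note: with icount enabled, the pending interrupt is signalled by
   forcing icount_decr.u16.high to 0xffff so the translated code exits at its
   next icount check; otherwise cpu_unlink_tb() breaks the TB chaining so the
   execution loop notices interrupt_request promptly. */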
1589 
1590 void cpu_reset_interrupt(CPUState *env, int mask)
1591 {
1592     env->interrupt_request &= ~mask;
1593 }
1594 
1595 void cpu_exit(CPUState *env)
1596 {
1597     env->exit_request = 1;
1598     cpu_unlink_tb(env);
1599 }
1600 
1601 const CPULogItem cpu_log_items[] = {
1602     { CPU_LOG_TB_OUT_ASM, "out_asm",
1603       "show generated host assembly code for each compiled TB" },
1604     { CPU_LOG_TB_IN_ASM, "in_asm",
1605       "show target assembly code for each compiled TB" },
1606     { CPU_LOG_TB_OP, "op",
1607       "show micro ops for each compiled TB" },
1608     { CPU_LOG_TB_OP_OPT, "op_opt",
1609       "show micro ops "
1610 #ifdef TARGET_I386
1611       "before eflags optimization and "
1612 #endif
1613       "after liveness analysis" },
1614     { CPU_LOG_INT, "int",
1615       "show interrupts/exceptions in short format" },
1616     { CPU_LOG_EXEC, "exec",
1617       "show trace before each executed TB (lots of logs)" },
1618     { CPU_LOG_TB_CPU, "cpu",
1619       "show CPU state before block translation" },
1620 #ifdef TARGET_I386
1621     { CPU_LOG_PCALL, "pcall",
1622       "show protected mode far calls/returns/exceptions" },
1623     { CPU_LOG_RESET, "cpu_reset",
1624       "show CPU state before CPU resets" },
1625 #endif
1626 #ifdef DEBUG_IOPORT
1627     { CPU_LOG_IOPORT, "ioport",
1628       "show all i/o ports accesses" },
1629 #endif
1630     { 0, NULL, NULL },
1631 };
1632 
1633 static int cmp1(const char *s1, int n, const char *s2)
1634 {
1635     if (strlen(s2) != n)
1636         return 0;
1637     return memcmp(s1, s2, n) == 0;
1638 }
1639 
1640 /* takes a comma-separated list of log masks. Returns 0 on error. */
1641 int cpu_str_to_log_mask(const char *str)
1642 {
1643     const CPULogItem *item;
1644     int mask;
1645     const char *p, *p1;
1646 
1647     p = str;
1648     mask = 0;
1649     for(;;) {
1650         p1 = strchr(p, ',');
1651         if (!p1)
1652             p1 = p + strlen(p);
1653         if (cmp1(p, p1 - p, "all")) {
1654             for (item = cpu_log_items; item->mask != 0; item++) {
1655                 mask |= item->mask;
1656             }
1657         } else {
1658             for (item = cpu_log_items; item->mask != 0; item++) {
1659                 if (cmp1(p, p1 - p, item->name))
1660                     goto found;
1661             }
1662             return 0;
1663         }
1664     found:
1665         mask |= item->mask;
1666         if (*p1 != ',')
1667             break;
1668         p = p1 + 1;
1669     }
1670     return mask;
1671 }
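
/* Illustrative usage sketch (not part of the original file): how a front end
   might turn a comma-separated "-d"-style option string into a log level.
   The helper name and its error handling are hypothetical; cpu_set_log() and
   cpu_str_to_log_mask() are the functions defined in this file. */
static void example_enable_logging(const char *items)
{
    int mask = cpu_str_to_log_mask(items);   /* e.g. "in_asm,exec" or "all" */

    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", items);
    } else {
        cpu_set_log(mask);                   /* (re)opens the log file */
    }
}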
1672 
1673 void cpu_abort(CPUState *env, const char *fmt, ...)
1674 {
1675     va_list ap;
1676     va_list ap2;
1677 
1678     va_start(ap, fmt);
1679     va_copy(ap2, ap);
1680     fprintf(stderr, "qemu: fatal: ");
1681     vfprintf(stderr, fmt, ap);
1682     fprintf(stderr, "\n");
1683 #ifdef TARGET_I386
1684     cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1685 #else
1686     cpu_dump_state(env, stderr, fprintf, 0);
1687 #endif
1688     if (qemu_log_enabled()) {
1689         qemu_log("qemu: fatal: ");
1690         qemu_log_vprintf(fmt, ap2);
1691         qemu_log("\n");
1692 #ifdef TARGET_I386
1693         log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1694 #else
1695         log_cpu_state(env, 0);
1696 #endif
1697         qemu_log_flush();
1698         qemu_log_close();
1699     }
1700     va_end(ap2);
1701     va_end(ap);
1702     abort();
1703 }
1704 
1705 CPUState *cpu_copy(CPUState *env)
1706 {
1707     CPUState *new_env = cpu_init(env->cpu_model_str);
1708     CPUState *next_cpu = new_env->next_cpu;
1709     int cpu_index = new_env->cpu_index;
1710 #if defined(TARGET_HAS_ICE)
1711     CPUBreakpoint *bp;
1712     CPUWatchpoint *wp;
1713 #endif
1714 
1715     memcpy(new_env, env, sizeof(CPUState));
1716 
1717     /* Preserve chaining and index. */
1718     new_env->next_cpu = next_cpu;
1719     new_env->cpu_index = cpu_index;
1720 
1721     /* Clone all break/watchpoints.
1722        Note: Once we support ptrace with hw-debug register access, make sure
1723        BP_CPU break/watchpoints are handled correctly on clone. */
1724     TAILQ_INIT(&env->breakpoints);
1725     TAILQ_INIT(&env->watchpoints);
1726 #if defined(TARGET_HAS_ICE)
1727     TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1728         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1729     }
1730     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1731         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1732                               wp->flags, NULL);
1733     }
1734 #endif
1735 
1736     return new_env;
1737 }
1738 
1739 #if !defined(CONFIG_USER_ONLY)
1740 
1741 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1742 {
1743     unsigned int i;
1744 
1745     /* Discard jump cache entries for any tb which might potentially
1746        overlap the flushed page.  */
1747     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1748     memset (&env->tb_jmp_cache[i], 0,
1749 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1750 
1751     i = tb_jmp_cache_hash_page(addr);
1752     memset (&env->tb_jmp_cache[i], 0,
1753 	    TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1754 }
1755 
1756 /* NOTE: if flush_global is true, also flush global entries (not
1757    implemented yet) */
1758 void tlb_flush(CPUState *env, int flush_global)
1759 {
1760     int i;
1761 
1762 #if defined(DEBUG_TLB)
1763     printf("tlb_flush:\n");
1764 #endif
1765     /* must reset current TB so that interrupts cannot modify the
1766        links while we are modifying them */
1767     env->current_tb = NULL;
1768 
1769     for(i = 0; i < CPU_TLB_SIZE; i++) {
1770         int mmu_idx;
1771         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1772             env->tlb_table[mmu_idx][i].addr_read = -1;
1773             env->tlb_table[mmu_idx][i].addr_write = -1;
1774             env->tlb_table[mmu_idx][i].addr_code = -1;
1775         }
1776     }
1777 
1778     memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1779 
1780 #ifdef CONFIG_KQEMU
1781     if (env->kqemu_enabled) {
1782         kqemu_flush(env, flush_global);
1783     }
1784 #endif
1785     tlb_flush_count++;
1786 }
1787 
1788 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1789 {
1790     if (addr == (tlb_entry->addr_read &
1791                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1792         addr == (tlb_entry->addr_write &
1793                  (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1794         addr == (tlb_entry->addr_code &
1795                  (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1796         tlb_entry->addr_read = -1;
1797         tlb_entry->addr_write = -1;
1798         tlb_entry->addr_code = -1;
1799     }
1800 }
1801 
1802 void tlb_flush_page(CPUState *env, target_ulong addr)
1803 {
1804     int i;
1805     int mmu_idx;
1806 
1807 #if defined(DEBUG_TLB)
1808     printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1809 #endif
1810     /* must reset current TB so that interrupts cannot modify the
1811        links while we are modifying them */
1812     env->current_tb = NULL;
1813 
1814     addr &= TARGET_PAGE_MASK;
1815     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1816     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1817         tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1818 
1819     tlb_flush_jmp_cache(env, addr);
1820 
1821 #ifdef CONFIG_KQEMU
1822     if (env->kqemu_enabled) {
1823         kqemu_flush_page(env, addr);
1824     }
1825 #endif
1826 }
1827 
1828 /* update the TLBs so that writes to code in the virtual page 'addr'
1829    can be detected */
1830 static void tlb_protect_code(ram_addr_t ram_addr)
1831 {
1832     cpu_physical_memory_reset_dirty(ram_addr,
1833                                     ram_addr + TARGET_PAGE_SIZE,
1834                                     CODE_DIRTY_FLAG);
1835 }
1836 
1837 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1838    tested for self modifying code */
1839 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1840                                     target_ulong vaddr)
1841 {
1842     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1843 }
1844 
1845 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1846                                          unsigned long start, unsigned long length)
1847 {
1848     unsigned long addr;
1849     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1850         addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1851         if ((addr - start) < length) {
1852             tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1853         }
1854     }
1855 }
1856 
1857 /* Note: start and end must be within the same ram block.  */
1858 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1859                                      int dirty_flags)
1860 {
1861     CPUState *env;
1862     unsigned long length, start1;
1863     int i, mask, len;
1864     uint8_t *p;
1865 
1866     start &= TARGET_PAGE_MASK;
1867     end = TARGET_PAGE_ALIGN(end);
1868 
1869     length = end - start;
1870     if (length == 0)
1871         return;
1872     len = length >> TARGET_PAGE_BITS;
1873 #ifdef CONFIG_KQEMU
1874     /* XXX: should not depend on cpu context */
1875     env = first_cpu;
1876     if (env->kqemu_enabled) {
1877         ram_addr_t addr;
1878         addr = start;
1879         for(i = 0; i < len; i++) {
1880             kqemu_set_notdirty(env, addr);
1881             addr += TARGET_PAGE_SIZE;
1882         }
1883     }
1884 #endif
1885     mask = ~dirty_flags;
1886     p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1887     for(i = 0; i < len; i++)
1888         p[i] &= mask;
1889 
1890     /* we modify the TLB cache so that the dirty bit will be set again
1891        when accessing the range */
1892     start1 = (unsigned long)qemu_get_ram_ptr(start);
1893     /* Check that we don't span multiple blocks - this breaks the
1894        address comparisons below.  */
1895     if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1896             != (end - 1) - start) {
1897         abort();
1898     }
1899 
1900     for(env = first_cpu; env != NULL; env = env->next_cpu) {
1901         int mmu_idx;
1902         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1903             for(i = 0; i < CPU_TLB_SIZE; i++)
1904                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1905                                       start1, length);
1906         }
1907     }
1908 }
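
/* Illustrative sketch (not part of the original file): the usual consumer of
   the dirty bitmap maintained above is a display device that scans its
   framebuffer pages, redraws the dirty ones and then clears only the VGA
   dirty flag.  This assumes the cpu_physical_memory_get_dirty() helper and
   VGA_DIRTY_FLAG from cpu-all.h; the framebuffer offset/size parameters are
   hypothetical. */
static void example_display_refresh(ram_addr_t fb_offset, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_offset; addr < fb_offset + fb_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* Clearing only VGA_DIRTY_FLAG leaves code/migration tracking intact. */
    cpu_physical_memory_reset_dirty(fb_offset, fb_offset + fb_size,
                                    VGA_DIRTY_FLAG);
}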
1909 
1910 int cpu_physical_memory_set_dirty_tracking(int enable)
1911 {
1912     in_migration = enable;
1913     if (kvm_enabled()) {
1914         return kvm_set_migration_log(enable);
1915     }
1916     return 0;
1917 }
1918 
1919 int cpu_physical_memory_get_dirty_tracking(void)
1920 {
1921     return in_migration;
1922 }
1923 
1924 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1925                                    target_phys_addr_t end_addr)
1926 {
1927     int ret = 0;
1928 
1929     if (kvm_enabled())
1930         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1931     return ret;
1932 }
1933 
1934 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1935 {
1936     ram_addr_t ram_addr;
1937     void *p;
1938 
1939     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1940         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1941             + tlb_entry->addend);
1942         ram_addr = qemu_ram_addr_from_host(p);
1943         if (!cpu_physical_memory_is_dirty(ram_addr)) {
1944             tlb_entry->addr_write |= TLB_NOTDIRTY;
1945         }
1946     }
1947 }
1948 
1949 /* update the TLB according to the current state of the dirty bits */
1950 void cpu_tlb_update_dirty(CPUState *env)
1951 {
1952     int i;
1953     int mmu_idx;
1954     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1955         for(i = 0; i < CPU_TLB_SIZE; i++)
1956             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1957     }
1958 }
1959 
1960 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1961 {
1962     if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1963         tlb_entry->addr_write = vaddr;
1964 }
1965 
1966 /* update the TLB corresponding to virtual page vaddr
1967    so that it is no longer dirty */
1968 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1969 {
1970     int i;
1971     int mmu_idx;
1972 
1973     vaddr &= TARGET_PAGE_MASK;
1974     i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1975     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1976         tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1977 }
1978 
1979 /* add a new TLB entry. At most one entry for a given virtual address
1980    is permitted. Return 0 if OK or 2 if the page could not be mapped
1981    (can only happen in non SOFTMMU mode for I/O pages or pages
1982    conflicting with the host address space). */
1983 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1984                       target_phys_addr_t paddr, int prot,
1985                       int mmu_idx, int is_softmmu)
1986 {
1987     PhysPageDesc *p;
1988     unsigned long pd;
1989     unsigned int index;
1990     target_ulong address;
1991     target_ulong code_address;
1992     target_phys_addr_t addend;
1993     int ret;
1994     CPUTLBEntry *te;
1995     CPUWatchpoint *wp;
1996     target_phys_addr_t iotlb;
1997 
1998     p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1999     if (!p) {
2000         pd = IO_MEM_UNASSIGNED;
2001     } else {
2002         pd = p->phys_offset;
2003     }
2004 #if defined(DEBUG_TLB)
2005     printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2006            vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2007 #endif
2008 
2009     ret = 0;
2010     address = vaddr;
2011     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2012         /* IO memory case (romd handled later) */
2013         address |= TLB_MMIO;
2014     }
2015     addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2016     if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2017         /* Normal RAM.  */
2018         iotlb = pd & TARGET_PAGE_MASK;
2019         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2020             iotlb |= IO_MEM_NOTDIRTY;
2021         else
2022             iotlb |= IO_MEM_ROM;
2023     } else {
2024         /* IO handlers are currently passed a physical address.
2025            It would be nice to pass an offset from the base address
2026            of that region.  This would avoid having to special case RAM,
2027            and avoid full address decoding in every device.
2028            We can't use the high bits of pd for this because
2029            IO_MEM_ROMD uses these as a ram address.  */
2030         iotlb = (pd & ~TARGET_PAGE_MASK);
2031         if (p) {
2032             iotlb += p->region_offset;
2033         } else {
2034             iotlb += paddr;
2035         }
2036     }
2037 
2038     code_address = address;
2039     /* Make accesses to pages with watchpoints go via the
2040        watchpoint trap routines.  */
2041     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2042         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2043             iotlb = io_mem_watch + paddr;
2044             /* TODO: The memory case can be optimized by not trapping
2045                reads of pages with a write breakpoint.  */
2046             address |= TLB_MMIO;
2047         }
2048     }
2049 
2050     index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2051     env->iotlb[mmu_idx][index] = iotlb - vaddr;
2052     te = &env->tlb_table[mmu_idx][index];
2053     te->addend = addend - vaddr;
2054     if (prot & PAGE_READ) {
2055         te->addr_read = address;
2056     } else {
2057         te->addr_read = -1;
2058     }
2059 
2060     if (prot & PAGE_EXEC) {
2061         te->addr_code = code_address;
2062     } else {
2063         te->addr_code = -1;
2064     }
2065     if (prot & PAGE_WRITE) {
2066         if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2067             (pd & IO_MEM_ROMD)) {
2068             /* Write access calls the I/O callback.  */
2069             te->addr_write = address | TLB_MMIO;
2070         } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2071                    !cpu_physical_memory_is_dirty(pd)) {
2072             te->addr_write = address | TLB_NOTDIRTY;
2073         } else {
2074             te->addr_write = address;
2075         }
2076     } else {
2077         te->addr_write = -1;
2078     }
2079     return ret;
2080 }
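
/* Illustrative sketch (not part of the original file): tlb_set_page_exec()
   is normally called from a target's MMU fault handler once the guest page
   tables have been walked.  The handler name and the identity translation
   used here are hypothetical; a real target derives paddr/prot from its own
   page-table format. */
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;   /* fake walk */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr,
                             prot, mmu_idx, is_softmmu);
}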
2081 
2082 #else
2083 
2084 void tlb_flush(CPUState *env, int flush_global)
2085 {
2086 }
2087 
2088 void tlb_flush_page(CPUState *env, target_ulong addr)
2089 {
2090 }
2091 
2092 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2093                       target_phys_addr_t paddr, int prot,
2094                       int mmu_idx, int is_softmmu)
2095 {
2096     return 0;
2097 }
2098 
2099 /*
2100  * Walks guest process memory "regions" one by one
2101  * and calls callback function 'fn' for each region.
2102  */
2103 int walk_memory_regions(void *priv,
2104     int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2105 {
2106     unsigned long start, end;
2107     PageDesc *p = NULL;
2108     int i, j, prot, prot1;
2109     int rc = 0;
2110 
2111     start = end = -1;
2112     prot = 0;
2113 
2114     for (i = 0; i <= L1_SIZE; i++) {
2115         p = (i < L1_SIZE) ? l1_map[i] : NULL;
2116         for (j = 0; j < L2_SIZE; j++) {
2117             prot1 = (p == NULL) ? 0 : p[j].flags;
2118             /*
2119              * "region" is one continuous chunk of memory
2120              * that has the same protection flags set.
2121              */
2122             if (prot1 != prot) {
2123                 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2124                 if (start != -1) {
2125                     rc = (*fn)(priv, start, end, prot);
2126                     /* callback can stop iteration by returning != 0 */
2127                     if (rc != 0)
2128                         return (rc);
2129                 }
2130                 if (prot1 != 0)
2131                     start = end;
2132                 else
2133                     start = -1;
2134                 prot = prot1;
2135             }
2136             if (p == NULL)
2137                 break;
2138         }
2139     }
2140     return (rc);
2141 }
2142 
2143 static int dump_region(void *priv, unsigned long start,
2144     unsigned long end, unsigned long prot)
2145 {
2146     FILE *f = (FILE *)priv;
2147 
2148     (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2149         start, end, end - start,
2150         ((prot & PAGE_READ) ? 'r' : '-'),
2151         ((prot & PAGE_WRITE) ? 'w' : '-'),
2152         ((prot & PAGE_EXEC) ? 'x' : '-'));
2153 
2154     return (0);
2155 }
2156 
2157 /* dump memory mappings */
2158 void page_dump(FILE *f)
2159 {
2160     (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2161             "start", "end", "size", "prot");
2162     walk_memory_regions(f, dump_region);
2163 }
2164 
2165 int page_get_flags(target_ulong address)
2166 {
2167     PageDesc *p;
2168 
2169     p = page_find(address >> TARGET_PAGE_BITS);
2170     if (!p)
2171         return 0;
2172     return p->flags;
2173 }
2174 
2175 /* modify the flags of a page and invalidate the code if
2176    necessary. The flag PAGE_WRITE_ORG is set automatically
2177    depending on PAGE_WRITE */
2178 void page_set_flags(target_ulong start, target_ulong end, int flags)
2179 {
2180     PageDesc *p;
2181     target_ulong addr;
2182 
2183     /* mmap_lock should already be held.  */
2184     start = start & TARGET_PAGE_MASK;
2185     end = TARGET_PAGE_ALIGN(end);
2186     if (flags & PAGE_WRITE)
2187         flags |= PAGE_WRITE_ORG;
2188     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2189         p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2190         /* We may be called for host regions that are outside guest
2191            address space.  */
2192         if (!p)
2193             return;
2194         /* if the write protection is set, then we invalidate the code
2195            inside */
2196         if (!(p->flags & PAGE_WRITE) &&
2197             (flags & PAGE_WRITE) &&
2198             p->first_tb) {
2199             tb_invalidate_phys_page(addr, 0, NULL);
2200         }
2201         p->flags = flags;
2202     }
2203 }
2204 
2205 int page_check_range(target_ulong start, target_ulong len, int flags)
2206 {
2207     PageDesc *p;
2208     target_ulong end;
2209     target_ulong addr;
2210 
2211     if (start + len < start)
2212         /* we've wrapped around */
2213         return -1;
2214 
2215     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2216     start = start & TARGET_PAGE_MASK;
2217 
2218     for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2219         p = page_find(addr >> TARGET_PAGE_BITS);
2220         if (!p)
2221             return -1;
2222         if (!(p->flags & PAGE_VALID))
2223             return -1;
2224 
2225         if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2226             return -1;
2227         if (flags & PAGE_WRITE) {
2228             if (!(p->flags & PAGE_WRITE_ORG))
2229                 return -1;
2230             /* unprotect the page if it was put read-only because it
2231                contains translated code */
2232             if (!(p->flags & PAGE_WRITE)) {
2233                 if (!page_unprotect(addr, 0, NULL))
2234                     return -1;
2235             }
2236             return 0;
2237         }
2238     }
2239     return 0;
2240 }
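
/* Illustrative sketch (not part of the original file): user-mode emulation
   typically calls page_check_range() to validate a guest buffer before
   touching it (e.g. when implementing a syscall).  The helper name and its
   boolean return convention are hypothetical. */
static int example_guest_buffer_writable(target_ulong guest_addr,
                                         target_ulong len)
{
    /* page_check_range() returns 0 when every page has the requested
       permission; asking for PAGE_WRITE also unprotects pages that were
       made read-only to guard translated code. */
    return page_check_range(guest_addr, len, PAGE_WRITE) == 0;
}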
2241 
2242 /* called from signal handler: invalidate the code and unprotect the
2243    page. Return TRUE if the fault was successfully handled. */
2244 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2245 {
2246     unsigned int page_index, prot, pindex;
2247     PageDesc *p, *p1;
2248     target_ulong host_start, host_end, addr;
2249 
2250     /* Technically this isn't safe inside a signal handler.  However we
2251        know this only ever happens in a synchronous SEGV handler, so in
2252        practice it seems to be ok.  */
2253     mmap_lock();
2254 
2255     host_start = address & qemu_host_page_mask;
2256     page_index = host_start >> TARGET_PAGE_BITS;
2257     p1 = page_find(page_index);
2258     if (!p1) {
2259         mmap_unlock();
2260         return 0;
2261     }
2262     host_end = host_start + qemu_host_page_size;
2263     p = p1;
2264     prot = 0;
2265     for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2266         prot |= p->flags;
2267         p++;
2268     }
2269     /* if the page was really writable, then we change its
2270        protection back to writable */
2271     if (prot & PAGE_WRITE_ORG) {
2272         pindex = (address - host_start) >> TARGET_PAGE_BITS;
2273         if (!(p1[pindex].flags & PAGE_WRITE)) {
2274             mprotect((void *)g2h(host_start), qemu_host_page_size,
2275                      (prot & PAGE_BITS) | PAGE_WRITE);
2276             p1[pindex].flags |= PAGE_WRITE;
2277             /* and since the content will be modified, we must invalidate
2278                the corresponding translated code. */
2279             tb_invalidate_phys_page(address, pc, puc);
2280 #ifdef DEBUG_TB_CHECK
2281             tb_invalidate_check(address);
2282 #endif
2283             mmap_unlock();
2284             return 1;
2285         }
2286     }
2287     mmap_unlock();
2288     return 0;
2289 }
2290 
2291 static inline void tlb_set_dirty(CPUState *env,
2292                                  unsigned long addr, target_ulong vaddr)
2293 {
2294 }
2295 #endif /* defined(CONFIG_USER_ONLY) */
2296 
2297 #if !defined(CONFIG_USER_ONLY)
2298 
2299 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2300                              ram_addr_t memory, ram_addr_t region_offset);
2301 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2302                            ram_addr_t orig_memory, ram_addr_t region_offset);
2303 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2304                       need_subpage)                                     \
2305     do {                                                                \
2306         if (addr > start_addr)                                          \
2307             start_addr2 = 0;                                            \
2308         else {                                                          \
2309             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2310             if (start_addr2 > 0)                                        \
2311                 need_subpage = 1;                                       \
2312         }                                                               \
2313                                                                         \
2314         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2315             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2316         else {                                                          \
2317             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2318             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2319                 need_subpage = 1;                                       \
2320         }                                                               \
2321     } while (0)
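
/* Worked example (added for illustration), assuming 4 KiB target pages, i.e.
   TARGET_PAGE_MASK == ~0xfff: with start_addr = 0x10000100, orig_size = 0x80
   and addr == start_addr on the first loop iteration below:
     - addr is not greater than start_addr, so
       start_addr2 = 0x10000100 & 0xfff = 0x100; non-zero, so need_subpage = 1;
     - (start_addr + orig_size) - addr = 0x80 < TARGET_PAGE_SIZE, so
       end_addr2 = (0x10000180 - 1) & 0xfff = 0x17f; less than 0xfff, so
       need_subpage = 1 again.
   The mapping only covers bytes 0x100..0x17f of that page, which is why the
   caller then goes through subpage_init()/subpage_register(). */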
2322 
2323 /* register physical memory. 'size' must be a multiple of the target
2324    page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2325    io memory page.  The address used when calling the IO function is
2326    the offset from the start of the region, plus region_offset.  Both
2327    start_addr and region_offset are rounded down to a page boundary
2328    before calculating this offset.  This should not be a problem unless
2329    the low bits of start_addr and region_offset differ.  */
2330 void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2331                                          ram_addr_t size,
2332                                          ram_addr_t phys_offset,
2333                                          ram_addr_t region_offset)
2334 {
2335     target_phys_addr_t addr, end_addr;
2336     PhysPageDesc *p;
2337     CPUState *env;
2338     ram_addr_t orig_size = size;
2339     void *subpage;
2340 
2341 #ifdef CONFIG_KQEMU
2342     /* XXX: should not depend on cpu context */
2343     env = first_cpu;
2344     if (env->kqemu_enabled) {
2345         kqemu_set_phys_mem(start_addr, size, phys_offset);
2346     }
2347 #endif
2348     if (kvm_enabled())
2349         kvm_set_phys_mem(start_addr, size, phys_offset);
2350 
2351     if (phys_offset == IO_MEM_UNASSIGNED) {
2352         region_offset = start_addr;
2353     }
2354     region_offset &= TARGET_PAGE_MASK;
2355     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2356     end_addr = start_addr + (target_phys_addr_t)size;
2357     for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2358         p = phys_page_find(addr >> TARGET_PAGE_BITS);
2359         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2360             ram_addr_t orig_memory = p->phys_offset;
2361             target_phys_addr_t start_addr2, end_addr2;
2362             int need_subpage = 0;
2363 
2364             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2365                           need_subpage);
2366             if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2367                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2368                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2369                                            &p->phys_offset, orig_memory,
2370                                            p->region_offset);
2371                 } else {
2372                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2373                                             >> IO_MEM_SHIFT];
2374                 }
2375                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2376                                  region_offset);
2377                 p->region_offset = 0;
2378             } else {
2379                 p->phys_offset = phys_offset;
2380                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2381                     (phys_offset & IO_MEM_ROMD))
2382                     phys_offset += TARGET_PAGE_SIZE;
2383             }
2384         } else {
2385             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2386             p->phys_offset = phys_offset;
2387             p->region_offset = region_offset;
2388             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2389                 (phys_offset & IO_MEM_ROMD)) {
2390                 phys_offset += TARGET_PAGE_SIZE;
2391             } else {
2392                 target_phys_addr_t start_addr2, end_addr2;
2393                 int need_subpage = 0;
2394 
2395                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2396                               end_addr2, need_subpage);
2397 
2398                 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2399                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
2400                                            &p->phys_offset, IO_MEM_UNASSIGNED,
2401                                            addr & TARGET_PAGE_MASK);
2402                     subpage_register(subpage, start_addr2, end_addr2,
2403                                      phys_offset, region_offset);
2404                     p->region_offset = 0;
2405                 }
2406             }
2407         }
2408         region_offset += TARGET_PAGE_SIZE;
2409     }
2410 
2411     /* since each CPU stores ram addresses in its TLB cache, we must
2412        reset the modified entries */
2413     /* XXX: slow ! */
2414     for(env = first_cpu; env != NULL; env = env->next_cpu) {
2415         tlb_flush(env, 1);
2416     }
2417 }
2418 
2419 /* XXX: temporary until new memory mapping API */
2420 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2421 {
2422     PhysPageDesc *p;
2423 
2424     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2425     if (!p)
2426         return IO_MEM_UNASSIGNED;
2427     return p->phys_offset;
2428 }
2429 
2430 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2431 {
2432     if (kvm_enabled())
2433         kvm_coalesce_mmio_region(addr, size);
2434 }
2435 
2436 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2437 {
2438     if (kvm_enabled())
2439         kvm_uncoalesce_mmio_region(addr, size);
2440 }
2441 
2442 #ifdef CONFIG_KQEMU
2443 /* XXX: better than nothing */
2444 static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2445 {
2446     ram_addr_t addr;
2447     if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2448         fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2449                 (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2450         abort();
2451     }
2452     addr = last_ram_offset;
2453     last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2454     return addr;
2455 }
2456 #endif
2457 
2458 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2459 {
2460     RAMBlock *new_block;
2461 
2462 #ifdef CONFIG_KQEMU
2463     if (kqemu_phys_ram_base) {
2464         return kqemu_ram_alloc(size);
2465     }
2466 #endif
2467 
2468     size = TARGET_PAGE_ALIGN(size);
2469     new_block = qemu_malloc(sizeof(*new_block));
2470 
2471     new_block->host = qemu_vmalloc(size);
2472     new_block->offset = last_ram_offset;
2473     new_block->length = size;
2474 
2475     new_block->next = ram_blocks;
2476     ram_blocks = new_block;
2477 
2478     phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2479         (last_ram_offset + size) >> TARGET_PAGE_BITS);
2480     memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2481            0xff, size >> TARGET_PAGE_BITS);
2482 
2483     last_ram_offset += size;
2484 
2485     if (kvm_enabled())
2486         kvm_setup_guest_memory(new_block->host, size);
2487 
2488     return new_block->offset;
2489 }
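
/* Illustrative sketch (not part of the original file): how a board model
   usually combines qemu_ram_alloc() with the registration function above --
   allocate host backing, then map it at a guest physical address.  The
   16 MiB size and the base address 0 are hypothetical. */
static void example_map_main_ram(void)
{
    ram_addr_t size = 16 * 1024 * 1024;
    ram_addr_t ram_offset = qemu_ram_alloc(size);

    /* IO_MEM_RAM marks the range as plain RAM; region_offset is unused
       for RAM, so 0 is passed. */
    cpu_register_physical_memory_offset(0x00000000, size,
                                        ram_offset | IO_MEM_RAM, 0);
}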
2490 
2491 void qemu_ram_free(ram_addr_t addr)
2492 {
2493     /* TODO: implement this.  */
2494 }
2495 
2496 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2497    With the exception of the softmmu code in this file, this should
2498    only be used for local memory (e.g. video ram) that the device owns,
2499    and knows it isn't going to access beyond the end of the block.
2500 
2501    It should not be used for general purpose DMA.
2502    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2503  */
2504 void *qemu_get_ram_ptr(ram_addr_t addr)
2505 {
2506     RAMBlock *prev;
2507     RAMBlock **prevp;
2508     RAMBlock *block;
2509 
2510 #ifdef CONFIG_KQEMU
2511     if (kqemu_phys_ram_base) {
2512         return kqemu_phys_ram_base + addr;
2513     }
2514 #endif
2515 
2516     prev = NULL;
2517     prevp = &ram_blocks;
2518     block = ram_blocks;
2519     while (block && (block->offset > addr
2520                      || block->offset + block->length <= addr)) {
2521         if (prev)
2522           prevp = &prev->next;
2523         prev = block;
2524         block = block->next;
2525     }
2526     if (!block) {
2527         fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2528         abort();
2529     }
2530     /* Move this entry to the start of the list.  */
2531     if (prev) {
2532         prev->next = block->next;
2533         block->next = *prevp;
2534         *prevp = block;
2535     }
2536     return block->host + (addr - block->offset);
2537 }
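
/* Illustrative sketch (not part of the original file): per the comment
   above, a device that owns a RAM block (e.g. video memory) may cache a
   host pointer to it.  'vram_offset' is a hypothetical ram_addr_t returned
   earlier by qemu_ram_alloc(). */
static uint8_t *example_vram_ptr(ram_addr_t vram_offset)
{
    /* The pointer is only valid within the block that was allocated. */
    return qemu_get_ram_ptr(vram_offset);
}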
2538 
2539 /* Some of the softmmu routines need to translate from a host pointer
2540    (typically a TLB entry) back to a ram offset.  */
2541 ram_addr_t qemu_ram_addr_from_host(void *ptr)
2542 {
2543     RAMBlock *prev;
2544     RAMBlock **prevp;
2545     RAMBlock *block;
2546     uint8_t *host = ptr;
2547 
2548 #ifdef CONFIG_KQEMU
2549     if (kqemu_phys_ram_base) {
2550         return host - kqemu_phys_ram_base;
2551     }
2552 #endif
2553 
2554     prev = NULL;
2555     prevp = &ram_blocks;
2556     block = ram_blocks;
2557     while (block && (block->host > host
2558                      || block->host + block->length <= host)) {
2559         if (prev)
2560           prevp = &prev->next;
2561         prev = block;
2562         block = block->next;
2563     }
2564     if (!block) {
2565         fprintf(stderr, "Bad ram pointer %p\n", ptr);
2566         abort();
2567     }
2568     return block->offset + (host - block->host);
2569 }
2570 
2571 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2572 {
2573 #ifdef DEBUG_UNASSIGNED
2574     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2575 #endif
2576 #if defined(TARGET_SPARC)
2577     do_unassigned_access(addr, 0, 0, 0, 1);
2578 #endif
2579     return 0;
2580 }
2581 
2582 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2583 {
2584 #ifdef DEBUG_UNASSIGNED
2585     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2586 #endif
2587 #if defined(TARGET_SPARC)
2588     do_unassigned_access(addr, 0, 0, 0, 2);
2589 #endif
2590     return 0;
2591 }
2592 
2593 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2594 {
2595 #ifdef DEBUG_UNASSIGNED
2596     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2597 #endif
2598 #if defined(TARGET_SPARC)
2599     do_unassigned_access(addr, 0, 0, 0, 4);
2600 #endif
2601     return 0;
2602 }
2603 
2604 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2605 {
2606 #ifdef DEBUG_UNASSIGNED
2607     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2608 #endif
2609 #if defined(TARGET_SPARC)
2610     do_unassigned_access(addr, 1, 0, 0, 1);
2611 #endif
2612 }
2613 
2614 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2615 {
2616 #ifdef DEBUG_UNASSIGNED
2617     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2618 #endif
2619 #if defined(TARGET_SPARC)
2620     do_unassigned_access(addr, 1, 0, 0, 2);
2621 #endif
2622 }
2623 
2624 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2625 {
2626 #ifdef DEBUG_UNASSIGNED
2627     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2628 #endif
2629 #if defined(TARGET_SPARC)
2630     do_unassigned_access(addr, 1, 0, 0, 4);
2631 #endif
2632 }
2633 
2634 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2635     unassigned_mem_readb,
2636     unassigned_mem_readw,
2637     unassigned_mem_readl,
2638 };
2639 
2640 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2641     unassigned_mem_writeb,
2642     unassigned_mem_writew,
2643     unassigned_mem_writel,
2644 };
2645 
2646 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2647                                 uint32_t val)
2648 {
2649     int dirty_flags;
2650     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2651     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2652 #if !defined(CONFIG_USER_ONLY)
2653         tb_invalidate_phys_page_fast(ram_addr, 1);
2654         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2655 #endif
2656     }
2657     stb_p(qemu_get_ram_ptr(ram_addr), val);
2658 #ifdef CONFIG_KQEMU
2659     if (cpu_single_env->kqemu_enabled &&
2660         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2661         kqemu_modify_page(cpu_single_env, ram_addr);
2662 #endif
2663     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2664     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2665     /* we remove the notdirty callback only if the code has been
2666        flushed */
2667     if (dirty_flags == 0xff)
2668         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2669 }
2670 
2671 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2672                                 uint32_t val)
2673 {
2674     int dirty_flags;
2675     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2676     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2677 #if !defined(CONFIG_USER_ONLY)
2678         tb_invalidate_phys_page_fast(ram_addr, 2);
2679         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2680 #endif
2681     }
2682     stw_p(qemu_get_ram_ptr(ram_addr), val);
2683 #ifdef CONFIG_KQEMU
2684     if (cpu_single_env->kqemu_enabled &&
2685         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2686         kqemu_modify_page(cpu_single_env, ram_addr);
2687 #endif
2688     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2689     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2690     /* we remove the notdirty callback only if the code has been
2691        flushed */
2692     if (dirty_flags == 0xff)
2693         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2694 }
2695 
2696 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2697                                 uint32_t val)
2698 {
2699     int dirty_flags;
2700     dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2701     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2702 #if !defined(CONFIG_USER_ONLY)
2703         tb_invalidate_phys_page_fast(ram_addr, 4);
2704         dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2705 #endif
2706     }
2707     stl_p(qemu_get_ram_ptr(ram_addr), val);
2708 #ifdef CONFIG_KQEMU
2709     if (cpu_single_env->kqemu_enabled &&
2710         (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2711         kqemu_modify_page(cpu_single_env, ram_addr);
2712 #endif
2713     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2714     phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2715     /* we remove the notdirty callback only if the code has been
2716        flushed */
2717     if (dirty_flags == 0xff)
2718         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2719 }
2720 
2721 static CPUReadMemoryFunc *error_mem_read[3] = {
2722     NULL, /* never used */
2723     NULL, /* never used */
2724     NULL, /* never used */
2725 };
2726 
2727 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2728     notdirty_mem_writeb,
2729     notdirty_mem_writew,
2730     notdirty_mem_writel,
2731 };
2732 
2733 /* Generate a debug exception if a watchpoint has been hit.  */
2734 static void check_watchpoint(int offset, int len_mask, int flags)
2735 {
2736     CPUState *env = cpu_single_env;
2737     target_ulong pc, cs_base;
2738     TranslationBlock *tb;
2739     target_ulong vaddr;
2740     CPUWatchpoint *wp;
2741     int cpu_flags;
2742 
2743     if (env->watchpoint_hit) {
2744         /* We re-entered the check after replacing the TB. Now raise
2745          * the debug interrupt so that it will trigger after the
2746          * current instruction. */
2747         cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2748         return;
2749     }
2750     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2751     TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2752         if ((vaddr == (wp->vaddr & len_mask) ||
2753              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2754             wp->flags |= BP_WATCHPOINT_HIT;
2755             if (!env->watchpoint_hit) {
2756                 env->watchpoint_hit = wp;
2757                 tb = tb_find_pc(env->mem_io_pc);
2758                 if (!tb) {
2759                     cpu_abort(env, "check_watchpoint: could not find TB for "
2760                               "pc=%p", (void *)env->mem_io_pc);
2761                 }
2762                 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2763                 tb_phys_invalidate(tb, -1);
2764                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2765                     env->exception_index = EXCP_DEBUG;
2766                 } else {
2767                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2768                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2769                 }
2770                 cpu_resume_from_signal(env, NULL);
2771             }
2772         } else {
2773             wp->flags &= ~BP_WATCHPOINT_HIT;
2774         }
2775     }
2776 }
2777 
2778 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2779    so these check for a hit then pass through to the normal out-of-line
2780    phys routines.  */
2781 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2782 {
2783     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2784     return ldub_phys(addr);
2785 }
2786 
2787 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2788 {
2789     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2790     return lduw_phys(addr);
2791 }
2792 
2793 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2794 {
2795     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2796     return ldl_phys(addr);
2797 }
2798 
2799 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2800                              uint32_t val)
2801 {
2802     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2803     stb_phys(addr, val);
2804 }
2805 
2806 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2807                              uint32_t val)
2808 {
2809     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2810     stw_phys(addr, val);
2811 }
2812 
2813 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2814                              uint32_t val)
2815 {
2816     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2817     stl_phys(addr, val);
2818 }
2819 
2820 static CPUReadMemoryFunc *watch_mem_read[3] = {
2821     watch_mem_readb,
2822     watch_mem_readw,
2823     watch_mem_readl,
2824 };
2825 
2826 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2827     watch_mem_writeb,
2828     watch_mem_writew,
2829     watch_mem_writel,
2830 };
2831 
2832 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2833                                  unsigned int len)
2834 {
2835     uint32_t ret;
2836     unsigned int idx;
2837 
2838     idx = SUBPAGE_IDX(addr);
2839 #if defined(DEBUG_SUBPAGE)
2840     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2841            mmio, len, addr, idx);
2842 #endif
2843     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2844                                        addr + mmio->region_offset[idx][0][len]);
2845 
2846     return ret;
2847 }
2848 
2849 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2850                               uint32_t value, unsigned int len)
2851 {
2852     unsigned int idx;
2853 
2854     idx = SUBPAGE_IDX(addr);
2855 #if defined(DEBUG_SUBPAGE)
2856     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2857            mmio, len, addr, idx, value);
2858 #endif
2859     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2860                                   addr + mmio->region_offset[idx][1][len],
2861                                   value);
2862 }
2863 
2864 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2865 {
2866 #if defined(DEBUG_SUBPAGE)
2867     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2868 #endif
2869 
2870     return subpage_readlen(opaque, addr, 0);
2871 }
2872 
2873 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2874                             uint32_t value)
2875 {
2876 #if defined(DEBUG_SUBPAGE)
2877     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2878 #endif
2879     subpage_writelen(opaque, addr, value, 0);
2880 }
2881 
2882 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2883 {
2884 #if defined(DEBUG_SUBPAGE)
2885     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2886 #endif
2887 
2888     return subpage_readlen(opaque, addr, 1);
2889 }
2890 
2891 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2892                             uint32_t value)
2893 {
2894 #if defined(DEBUG_SUBPAGE)
2895     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2896 #endif
2897     subpage_writelen(opaque, addr, value, 1);
2898 }
2899 
2900 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2901 {
2902 #if defined(DEBUG_SUBPAGE)
2903     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2904 #endif
2905 
2906     return subpage_readlen(opaque, addr, 2);
2907 }
2908 
2909 static void subpage_writel (void *opaque,
2910                          target_phys_addr_t addr, uint32_t value)
2911 {
2912 #if defined(DEBUG_SUBPAGE)
2913     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2914 #endif
2915     subpage_writelen(opaque, addr, value, 2);
2916 }
2917 
2918 static CPUReadMemoryFunc *subpage_read[] = {
2919     &subpage_readb,
2920     &subpage_readw,
2921     &subpage_readl,
2922 };
2923 
2924 static CPUWriteMemoryFunc *subpage_write[] = {
2925     &subpage_writeb,
2926     &subpage_writew,
2927     &subpage_writel,
2928 };
2929 
2930 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2931                              ram_addr_t memory, ram_addr_t region_offset)
2932 {
2933     int idx, eidx;
2934     unsigned int i;
2935 
2936     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2937         return -1;
2938     idx = SUBPAGE_IDX(start);
2939     eidx = SUBPAGE_IDX(end);
2940 #if defined(DEBUG_SUBPAGE)
2941     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2942            mmio, start, end, idx, eidx, memory);
2943 #endif
2944     memory >>= IO_MEM_SHIFT;
2945     for (; idx <= eidx; idx++) {
2946         for (i = 0; i < 4; i++) {
2947             if (io_mem_read[memory][i]) {
2948                 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2949                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2950                 mmio->region_offset[idx][0][i] = region_offset;
2951             }
2952             if (io_mem_write[memory][i]) {
2953                 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2954                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2955                 mmio->region_offset[idx][1][i] = region_offset;
2956             }
2957         }
2958     }
2959 
2960     return 0;
2961 }
2962 
2963 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2964                            ram_addr_t orig_memory, ram_addr_t region_offset)
2965 {
2966     subpage_t *mmio;
2967     int subpage_memory;
2968 
2969     mmio = qemu_mallocz(sizeof(subpage_t));
2970 
2971     mmio->base = base;
2972     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2973 #if defined(DEBUG_SUBPAGE)
2974     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2975            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2976 #endif
2977     *phys = subpage_memory | IO_MEM_SUBPAGE;
2978     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2979                          region_offset);
2980 
2981     return mmio;
2982 }
2983 
2984 static int get_free_io_mem_idx(void)
2985 {
2986     int i;
2987 
2988     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2989         if (!io_mem_used[i]) {
2990             io_mem_used[i] = 1;
2991             return i;
2992         }
2993 
2994     return -1;
2995 }
2996 
2997 /* mem_read and mem_write are arrays of functions containing the
2998    function to access byte (index 0), word (index 1) and dword (index
2999    2). Functions can be omitted with a NULL function pointer.
3000    If io_index is non zero, the corresponding io zone is
3001    modified. If it is zero, a new io zone is allocated. The return
3002    value can be used with cpu_register_physical_memory(). (-1) is
3003    returned on error. */
3004 static int cpu_register_io_memory_fixed(int io_index,
3005                                         CPUReadMemoryFunc **mem_read,
3006                                         CPUWriteMemoryFunc **mem_write,
3007                                         void *opaque)
3008 {
3009     int i, subwidth = 0;
3010 
3011     if (io_index <= 0) {
3012         io_index = get_free_io_mem_idx();
3013         if (io_index == -1)
3014             return io_index;
3015     } else {
3016         io_index >>= IO_MEM_SHIFT;
3017         if (io_index >= IO_MEM_NB_ENTRIES)
3018             return -1;
3019     }
3020 
3021     for(i = 0;i < 3; i++) {
3022         if (!mem_read[i] || !mem_write[i])
3023             subwidth = IO_MEM_SUBWIDTH;
3024         io_mem_read[io_index][i] = mem_read[i];
3025         io_mem_write[io_index][i] = mem_write[i];
3026     }
3027     io_mem_opaque[io_index] = opaque;
3028     return (io_index << IO_MEM_SHIFT) | subwidth;
3029 }
3030 
3031 int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3032                            CPUWriteMemoryFunc **mem_write,
3033                            void *opaque)
3034 {
3035     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3036 }
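
/* Illustrative sketch (not part of the original file): the usual pattern for
   a device model -- provide byte/word/long handler arrays (NULL entries are
   allowed and mark the region as sub-width), register them, then map the
   returned token into the physical address space.  All names and the
   0xf0000000 base address are hypothetical. */
static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;                       /* constant register value */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    /* writes ignored in this sketch */
}

static CPUReadMemoryFunc *example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,          /* 32-bit access only */
};

static CPUWriteMemoryFunc *example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

static void example_register_mmio(void)
{
    int io = cpu_register_io_memory(example_mmio_read,
                                    example_mmio_write, NULL);

    cpu_register_physical_memory_offset(0xf0000000, TARGET_PAGE_SIZE, io, 0);
}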
3037 
3038 void cpu_unregister_io_memory(int io_table_address)
3039 {
3040     int i;
3041     int io_index = io_table_address >> IO_MEM_SHIFT;
3042 
3043     for (i=0;i < 3; i++) {
3044         io_mem_read[io_index][i] = unassigned_mem_read[i];
3045         io_mem_write[io_index][i] = unassigned_mem_write[i];
3046     }
3047     io_mem_opaque[io_index] = NULL;
3048     io_mem_used[io_index] = 0;
3049 }
3050 
3051 static void io_mem_init(void)
3052 {
3053     int i;
3054 
3055     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3056     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3057     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3058     for (i=0; i<5; i++)
3059         io_mem_used[i] = 1;
3060 
3061     io_mem_watch = cpu_register_io_memory(watch_mem_read,
3062                                           watch_mem_write, NULL);
3063 #ifdef CONFIG_KQEMU
3064     if (kqemu_phys_ram_base) {
3065         /* alloc dirty bits array */
3066         phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3067         memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3068     }
3069 #endif
3070 }
3071 
3072 #endif /* !defined(CONFIG_USER_ONLY) */
3073 
3074 /* physical memory access (slow version, mainly for debug) */
3075 #if defined(CONFIG_USER_ONLY)
3076 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3077                             int len, int is_write)
3078 {
3079     int l, flags;
3080     target_ulong page;
3081     void * p;
3082 
3083     while (len > 0) {
3084         page = addr & TARGET_PAGE_MASK;
3085         l = (page + TARGET_PAGE_SIZE) - addr;
3086         if (l > len)
3087             l = len;
3088         flags = page_get_flags(page);
3089         if (!(flags & PAGE_VALID))
3090             return;
3091         if (is_write) {
3092             if (!(flags & PAGE_WRITE))
3093                 return;
3094             /* XXX: this code should not depend on lock_user */
3095             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3096                 /* FIXME - should this return an error rather than just fail? */
3097                 return;
3098             memcpy(p, buf, l);
3099             unlock_user(p, addr, l);
3100         } else {
3101             if (!(flags & PAGE_READ))
3102                 return;
3103             /* XXX: this code should not depend on lock_user */
3104             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3105                 /* FIXME - should this return an error rather than just fail? */
3106                 return;
3107             memcpy(buf, p, l);
3108             unlock_user(p, addr, 0);
3109         }
3110         len -= l;
3111         buf += l;
3112         addr += l;
3113     }
3114 }
3115 
3116 #else
3117 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3118                             int len, int is_write)
3119 {
3120     int l, io_index;
3121     uint8_t *ptr;
3122     uint32_t val;
3123     target_phys_addr_t page;
3124     unsigned long pd;
3125     PhysPageDesc *p;
3126 
3127     while (len > 0) {
3128         page = addr & TARGET_PAGE_MASK;
3129         l = (page + TARGET_PAGE_SIZE) - addr;
3130         if (l > len)
3131             l = len;
3132         p = phys_page_find(page >> TARGET_PAGE_BITS);
3133         if (!p) {
3134             pd = IO_MEM_UNASSIGNED;
3135         } else {
3136             pd = p->phys_offset;
3137         }
3138 
3139         if (is_write) {
3140             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3141                 target_phys_addr_t addr1 = addr;
3142                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3143                 if (p)
3144                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3145                 /* XXX: could force cpu_single_env to NULL to avoid
3146                    potential bugs */
3147                 if (l >= 4 && ((addr1 & 3) == 0)) {
3148                     /* 32 bit write access */
3149                     val = ldl_p(buf);
3150                     io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3151                     l = 4;
3152                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3153                     /* 16 bit write access */
3154                     val = lduw_p(buf);
3155                     io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3156                     l = 2;
3157                 } else {
3158                     /* 8 bit write access */
3159                     val = ldub_p(buf);
3160                     io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3161                     l = 1;
3162                 }
3163             } else {
3164                 unsigned long addr1;
3165                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3166                 /* RAM case */
3167                 ptr = qemu_get_ram_ptr(addr1);
3168                 memcpy(ptr, buf, l);
3169                 if (!cpu_physical_memory_is_dirty(addr1)) {
3170                     /* invalidate code */
3171                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3172                     /* set dirty bit */
3173                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3174                         (0xff & ~CODE_DIRTY_FLAG);
3175                 }
3176             }
3177         } else {
3178             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3179                 !(pd & IO_MEM_ROMD)) {
3180                 target_phys_addr_t addr1 = addr;
3181                 /* I/O case */
3182                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3183                 if (p)
3184                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3185                 if (l >= 4 && ((addr1 & 3) == 0)) {
3186                     /* 32 bit read access */
3187                     val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3188                     stl_p(buf, val);
3189                     l = 4;
3190                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3191                     /* 16 bit read access */
3192                     val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3193                     stw_p(buf, val);
3194                     l = 2;
3195                 } else {
3196                     /* 8 bit read access */
3197                     val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3198                     stb_p(buf, val);
3199                     l = 1;
3200                 }
3201             } else {
3202                 /* RAM case */
3203                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3204                     (addr & ~TARGET_PAGE_MASK);
3205                 memcpy(buf, ptr, l);
3206             }
3207         }
3208         len -= l;
3209         buf += l;
3210         addr += l;
3211     }
3212 }
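/* Illustrative sketch (not part of the original file): a device model
   copying data to and from guest physical memory through the slow path
   above, which dispatches between RAM and MMIO per page and maintains the
   dirty bits.  The buffer size and address are arbitrary example values. */
#if 0
static void example_dma_copy(target_phys_addr_t guest_addr)
{
    uint8_t buf[64];

    /* read 64 bytes from guest RAM or MMIO into a host buffer */
    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 0);
    /* ... transform buf ... */
    /* write it back; code invalidation and dirty bits are handled above */
    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 1);
}
#endif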
3213 
3214 /* used for ROM loading: can write in RAM and ROM */
3215 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3216                                    const uint8_t *buf, int len)
3217 {
3218     int l;
3219     uint8_t *ptr;
3220     target_phys_addr_t page;
3221     unsigned long pd;
3222     PhysPageDesc *p;
3223 
3224     while (len > 0) {
3225         page = addr & TARGET_PAGE_MASK;
3226         l = (page + TARGET_PAGE_SIZE) - addr;
3227         if (l > len)
3228             l = len;
3229         p = phys_page_find(page >> TARGET_PAGE_BITS);
3230         if (!p) {
3231             pd = IO_MEM_UNASSIGNED;
3232         } else {
3233             pd = p->phys_offset;
3234         }
3235 
3236         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3237             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3238             !(pd & IO_MEM_ROMD)) {
3239             /* do nothing */
3240         } else {
3241             unsigned long addr1;
3242             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3243             /* ROM/RAM case */
3244             ptr = qemu_get_ram_ptr(addr1);
3245             memcpy(ptr, buf, l);
3246         }
3247         len -= l;
3248         buf += l;
3249         addr += l;
3250     }
3251 }
3252 
3253 typedef struct {
3254     void *buffer;
3255     target_phys_addr_t addr;
3256     target_phys_addr_t len;
3257 } BounceBuffer;
3258 
3259 static BounceBuffer bounce;
3260 
3261 typedef struct MapClient {
3262     void *opaque;
3263     void (*callback)(void *opaque);
3264     LIST_ENTRY(MapClient) link;
3265 } MapClient;
3266 
3267 static LIST_HEAD(map_client_list, MapClient) map_client_list
3268     = LIST_HEAD_INITIALIZER(map_client_list);
3269 
3270 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3271 {
3272     MapClient *client = qemu_malloc(sizeof(*client));
3273 
3274     client->opaque = opaque;
3275     client->callback = callback;
3276     LIST_INSERT_HEAD(&map_client_list, client, link);
3277     return client;
3278 }
3279 
3280 void cpu_unregister_map_client(void *_client)
3281 {
3282     MapClient *client = (MapClient *)_client;
3283 
3284     LIST_REMOVE(client, link);
3285 }
3286 
3287 static void cpu_notify_map_clients(void)
3288 {
3289     MapClient *client;
3290 
3291     while (!LIST_EMPTY(&map_client_list)) {
3292         client = LIST_FIRST(&map_client_list);
3293         client->callback(client->opaque);
3294         LIST_REMOVE(client, link);
3295     }
3296 }
3297 
3298 /* Map a physical memory region into a host virtual address.
3299  * May map a subset of the requested range, given by and returned in *plen.
3300  * May return NULL if resources needed to perform the mapping are exhausted.
3301  * Use only for reads OR writes - not for read-modify-write operations.
3302  * Use cpu_register_map_client() to know when retrying the map operation is
3303  * likely to succeed.
3304  */
3305 void *cpu_physical_memory_map(target_phys_addr_t addr,
3306                               target_phys_addr_t *plen,
3307                               int is_write)
3308 {
3309     target_phys_addr_t len = *plen;
3310     target_phys_addr_t done = 0;
3311     int l;
3312     uint8_t *ret = NULL;
3313     uint8_t *ptr;
3314     target_phys_addr_t page;
3315     unsigned long pd;
3316     PhysPageDesc *p;
3317     unsigned long addr1;
3318 
3319     while (len > 0) {
3320         page = addr & TARGET_PAGE_MASK;
3321         l = (page + TARGET_PAGE_SIZE) - addr;
3322         if (l > len)
3323             l = len;
3324         p = phys_page_find(page >> TARGET_PAGE_BITS);
3325         if (!p) {
3326             pd = IO_MEM_UNASSIGNED;
3327         } else {
3328             pd = p->phys_offset;
3329         }
3330 
3331         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3332             if (done || bounce.buffer) {
3333                 break;
3334             }
3335             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3336             bounce.addr = addr;
3337             bounce.len = l;
3338             if (!is_write) {
3339                 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3340             }
3341             ptr = bounce.buffer;
3342         } else {
3343             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3344             ptr = qemu_get_ram_ptr(addr1);
3345         }
3346         if (!done) {
3347             ret = ptr;
3348         } else if (ret + done != ptr) {
3349             break;
3350         }
3351 
3352         len -= l;
3353         addr += l;
3354         done += l;
3355     }
3356     *plen = done;
3357     return ret;
3358 }
3359 
3360 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3361  * Will also mark the memory as dirty if is_write == 1.  access_len gives
3362  * the amount of memory that was actually read or written by the caller.
3363  */
3364 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3365                                int is_write, target_phys_addr_t access_len)
3366 {
3367     if (buffer != bounce.buffer) {
3368         if (is_write) {
3369             ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3370             while (access_len) {
3371                 unsigned l;
3372                 l = TARGET_PAGE_SIZE;
3373                 if (l > access_len)
3374                     l = access_len;
3375                 if (!cpu_physical_memory_is_dirty(addr1)) {
3376                     /* invalidate code */
3377                     tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3378                     /* set dirty bit */
3379                     phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3380                         (0xff & ~CODE_DIRTY_FLAG);
3381                 }
3382                 addr1 += l;
3383                 access_len -= l;
3384             }
3385         }
3386         return;
3387     }
3388     if (is_write) {
3389         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3390     }
3391     qemu_free(bounce.buffer);
3392     bounce.buffer = NULL;
3393     cpu_notify_map_clients();
3394 }
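/* Illustrative sketch (not part of the original file): the intended
   map/access/unmap pattern for zero-copy DMA.  Map the region, touch the
   returned host pointer directly, then unmap with the length actually
   transferred so the pages are marked dirty.  If the single bounce buffer
   is busy, cpu_register_map_client() can be used to retry once it is
   released.  The "example_" names are assumptions. */
#if 0
static void example_dma_write(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host;

    host = cpu_physical_memory_map(addr, &plen, 1);
    if (!host) {
        /* resources exhausted: register a map client and retry later */
        return;
    }
    memset(host, 0, plen);                          /* produce the data */
    cpu_physical_memory_unmap(host, plen, 1, plen); /* marks pages dirty */
}
#endif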
3395 
3396 /* warning: addr must be aligned */
3397 uint32_t ldl_phys(target_phys_addr_t addr)
3398 {
3399     int io_index;
3400     uint8_t *ptr;
3401     uint32_t val;
3402     unsigned long pd;
3403     PhysPageDesc *p;
3404 
3405     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3406     if (!p) {
3407         pd = IO_MEM_UNASSIGNED;
3408     } else {
3409         pd = p->phys_offset;
3410     }
3411 
3412     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3413         !(pd & IO_MEM_ROMD)) {
3414         /* I/O case */
3415         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3416         if (p)
3417             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3418         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3419     } else {
3420         /* RAM case */
3421         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3422             (addr & ~TARGET_PAGE_MASK);
3423         val = ldl_p(ptr);
3424     }
3425     return val;
3426 }
3427 
3428 /* warning: addr must be aligned */
3429 uint64_t ldq_phys(target_phys_addr_t addr)
3430 {
3431     int io_index;
3432     uint8_t *ptr;
3433     uint64_t val;
3434     unsigned long pd;
3435     PhysPageDesc *p;
3436 
3437     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3438     if (!p) {
3439         pd = IO_MEM_UNASSIGNED;
3440     } else {
3441         pd = p->phys_offset;
3442     }
3443 
3444     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3445         !(pd & IO_MEM_ROMD)) {
3446         /* I/O case */
3447         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3448         if (p)
3449             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3450 #ifdef TARGET_WORDS_BIGENDIAN
3451         val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3452         val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3453 #else
3454         val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3455         val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3456 #endif
3457     } else {
3458         /* RAM case */
3459         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3460             (addr & ~TARGET_PAGE_MASK);
3461         val = ldq_p(ptr);
3462     }
3463     return val;
3464 }
3465 
3466 /* XXX: optimize */
3467 uint32_t ldub_phys(target_phys_addr_t addr)
3468 {
3469     uint8_t val;
3470     cpu_physical_memory_read(addr, &val, 1);
3471     return val;
3472 }
3473 
3474 /* XXX: optimize */
3475 uint32_t lduw_phys(target_phys_addr_t addr)
3476 {
3477     uint16_t val;
3478     cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3479     return tswap16(val);
3480 }
3481 
3482 /* warning: addr must be aligned. The ram page is not marked as dirty
3483    and the code inside is not invalidated. It is useful if the dirty
3484    bits are used to track modified PTEs */
3485 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3486 {
3487     int io_index;
3488     uint8_t *ptr;
3489     unsigned long pd;
3490     PhysPageDesc *p;
3491 
3492     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3493     if (!p) {
3494         pd = IO_MEM_UNASSIGNED;
3495     } else {
3496         pd = p->phys_offset;
3497     }
3498 
3499     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3500         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3501         if (p)
3502             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3503         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3504     } else {
3505         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3506         ptr = qemu_get_ram_ptr(addr1);
3507         stl_p(ptr, val);
3508 
3509         if (unlikely(in_migration)) {
3510             if (!cpu_physical_memory_is_dirty(addr1)) {
3511                 /* invalidate code */
3512                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3513                 /* set dirty bit */
3514                 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3515                     (0xff & ~CODE_DIRTY_FLAG);
3516             }
3517         }
3518     }
3519 }
3520 
3521 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3522 {
3523     int io_index;
3524     uint8_t *ptr;
3525     unsigned long pd;
3526     PhysPageDesc *p;
3527 
3528     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3529     if (!p) {
3530         pd = IO_MEM_UNASSIGNED;
3531     } else {
3532         pd = p->phys_offset;
3533     }
3534 
3535     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3536         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3537         if (p)
3538             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3539 #ifdef TARGET_WORDS_BIGENDIAN
3540         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3541         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3542 #else
3543         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3544         io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3545 #endif
3546     } else {
3547         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3548             (addr & ~TARGET_PAGE_MASK);
3549         stq_p(ptr, val);
3550     }
3551 }
3552 
3553 /* warning: addr must be aligned */
3554 void stl_phys(target_phys_addr_t addr, uint32_t val)
3555 {
3556     int io_index;
3557     uint8_t *ptr;
3558     unsigned long pd;
3559     PhysPageDesc *p;
3560 
3561     p = phys_page_find(addr >> TARGET_PAGE_BITS);
3562     if (!p) {
3563         pd = IO_MEM_UNASSIGNED;
3564     } else {
3565         pd = p->phys_offset;
3566     }
3567 
3568     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3569         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3570         if (p)
3571             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3572         io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3573     } else {
3574         unsigned long addr1;
3575         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3576         /* RAM case */
3577         ptr = qemu_get_ram_ptr(addr1);
3578         stl_p(ptr, val);
3579         if (!cpu_physical_memory_is_dirty(addr1)) {
3580             /* invalidate code */
3581             tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3582             /* set dirty bit */
3583             phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3584                 (0xff & ~CODE_DIRTY_FLAG);
3585         }
3586     }
3587 }
3588 
3589 /* XXX: optimize */
3590 void stb_phys(target_phys_addr_t addr, uint32_t val)
3591 {
3592     uint8_t v = val;
3593     cpu_physical_memory_write(addr, &v, 1);
3594 }
3595 
3596 /* XXX: optimize */
3597 void stw_phys(target_phys_addr_t addr, uint32_t val)
3598 {
3599     uint16_t v = tswap16(val);
3600     cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3601 }
3602 
3603 /* XXX: optimize */
3604 void stq_phys(target_phys_addr_t addr, uint64_t val)
3605 {
3606     val = tswap64(val);
3607     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3608 }
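/* Illustrative sketch (not part of the original file): the ld*_phys /
   st*_phys helpers above are convenient for walking guest-physical data
   structures such as page tables or descriptor rings.  The descriptor
   layout and address used here are assumptions for illustration only. */
#if 0
static void example_patch_descriptor(target_phys_addr_t desc)
{
    uint32_t status;

    status = ldl_phys(desc);          /* aligned 32 bit load */
    stl_phys(desc, status | 1);       /* store with dirty/code handling */
    stl_phys_notdirty(desc, status);  /* store without dirtying the page */
}
#endif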
3609 
3610 #endif
3611 
3612 /* virtual memory access for debug (includes writing to ROM) */
3613 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3614                         uint8_t *buf, int len, int is_write)
3615 {
3616     int l;
3617     target_phys_addr_t phys_addr;
3618     target_ulong page;
3619 
3620     while (len > 0) {
3621         page = addr & TARGET_PAGE_MASK;
3622         phys_addr = cpu_get_phys_page_debug(env, page);
3623         /* if no physical page mapped, return an error */
3624         if (phys_addr == -1)
3625             return -1;
3626         l = (page + TARGET_PAGE_SIZE) - addr;
3627         if (l > len)
3628             l = len;
3629         phys_addr += (addr & ~TARGET_PAGE_MASK);
3630 #if !defined(CONFIG_USER_ONLY)
3631         if (is_write)
3632             cpu_physical_memory_write_rom(phys_addr, buf, l);
3633         else
3634 #endif
3635             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3636         len -= l;
3637         buf += l;
3638         addr += l;
3639     }
3640     return 0;
3641 }
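/* Illustrative sketch (not part of the original file): how a debugger-style
   front end might read guest virtual memory through cpu_memory_rw_debug().
   The "example_" helper and its parameters are assumptions; "env" stands
   for whatever CPUState the caller already holds. */
#if 0
static int example_read_guest_bytes(CPUState *env, target_ulong vaddr,
                                    uint8_t *out, int max_len)
{
    /* returns -1 as soon as an unmapped page is hit */
    return cpu_memory_rw_debug(env, vaddr, out, max_len, 0);
}
#endif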
3642 
3643 /* in deterministic execution mode, instructions doing device I/Os
3644    must be at the end of the TB */
3645 void cpu_io_recompile(CPUState *env, void *retaddr)
3646 {
3647     TranslationBlock *tb;
3648     uint32_t n, cflags;
3649     target_ulong pc, cs_base;
3650     uint64_t flags;
3651 
3652     tb = tb_find_pc((unsigned long)retaddr);
3653     if (!tb) {
3654         cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3655                   retaddr);
3656     }
3657     n = env->icount_decr.u16.low + tb->icount;
3658     cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3659     /* Calculate how many instructions had been executed before the fault
3660        occurred.  */
3661     n = n - env->icount_decr.u16.low;
3662     /* Generate a new TB ending on the I/O insn.  */
3663     n++;
3664     /* On MIPS and SH, delay slot instructions can only be restarted if
3665        they were already the first instruction in the TB.  If this is not
3666        the first instruction in a TB then re-execute the preceding
3667        branch.  */
3668 #if defined(TARGET_MIPS)
3669     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3670         env->active_tc.PC -= 4;
3671         env->icount_decr.u16.low++;
3672         env->hflags &= ~MIPS_HFLAG_BMASK;
3673     }
3674 #elif defined(TARGET_SH4)
3675     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3676             && n > 1) {
3677         env->pc -= 2;
3678         env->icount_decr.u16.low++;
3679         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3680     }
3681 #endif
3682     /* This should never happen.  */
3683     if (n > CF_COUNT_MASK)
3684         cpu_abort(env, "TB too big during recompile");
3685 
3686     cflags = n | CF_LAST_IO;
3687     pc = tb->pc;
3688     cs_base = tb->cs_base;
3689     flags = tb->flags;
3690     tb_phys_invalidate(tb, -1);
3691     /* FIXME: In theory this could raise an exception.  In practice
3692        we have already translated the block once so it's probably ok.  */
3693     tb_gen_code(env, pc, cs_base, flags, cflags);
3694     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3695        the first in the TB) then we end up generating a whole new TB and
3696        repeating the fault, which is horribly inefficient.
3697        Better would be to execute just this insn uncached, or generate a
3698        second new TB.  */
3699     cpu_resume_from_signal(env, NULL);
3700 }
3701 
3702 void dump_exec_info(FILE *f,
3703                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3704 {
3705     int i, target_code_size, max_target_code_size;
3706     int direct_jmp_count, direct_jmp2_count, cross_page;
3707     TranslationBlock *tb;
3708 
3709     target_code_size = 0;
3710     max_target_code_size = 0;
3711     cross_page = 0;
3712     direct_jmp_count = 0;
3713     direct_jmp2_count = 0;
3714     for(i = 0; i < nb_tbs; i++) {
3715         tb = &tbs[i];
3716         target_code_size += tb->size;
3717         if (tb->size > max_target_code_size)
3718             max_target_code_size = tb->size;
3719         if (tb->page_addr[1] != -1)
3720             cross_page++;
3721         if (tb->tb_next_offset[0] != 0xffff) {
3722             direct_jmp_count++;
3723             if (tb->tb_next_offset[1] != 0xffff) {
3724                 direct_jmp2_count++;
3725             }
3726         }
3727     }
3728     /* XXX: avoid using doubles ? */
3729     cpu_fprintf(f, "Translation buffer state:\n");
3730     cpu_fprintf(f, "gen code size       %ld/%ld\n",
3731                 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3732     cpu_fprintf(f, "TB count            %d/%d\n",
3733                 nb_tbs, code_gen_max_blocks);
3734     cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3735                 nb_tbs ? target_code_size / nb_tbs : 0,
3736                 max_target_code_size);
3737     cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3738                 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3739                 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3740     cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3741             cross_page,
3742             nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3743     cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3744                 direct_jmp_count,
3745                 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3746                 direct_jmp2_count,
3747                 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3748     cpu_fprintf(f, "\nStatistics:\n");
3749     cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3750     cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3751     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3752     tcg_dump_info(f, cpu_fprintf);
3753 }
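/* Illustrative sketch (not part of the original file): dump_exec_info()
   can be pointed at any stdio stream since fprintf matches the
   cpu_fprintf callback signature.  The "example_" wrapper is an
   assumption for illustration. */
#if 0
static void example_show_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif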
3754 
3755 #if !defined(CONFIG_USER_ONLY)
3756 
3757 #define MMUSUFFIX _cmmu
3758 #define GETPC() NULL
3759 #define env cpu_single_env
3760 #define SOFTMMU_CODE_ACCESS
3761 
3762 #define SHIFT 0
3763 #include "softmmu_template.h"
3764 
3765 #define SHIFT 1
3766 #include "softmmu_template.h"
3767 
3768 #define SHIFT 2
3769 #include "softmmu_template.h"
3770 
3771 #define SHIFT 3
3772 #include "softmmu_template.h"
3773 
3774 #undef env
3775 
3776 #endif
3777