1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #define WIN32_LEAN_AND_MEAN
23 #include <windows.h>
24 #else
25 #include <sys/types.h>
26 #include <sys/mman.h>
27 #endif
28 #include <stdlib.h>
29 #include <stdio.h>
30 #include <stdarg.h>
31 #include <string.h>
32 #include <errno.h>
33 #include <unistd.h>
34 #include <inttypes.h>
35
36 #include "cpu.h"
37 #include "exec-all.h"
38 #include "qemu-common.h"
39 #include "tcg.h"
40 #include "hw/hw.h"
41 #if defined(CONFIG_USER_ONLY)
42 #include "qemu.h"
43 #endif
44
45 //#define DEBUG_TB_INVALIDATE
46 //#define DEBUG_FLUSH
47 //#define DEBUG_TLB
48 //#define DEBUG_UNASSIGNED
49
50 /* make various TB consistency checks */
51 //#define DEBUG_TB_CHECK
52 //#define DEBUG_TLB_CHECK
53
54 //#define DEBUG_IOPORT
55 //#define DEBUG_SUBPAGE
56
57 #if !defined(CONFIG_USER_ONLY)
58 /* TB consistency checks only implemented for usermode emulation. */
59 #undef DEBUG_TB_CHECK
60 #endif
61
62 #define SMC_BITMAP_USE_THRESHOLD 10
63
64 #define MMAP_AREA_START 0x00000000
65 #define MMAP_AREA_END 0xa8000000
66
67 #if defined(TARGET_SPARC64)
68 #define TARGET_PHYS_ADDR_SPACE_BITS 41
69 #elif defined(TARGET_SPARC)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 36
71 #elif defined(TARGET_ALPHA)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 42
73 #define TARGET_VIRT_ADDR_SPACE_BITS 42
74 #elif defined(TARGET_PPC64)
75 #define TARGET_PHYS_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_I386) && !defined(USE_KQEMU)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 36
80 #else
81 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82 #define TARGET_PHYS_ADDR_SPACE_BITS 32
83 #endif
84
85 TranslationBlock *tbs;
86 int code_gen_max_blocks;
87 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88 int nb_tbs;
89 /* any access to the tbs or the page table must use this lock */
90 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91
92 #if defined(__arm__) || defined(__sparc_v9__)
93 /* The prologue must be reachable with a direct jump. ARM and Sparc64
94 have limited branch ranges (possibly also PPC) so place it in a
95 section close to the code segment. */
96 #define code_gen_section \
97 __attribute__((__section__(".gen_code"))) \
98 __attribute__((aligned (32)))
99 #else
100 #define code_gen_section \
101 __attribute__((aligned (32)))
102 #endif
103
104 uint8_t code_gen_prologue[1024] code_gen_section;
105 uint8_t *code_gen_buffer;
106 unsigned long code_gen_buffer_size;
107 /* threshold to flush the translated code buffer */
108 unsigned long code_gen_buffer_max_size;
109 uint8_t *code_gen_ptr;
110
111 #if !defined(CONFIG_USER_ONLY)
112 ram_addr_t phys_ram_size;
113 int phys_ram_fd;
114 uint8_t *phys_ram_base;
115 uint8_t *phys_ram_dirty;
116 static ram_addr_t phys_ram_alloc_offset = 0;
117 #endif
118
119 CPUState *first_cpu;
120 /* current CPU in the current thread. It is only valid inside
121 cpu_exec() */
122 CPUState *cpu_single_env;
123 /* 0 = Do not count executed instructions.
124 1 = Precise instruction counting.
125 2 = Adaptive rate instruction counting. */
126 int use_icount = 0;
127 /* Current instruction counter. While executing translated code this may
128 include some instructions that have not yet been executed. */
129 int64_t qemu_icount;
130
131 typedef struct PageDesc {
132 /* list of TBs intersecting this ram page */
133 TranslationBlock *first_tb;
134 /* in order to optimize self modifying code, we count the number
135 of lookups we do to a given page to use a bitmap */
136 unsigned int code_write_count;
137 uint8_t *code_bitmap;
138 #if defined(CONFIG_USER_ONLY)
139 unsigned long flags;
140 #endif
141 } PageDesc;
142
143 typedef struct PhysPageDesc {
144 /* offset in host memory of the page + io_index in the low bits */
145 ram_addr_t phys_offset;
146 } PhysPageDesc;
147
148 #define L2_BITS 10
149 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150 /* XXX: this is a temporary hack for the alpha target.
151 * In the future, this is to be replaced by a multi-level table
152 * to actually be able to handle the complete 64 bit address space.
153 */
154 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155 #else
156 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
157 #endif
158
159 #define L1_SIZE (1 << L1_BITS)
160 #define L2_SIZE (1 << L2_BITS)
161
162 unsigned long qemu_real_host_page_size;
163 unsigned long qemu_host_page_bits;
164 unsigned long qemu_host_page_size;
165 unsigned long qemu_host_page_mask;
166
167 /* XXX: for system emulation, it could just be an array */
168 static PageDesc *l1_map[L1_SIZE];
169 PhysPageDesc **l1_phys_map;
170
171 #if !defined(CONFIG_USER_ONLY)
172 static void io_mem_init(void);
173
174 /* io memory support */
175 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178 static int io_mem_nb;
179 static int io_mem_watch;
180 #endif
181
182 /* log support */
183 const char *logfilename = "/tmp/qemu.log";
184 FILE *logfile;
185 int loglevel;
186 static int log_append = 0;
187
188 /* statistics */
189 static int tlb_flush_count;
190 static int tb_flush_count;
191 static int tb_phys_invalidate_count;
192
193 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194 typedef struct subpage_t {
195 target_phys_addr_t base;
196 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198 void *opaque[TARGET_PAGE_SIZE][2][4];
199 } subpage_t;
200
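/* Make the given host memory range executable so that generated code can
   be run from it. The Win32 variant uses VirtualProtect(); the POSIX
   variant rounds the range out to host page boundaries and uses mprotect(). */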
201 #ifdef _WIN32
202 static void map_exec(void *addr, long size)
203 {
204 DWORD old_protect;
205 VirtualProtect(addr, size,
206 PAGE_EXECUTE_READWRITE, &old_protect);
207
208 }
209 #else
210 static void map_exec(void *addr, long size)
211 {
212 unsigned long start, end, page_size;
213
214 page_size = getpagesize();
215 start = (unsigned long)addr;
216 start &= ~(page_size - 1);
217
218 end = (unsigned long)addr + size;
219 end += page_size - 1;
220 end &= ~(page_size - 1);
221
222 mprotect((void *)start, end - start,
223 PROT_READ | PROT_WRITE | PROT_EXEC);
224 }
225 #endif
226
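/* Compute the host/target page size globals and allocate the first level
   of the physical page table. Under user-mode emulation, the host regions
   already listed in /proc/self/maps are marked PAGE_RESERVED so that the
   guest cannot map over them. */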
227 static void page_init(void)
228 {
229 /* NOTE: we can always suppose that qemu_host_page_size >=
230 TARGET_PAGE_SIZE */
231 #ifdef _WIN32
232 {
233 SYSTEM_INFO system_info;
234 DWORD old_protect;
235
236 GetSystemInfo(&system_info);
237 qemu_real_host_page_size = system_info.dwPageSize;
238 }
239 #else
240 qemu_real_host_page_size = getpagesize();
241 #endif
242 if (qemu_host_page_size == 0)
243 qemu_host_page_size = qemu_real_host_page_size;
244 if (qemu_host_page_size < TARGET_PAGE_SIZE)
245 qemu_host_page_size = TARGET_PAGE_SIZE;
246 qemu_host_page_bits = 0;
247 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248 qemu_host_page_bits++;
249 qemu_host_page_mask = ~(qemu_host_page_size - 1);
250 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
252
253 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
254 {
255 long long startaddr, endaddr;
256 FILE *f;
257 int n;
258
259 mmap_lock();
260 last_brk = (unsigned long)sbrk(0);
261 f = fopen("/proc/self/maps", "r");
262 if (f) {
263 do {
264 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265 if (n == 2) {
266 startaddr = MIN(startaddr,
267 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268 endaddr = MIN(endaddr,
269 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270 page_set_flags(startaddr & TARGET_PAGE_MASK,
271 TARGET_PAGE_ALIGN(endaddr),
272 PAGE_RESERVED);
273 }
274 } while (!feof(f));
275 fclose(f);
276 }
277 mmap_unlock();
278 }
279 #endif
280 }
281
282 static inline PageDesc **page_l1_map(target_ulong index)
283 {
284 #if TARGET_LONG_BITS > 32
285 /* Host memory outside guest VM. For 32-bit targets we have already
286 excluded high addresses. */
287 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
288 return NULL;
289 #endif
290 return &l1_map[index >> L2_BITS];
291 }
292
293 static inline PageDesc *page_find_alloc(target_ulong index)
294 {
295 PageDesc **lp, *p;
296 lp = page_l1_map(index);
297 if (!lp)
298 return NULL;
299
300 p = *lp;
301 if (!p) {
302 /* allocate if not found */
303 #if defined(CONFIG_USER_ONLY)
304 unsigned long addr;
305 size_t len = sizeof(PageDesc) * L2_SIZE;
306 /* Don't use qemu_malloc because it may recurse. */
307 p = mmap(0, len, PROT_READ | PROT_WRITE,
308 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
309 *lp = p;
310 addr = h2g(p);
311 if (addr == (target_ulong)addr) {
312 page_set_flags(addr & TARGET_PAGE_MASK,
313 TARGET_PAGE_ALIGN(addr + len),
314 PAGE_RESERVED);
315 }
316 #else
317 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
318 *lp = p;
319 #endif
320 }
321 return p + (index & (L2_SIZE - 1));
322 }
323
324 static inline PageDesc *page_find(target_ulong index)
325 {
326 PageDesc **lp, *p;
327 lp = page_l1_map(index);
328 if (!lp)
329 return NULL;
330
331 p = *lp;
332 if (!p)
333 return NULL;
334 return p + (index & (L2_SIZE - 1));
335 }
336
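/* Look up the PhysPageDesc for a physical page index in the physical page
   table (one or two levels, depending on TARGET_PHYS_ADDR_SPACE_BITS),
   allocating intermediate tables on demand when 'alloc' is non zero. */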
337 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
338 {
339 void **lp, **p;
340 PhysPageDesc *pd;
341
342 p = (void **)l1_phys_map;
343 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
344
345 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
346 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
347 #endif
348 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
349 p = *lp;
350 if (!p) {
351 /* allocate if not found */
352 if (!alloc)
353 return NULL;
354 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
355 memset(p, 0, sizeof(void *) * L1_SIZE);
356 *lp = p;
357 }
358 #endif
359 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
360 pd = *lp;
361 if (!pd) {
362 int i;
363 /* allocate if not found */
364 if (!alloc)
365 return NULL;
366 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
367 *lp = pd;
368 for (i = 0; i < L2_SIZE; i++)
369 pd[i].phys_offset = IO_MEM_UNASSIGNED;
370 }
371 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
372 }
373
374 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
375 {
376 return phys_page_find_alloc(index, 0);
377 }
378
379 #if !defined(CONFIG_USER_ONLY)
380 static void tlb_protect_code(ram_addr_t ram_addr);
381 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
382 target_ulong vaddr);
383 #define mmap_lock() do { } while(0)
384 #define mmap_unlock() do { } while(0)
385 #endif
386
387 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
388
389 #if defined(CONFIG_USER_ONLY)
390 /* Currently it is not recommended to allocate big chunks of data in
391 user mode. It will change when a dedicated libc is used. */
392 #define USE_STATIC_CODE_GEN_BUFFER
393 #endif
394
395 #ifdef USE_STATIC_CODE_GEN_BUFFER
396 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
397 #endif
398
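/* Allocate the translated code buffer. Either the static buffer is used
   (user mode), or an executable mapping is created, with placement
   constraints that keep it reachable by direct branches on hosts such as
   x86_64 and sparc64. */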
399 static void code_gen_alloc(unsigned long tb_size)
400 {
401 #ifdef USE_STATIC_CODE_GEN_BUFFER
402 code_gen_buffer = static_code_gen_buffer;
403 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
404 map_exec(code_gen_buffer, code_gen_buffer_size);
405 #else
406 code_gen_buffer_size = tb_size;
407 if (code_gen_buffer_size == 0) {
408 #if defined(CONFIG_USER_ONLY)
409 /* in user mode, phys_ram_size is not meaningful */
410 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
411 #else
412 /* XXX: needs adjustment */
413 code_gen_buffer_size = (int)(phys_ram_size / 4);
414 #endif
415 }
416 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
417 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
418 /* The code gen buffer location may have constraints depending on
419 the host cpu and OS */
420 #if defined(__linux__)
421 {
422 int flags;
423 void *start = NULL;
424
425 flags = MAP_PRIVATE | MAP_ANONYMOUS;
426 #if defined(__x86_64__)
427 flags |= MAP_32BIT;
428 /* Cannot map more than that */
429 if (code_gen_buffer_size > (800 * 1024 * 1024))
430 code_gen_buffer_size = (800 * 1024 * 1024);
431 #elif defined(__sparc_v9__)
432 // Map the buffer below 2G, so we can use direct calls and branches
433 flags |= MAP_FIXED;
434 start = (void *) 0x60000000UL;
435 if (code_gen_buffer_size > (512 * 1024 * 1024))
436 code_gen_buffer_size = (512 * 1024 * 1024);
437 #endif
438 code_gen_buffer = mmap(start, code_gen_buffer_size,
439 PROT_WRITE | PROT_READ | PROT_EXEC,
440 flags, -1, 0);
441 if (code_gen_buffer == MAP_FAILED) {
442 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
443 exit(1);
444 }
445 }
446 #else
447 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
448 if (!code_gen_buffer) {
449 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
450 exit(1);
451 }
452 map_exec(code_gen_buffer, code_gen_buffer_size);
453 #endif
454 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
455 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
456 code_gen_buffer_max_size = code_gen_buffer_size -
457 code_gen_max_block_size();
458 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
459 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
460 }
461
462 /* Must be called before using the QEMU cpus. 'tb_size' is the size
463 (in bytes) allocated to the translation buffer. Zero means default
464 size. */
465 void cpu_exec_init_all(unsigned long tb_size)
466 {
467 cpu_gen_init();
468 code_gen_alloc(tb_size);
469 code_gen_ptr = code_gen_buffer;
470 page_init();
471 #if !defined(CONFIG_USER_ONLY)
472 io_mem_init();
473 #endif
474 }
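/* Illustrative call sequence (a sketch, not taken from this file): a
   front end is expected to do roughly
       cpu_exec_init_all(0);           // 0 selects the default buffer size
       env = cpu_init(cpu_model);      // target-specific CPU creation
   before entering the execution loop. */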
475
476 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
477
478 #define CPU_COMMON_SAVE_VERSION 1
479
480 static void cpu_common_save(QEMUFile *f, void *opaque)
481 {
482 CPUState *env = opaque;
483
484 qemu_put_be32s(f, &env->halted);
485 qemu_put_be32s(f, &env->interrupt_request);
486 }
487
488 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
489 {
490 CPUState *env = opaque;
491
492 if (version_id != CPU_COMMON_SAVE_VERSION)
493 return -EINVAL;
494
495 qemu_get_be32s(f, &env->halted);
496 qemu_get_be32s(f, &env->interrupt_request);
497 tlb_flush(env, 1);
498
499 return 0;
500 }
501 #endif
502
503 void cpu_exec_init(CPUState *env)
504 {
505 CPUState **penv;
506 int cpu_index;
507
508 env->next_cpu = NULL;
509 penv = &first_cpu;
510 cpu_index = 0;
511 while (*penv != NULL) {
512 penv = (CPUState **)&(*penv)->next_cpu;
513 cpu_index++;
514 }
515 env->cpu_index = cpu_index;
516 env->nb_watchpoints = 0;
517 *penv = env;
518 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
519 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
520 cpu_common_save, cpu_common_load, env);
521 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
522 cpu_save, cpu_load, env);
523 #endif
524 }
525
526 static inline void invalidate_page_bitmap(PageDesc *p)
527 {
528 if (p->code_bitmap) {
529 qemu_free(p->code_bitmap);
530 p->code_bitmap = NULL;
531 }
532 p->code_write_count = 0;
533 }
534
535 /* set to NULL all the 'first_tb' fields in all PageDescs */
536 static void page_flush_tb(void)
537 {
538 int i, j;
539 PageDesc *p;
540
541 for(i = 0; i < L1_SIZE; i++) {
542 p = l1_map[i];
543 if (p) {
544 for(j = 0; j < L2_SIZE; j++) {
545 p->first_tb = NULL;
546 invalidate_page_bitmap(p);
547 p++;
548 }
549 }
550 }
551 }
552
553 /* flush all the translation blocks */
554 /* XXX: tb_flush is currently not thread safe */
555 void tb_flush(CPUState *env1)
556 {
557 CPUState *env;
558 #if defined(DEBUG_FLUSH)
559 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
560 (unsigned long)(code_gen_ptr - code_gen_buffer),
561 nb_tbs, nb_tbs > 0 ?
562 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
563 #endif
564 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
565 cpu_abort(env1, "Internal error: code buffer overflow\n");
566
567 nb_tbs = 0;
568
569 for(env = first_cpu; env != NULL; env = env->next_cpu) {
570 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
571 }
572
573 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
574 page_flush_tb();
575
576 code_gen_ptr = code_gen_buffer;
577 /* XXX: flush processor icache at this point if cache flush is
578 expensive */
579 tb_flush_count++;
580 }
581
582 #ifdef DEBUG_TB_CHECK
583
584 static void tb_invalidate_check(target_ulong address)
585 {
586 TranslationBlock *tb;
587 int i;
588 address &= TARGET_PAGE_MASK;
589 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
590 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
591 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
592 address >= tb->pc + tb->size)) {
593 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
594 address, (long)tb->pc, tb->size);
595 }
596 }
597 }
598 }
599
600 /* verify that all the pages have correct rights for code */
601 static void tb_page_check(void)
602 {
603 TranslationBlock *tb;
604 int i, flags1, flags2;
605
606 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
607 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
608 flags1 = page_get_flags(tb->pc);
609 flags2 = page_get_flags(tb->pc + tb->size - 1);
610 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
611 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
612 (long)tb->pc, tb->size, flags1, flags2);
613 }
614 }
615 }
616 }
617
618 void tb_jmp_check(TranslationBlock *tb)
619 {
620 TranslationBlock *tb1;
621 unsigned int n1;
622
623 /* suppress any remaining jumps to this TB */
624 tb1 = tb->jmp_first;
625 for(;;) {
626 n1 = (long)tb1 & 3;
627 tb1 = (TranslationBlock *)((long)tb1 & ~3);
628 if (n1 == 2)
629 break;
630 tb1 = tb1->jmp_next[n1];
631 }
632 /* check end of list */
633 if (tb1 != tb) {
634 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
635 }
636 }
637
638 #endif
639
640 /* invalidate one TB */
641 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
642 int next_offset)
643 {
644 TranslationBlock *tb1;
645 for(;;) {
646 tb1 = *ptb;
647 if (tb1 == tb) {
648 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
649 break;
650 }
651 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
652 }
653 }
654
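/* The two low bits of the pointers stored in the per-page TB lists and in
   the jump lists carry extra state: for page lists they give the index
   (0 or 1) of the page slot inside the TB, and the value 2 marks the end
   of a circular jump list. The helpers below mask these bits off with
   '& ~3' before dereferencing. */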
655 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
656 {
657 TranslationBlock *tb1;
658 unsigned int n1;
659
660 for(;;) {
661 tb1 = *ptb;
662 n1 = (long)tb1 & 3;
663 tb1 = (TranslationBlock *)((long)tb1 & ~3);
664 if (tb1 == tb) {
665 *ptb = tb1->page_next[n1];
666 break;
667 }
668 ptb = &tb1->page_next[n1];
669 }
670 }
671
672 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
673 {
674 TranslationBlock *tb1, **ptb;
675 unsigned int n1;
676
677 ptb = &tb->jmp_next[n];
678 tb1 = *ptb;
679 if (tb1) {
680 /* find tb(n) in circular list */
681 for(;;) {
682 tb1 = *ptb;
683 n1 = (long)tb1 & 3;
684 tb1 = (TranslationBlock *)((long)tb1 & ~3);
685 if (n1 == n && tb1 == tb)
686 break;
687 if (n1 == 2) {
688 ptb = &tb1->jmp_first;
689 } else {
690 ptb = &tb1->jmp_next[n1];
691 }
692 }
693 /* now we can suppress tb(n) from the list */
694 *ptb = tb->jmp_next[n];
695
696 tb->jmp_next[n] = NULL;
697 }
698 }
699
700 /* reset the jump entry 'n' of a TB so that it is not chained to
701 another TB */
702 static inline void tb_reset_jump(TranslationBlock *tb, int n)
703 {
704 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
705 }
706
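/* Remove a TB from every data structure that references it: the physical
   hash table, the per-page TB lists, the per-CPU tb_jmp_cache entries and
   the jump chaining lists. 'page_addr' names the page whose TB list the
   caller is already clearing (-1 if none), so that list is skipped here. */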
707 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
708 {
709 CPUState *env;
710 PageDesc *p;
711 unsigned int h, n1;
712 target_phys_addr_t phys_pc;
713 TranslationBlock *tb1, *tb2;
714
715 /* remove the TB from the hash list */
716 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
717 h = tb_phys_hash_func(phys_pc);
718 tb_remove(&tb_phys_hash[h], tb,
719 offsetof(TranslationBlock, phys_hash_next));
720
721 /* remove the TB from the page list */
722 if (tb->page_addr[0] != page_addr) {
723 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
724 tb_page_remove(&p->first_tb, tb);
725 invalidate_page_bitmap(p);
726 }
727 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
728 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
729 tb_page_remove(&p->first_tb, tb);
730 invalidate_page_bitmap(p);
731 }
732
733 tb_invalidated_flag = 1;
734
735 /* remove the TB from the hash list */
736 h = tb_jmp_cache_hash_func(tb->pc);
737 for(env = first_cpu; env != NULL; env = env->next_cpu) {
738 if (env->tb_jmp_cache[h] == tb)
739 env->tb_jmp_cache[h] = NULL;
740 }
741
742 /* suppress this TB from the two jump lists */
743 tb_jmp_remove(tb, 0);
744 tb_jmp_remove(tb, 1);
745
746 /* suppress any remaining jumps to this TB */
747 tb1 = tb->jmp_first;
748 for(;;) {
749 n1 = (long)tb1 & 3;
750 if (n1 == 2)
751 break;
752 tb1 = (TranslationBlock *)((long)tb1 & ~3);
753 tb2 = tb1->jmp_next[n1];
754 tb_reset_jump(tb1, n1);
755 tb1->jmp_next[n1] = NULL;
756 tb1 = tb2;
757 }
758 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
759
760 tb_phys_invalidate_count++;
761 }
762
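/* Set 'len' bits starting at bit 'start' in the bitmap 'tab'.
   Worked example: set_bits(tab, 3, 10) sets bits 3..12, i.e. the top five
   bits of tab[0] and the low five bits of tab[1]. */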
763 static inline void set_bits(uint8_t *tab, int start, int len)
764 {
765 int end, mask, end1;
766
767 end = start + len;
768 tab += start >> 3;
769 mask = 0xff << (start & 7);
770 if ((start & ~7) == (end & ~7)) {
771 if (start < end) {
772 mask &= ~(0xff << (end & 7));
773 *tab |= mask;
774 }
775 } else {
776 *tab++ |= mask;
777 start = (start + 8) & ~7;
778 end1 = end & ~7;
779 while (start < end1) {
780 *tab++ = 0xff;
781 start += 8;
782 }
783 if (start < end) {
784 mask = ~(0xff << (end & 7));
785 *tab |= mask;
786 }
787 }
788 }
789
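/* Build the SMC bitmap of a page: one bit per byte that is covered by
   translated code, so that tb_invalidate_phys_page_fast() can quickly
   decide whether a write actually touches translated code. */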
790 static void build_page_bitmap(PageDesc *p)
791 {
792 int n, tb_start, tb_end;
793 TranslationBlock *tb;
794
795 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
796 if (!p->code_bitmap)
797 return;
798
799 tb = p->first_tb;
800 while (tb != NULL) {
801 n = (long)tb & 3;
802 tb = (TranslationBlock *)((long)tb & ~3);
803 /* NOTE: this is subtle as a TB may span two physical pages */
804 if (n == 0) {
805 /* NOTE: tb_end may be after the end of the page, but
806 it is not a problem */
807 tb_start = tb->pc & ~TARGET_PAGE_MASK;
808 tb_end = tb_start + tb->size;
809 if (tb_end > TARGET_PAGE_SIZE)
810 tb_end = TARGET_PAGE_SIZE;
811 } else {
812 tb_start = 0;
813 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
814 }
815 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
816 tb = tb->page_next[n];
817 }
818 }
819
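/* Translate a new TB for (pc, cs_base, flags). If the TB cache is full,
   the whole cache is flushed and allocation is retried; the new TB is then
   linked into the physical page tables via tb_link_phys(). */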
820 TranslationBlock *tb_gen_code(CPUState *env,
821 target_ulong pc, target_ulong cs_base,
822 int flags, int cflags)
823 {
824 TranslationBlock *tb;
825 uint8_t *tc_ptr;
826 target_ulong phys_pc, phys_page2, virt_page2;
827 int code_gen_size;
828
829 phys_pc = get_phys_addr_code(env, pc);
830 tb = tb_alloc(pc);
831 if (!tb) {
832 /* flush must be done */
833 tb_flush(env);
834 /* cannot fail at this point */
835 tb = tb_alloc(pc);
836 /* Don't forget to invalidate previous TB info. */
837 tb_invalidated_flag = 1;
838 }
839 tc_ptr = code_gen_ptr;
840 tb->tc_ptr = tc_ptr;
841 tb->cs_base = cs_base;
842 tb->flags = flags;
843 tb->cflags = cflags;
844 #ifdef CONFIG_TRACE
845 tb->bb_rec = NULL;
846 tb->prev_time = 0;
847 #endif
848 cpu_gen_code(env, tb, &code_gen_size);
849 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
850
851 /* check next page if needed */
852 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
853 phys_page2 = -1;
854 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
855 phys_page2 = get_phys_addr_code(env, virt_page2);
856 }
857 tb_link_phys(tb, phys_pc, phys_page2);
858 return tb;
859 }
860
861 /* invalidate all TBs which intersect with the target physical page
862 starting in range [start;end[. NOTE: start and end must refer to
863 the same physical page. 'is_cpu_write_access' should be true if called
864 from a real cpu write access: the virtual CPU will exit the current
865 TB if code is modified inside this TB. */
866 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
867 int is_cpu_write_access)
868 {
869 int n, current_tb_modified, current_tb_not_found, current_flags;
870 CPUState *env = cpu_single_env;
871 PageDesc *p;
872 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
873 target_ulong tb_start, tb_end;
874 target_ulong current_pc, current_cs_base;
875
876 p = page_find(start >> TARGET_PAGE_BITS);
877 if (!p)
878 return;
879 if (!p->code_bitmap &&
880 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
881 is_cpu_write_access) {
882 /* build code bitmap */
883 build_page_bitmap(p);
884 }
885
886 /* we remove all the TBs in the range [start, end[ */
887 /* XXX: see if in some cases it could be faster to invalidate all the code */
888 current_tb_not_found = is_cpu_write_access;
889 current_tb_modified = 0;
890 current_tb = NULL; /* avoid warning */
891 current_pc = 0; /* avoid warning */
892 current_cs_base = 0; /* avoid warning */
893 current_flags = 0; /* avoid warning */
894 tb = p->first_tb;
895 while (tb != NULL) {
896 n = (long)tb & 3;
897 tb = (TranslationBlock *)((long)tb & ~3);
898 tb_next = tb->page_next[n];
899 /* NOTE: this is subtle as a TB may span two physical pages */
900 if (n == 0) {
901 /* NOTE: tb_end may be after the end of the page, but
902 it is not a problem */
903 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
904 tb_end = tb_start + tb->size;
905 } else {
906 tb_start = tb->page_addr[1];
907 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
908 }
909 if (!(tb_end <= start || tb_start >= end)) {
910 #ifdef TARGET_HAS_PRECISE_SMC
911 if (current_tb_not_found) {
912 current_tb_not_found = 0;
913 current_tb = NULL;
914 if (env->mem_io_pc) {
915 /* now we have a real cpu fault */
916 current_tb = tb_find_pc(env->mem_io_pc);
917 }
918 }
919 if (current_tb == tb &&
920 (current_tb->cflags & CF_COUNT_MASK) != 1) {
921 /* If we are modifying the current TB, we must stop
922 its execution. We could be more precise by checking
923 that the modification is after the current PC, but it
924 would require a specialized function to partially
925 restore the CPU state */
926
927 current_tb_modified = 1;
928 cpu_restore_state(current_tb, env,
929 env->mem_io_pc, NULL);
930 #if defined(TARGET_I386)
931 current_flags = env->hflags;
932 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
933 current_cs_base = (target_ulong)env->segs[R_CS].base;
934 current_pc = current_cs_base + env->eip;
935 #else
936 #error unsupported CPU
937 #endif
938 }
939 #endif /* TARGET_HAS_PRECISE_SMC */
940 /* we need to do that to handle the case where a signal
941 occurs while doing tb_phys_invalidate() */
942 saved_tb = NULL;
943 if (env) {
944 saved_tb = env->current_tb;
945 env->current_tb = NULL;
946 }
947 tb_phys_invalidate(tb, -1);
948 if (env) {
949 env->current_tb = saved_tb;
950 if (env->interrupt_request && env->current_tb)
951 cpu_interrupt(env, env->interrupt_request);
952 }
953 }
954 tb = tb_next;
955 }
956 #if !defined(CONFIG_USER_ONLY)
957 /* if no code remaining, no need to continue to use slow writes */
958 if (!p->first_tb) {
959 invalidate_page_bitmap(p);
960 if (is_cpu_write_access) {
961 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
962 }
963 }
964 #endif
965 #ifdef TARGET_HAS_PRECISE_SMC
966 if (current_tb_modified) {
967 /* we generate a block containing just the instruction
968 modifying the memory. It will ensure that it cannot modify
969 itself */
970 env->current_tb = NULL;
971 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
972 cpu_resume_from_signal(env, NULL);
973 }
974 #endif
975 }
976
977 /* len must be <= 8 and start must be a multiple of len */
978 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
979 {
980 PageDesc *p;
981 int offset, b;
982 #if 0
983 if (1) {
984 if (loglevel) {
985 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
986 cpu_single_env->mem_io_vaddr, len,
987 cpu_single_env->eip,
988 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
989 }
990 }
991 #endif
992 p = page_find(start >> TARGET_PAGE_BITS);
993 if (!p)
994 return;
995 if (p->code_bitmap) {
996 offset = start & ~TARGET_PAGE_MASK;
997 b = p->code_bitmap[offset >> 3] >> (offset & 7);
998 if (b & ((1 << len) - 1))
999 goto do_invalidate;
1000 } else {
1001 do_invalidate:
1002 tb_invalidate_phys_page_range(start, start + len, 1);
1003 }
1004 }
1005
1006 #if !defined(CONFIG_SOFTMMU)
1007 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1008 unsigned long pc, void *puc)
1009 {
1010 int n, current_flags, current_tb_modified;
1011 target_ulong current_pc, current_cs_base;
1012 PageDesc *p;
1013 TranslationBlock *tb, *current_tb;
1014 #ifdef TARGET_HAS_PRECISE_SMC
1015 CPUState *env = cpu_single_env;
1016 #endif
1017
1018 addr &= TARGET_PAGE_MASK;
1019 p = page_find(addr >> TARGET_PAGE_BITS);
1020 if (!p)
1021 return;
1022 tb = p->first_tb;
1023 current_tb_modified = 0;
1024 current_tb = NULL;
1025 current_pc = 0; /* avoid warning */
1026 current_cs_base = 0; /* avoid warning */
1027 current_flags = 0; /* avoid warning */
1028 #ifdef TARGET_HAS_PRECISE_SMC
1029 if (tb && pc != 0) {
1030 current_tb = tb_find_pc(pc);
1031 }
1032 #endif
1033 while (tb != NULL) {
1034 n = (long)tb & 3;
1035 tb = (TranslationBlock *)((long)tb & ~3);
1036 #ifdef TARGET_HAS_PRECISE_SMC
1037 if (current_tb == tb &&
1038 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1039 /* If we are modifying the current TB, we must stop
1040 its execution. We could be more precise by checking
1041 that the modification is after the current PC, but it
1042 would require a specialized function to partially
1043 restore the CPU state */
1044
1045 current_tb_modified = 1;
1046 cpu_restore_state(current_tb, env, pc, puc);
1047 #if defined(TARGET_I386)
1048 current_flags = env->hflags;
1049 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1050 current_cs_base = (target_ulong)env->segs[R_CS].base;
1051 current_pc = current_cs_base + env->eip;
1052 #else
1053 #error unsupported CPU
1054 #endif
1055 }
1056 #endif /* TARGET_HAS_PRECISE_SMC */
1057 tb_phys_invalidate(tb, addr);
1058 tb = tb->page_next[n];
1059 }
1060 p->first_tb = NULL;
1061 #ifdef TARGET_HAS_PRECISE_SMC
1062 if (current_tb_modified) {
1063 /* we generate a block containing just the instruction
1064 modifying the memory. It will ensure that it cannot modify
1065 itself */
1066 env->current_tb = NULL;
1067 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1068 cpu_resume_from_signal(env, puc);
1069 }
1070 #endif
1071 }
1072 #endif
1073
1074 /* add the tb in the target page and protect it if necessary */
1075 static inline void tb_alloc_page(TranslationBlock *tb,
1076 unsigned int n, target_ulong page_addr)
1077 {
1078 PageDesc *p;
1079 TranslationBlock *last_first_tb;
1080
1081 tb->page_addr[n] = page_addr;
1082 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1083 tb->page_next[n] = p->first_tb;
1084 last_first_tb = p->first_tb;
1085 p->first_tb = (TranslationBlock *)((long)tb | n);
1086 invalidate_page_bitmap(p);
1087
1088 #if defined(TARGET_HAS_SMC) || 1
1089
1090 #if defined(CONFIG_USER_ONLY)
1091 if (p->flags & PAGE_WRITE) {
1092 target_ulong addr;
1093 PageDesc *p2;
1094 int prot;
1095
1096 /* force the host page as non writable (writes will have a
1097 page fault + mprotect overhead) */
1098 page_addr &= qemu_host_page_mask;
1099 prot = 0;
1100 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1101 addr += TARGET_PAGE_SIZE) {
1102
1103 p2 = page_find (addr >> TARGET_PAGE_BITS);
1104 if (!p2)
1105 continue;
1106 prot |= p2->flags;
1107 p2->flags &= ~PAGE_WRITE;
1108 page_get_flags(addr);
1109 }
1110 mprotect(g2h(page_addr), qemu_host_page_size,
1111 (prot & PAGE_BITS) & ~PAGE_WRITE);
1112 #ifdef DEBUG_TB_INVALIDATE
1113 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1114 page_addr);
1115 #endif
1116 }
1117 #else
1118 /* if some code is already present, then the pages are already
1119 protected. So we handle the case where only the first TB is
1120 allocated in a physical page */
1121 if (!last_first_tb) {
1122 tlb_protect_code(page_addr);
1123 }
1124 #endif
1125
1126 #endif /* TARGET_HAS_SMC */
1127 }
1128
1129 /* Allocate a new translation block. Flush the translation buffer if
1130 too many translation blocks or too much generated code. */
1131 TranslationBlock *tb_alloc(target_ulong pc)
1132 {
1133 TranslationBlock *tb;
1134
1135 if (nb_tbs >= code_gen_max_blocks ||
1136 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1137 return NULL;
1138 tb = &tbs[nb_tbs++];
1139 tb->pc = pc;
1140 tb->cflags = 0;
1141 return tb;
1142 }
1143
1144 void tb_free(TranslationBlock *tb)
1145 {
1146 /* In practice this is mostly used for single-use temporary TBs.
1147 Ignore the hard cases and just back up if this TB happens to
1148 be the last one generated. */
1149 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1150 code_gen_ptr = tb->tc_ptr;
1151 nb_tbs--;
1152 }
1153 }
1154
1155 /* add a new TB and link it to the physical page tables. phys_page2 is
1156 (-1) to indicate that only one page contains the TB. */
1157 void tb_link_phys(TranslationBlock *tb,
1158 target_ulong phys_pc, target_ulong phys_page2)
1159 {
1160 unsigned int h;
1161 TranslationBlock **ptb;
1162
1163 /* Grab the mmap lock to stop another thread invalidating this TB
1164 before we are done. */
1165 mmap_lock();
1166 /* add in the physical hash table */
1167 h = tb_phys_hash_func(phys_pc);
1168 ptb = &tb_phys_hash[h];
1169 tb->phys_hash_next = *ptb;
1170 *ptb = tb;
1171
1172 /* add in the page list */
1173 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1174 if (phys_page2 != -1)
1175 tb_alloc_page(tb, 1, phys_page2);
1176 else
1177 tb->page_addr[1] = -1;
1178
1179 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1180 tb->jmp_next[0] = NULL;
1181 tb->jmp_next[1] = NULL;
1182
1183 /* init original jump addresses */
1184 if (tb->tb_next_offset[0] != 0xffff)
1185 tb_reset_jump(tb, 0);
1186 if (tb->tb_next_offset[1] != 0xffff)
1187 tb_reset_jump(tb, 1);
1188
1189 #ifdef DEBUG_TB_CHECK
1190 tb_page_check();
1191 #endif
1192 mmap_unlock();
1193 }
1194
1195 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1196 tb[1].tc_ptr. Return NULL if not found */
1197 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1198 {
1199 int m_min, m_max, m;
1200 unsigned long v;
1201 TranslationBlock *tb;
1202
1203 if (nb_tbs <= 0)
1204 return NULL;
1205 if (tc_ptr < (unsigned long)code_gen_buffer ||
1206 tc_ptr >= (unsigned long)code_gen_ptr)
1207 return NULL;
1208 /* binary search (cf Knuth) */
1209 m_min = 0;
1210 m_max = nb_tbs - 1;
1211 while (m_min <= m_max) {
1212 m = (m_min + m_max) >> 1;
1213 tb = &tbs[m];
1214 v = (unsigned long)tb->tc_ptr;
1215 if (v == tc_ptr)
1216 return tb;
1217 else if (tc_ptr < v) {
1218 m_max = m - 1;
1219 } else {
1220 m_min = m + 1;
1221 }
1222 }
1223 return &tbs[m_max];
1224 }
1225
1226 static void tb_reset_jump_recursive(TranslationBlock *tb);
1227
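/* Unchain jump 'n' of 'tb': find the TB it jumps to, remove 'tb' from that
   TB's circular jmp_first list, reset the generated jump, then recurse into
   the target so that its own outgoing jumps are unchained as well. */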
1228 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1229 {
1230 TranslationBlock *tb1, *tb_next, **ptb;
1231 unsigned int n1;
1232
1233 tb1 = tb->jmp_next[n];
1234 if (tb1 != NULL) {
1235 /* find head of list */
1236 for(;;) {
1237 n1 = (long)tb1 & 3;
1238 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1239 if (n1 == 2)
1240 break;
1241 tb1 = tb1->jmp_next[n1];
1242 }
1243 /* we are now sure that tb jumps to tb1 */
1244 tb_next = tb1;
1245
1246 /* remove tb from the jmp_first list */
1247 ptb = &tb_next->jmp_first;
1248 for(;;) {
1249 tb1 = *ptb;
1250 n1 = (long)tb1 & 3;
1251 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1252 if (n1 == n && tb1 == tb)
1253 break;
1254 ptb = &tb1->jmp_next[n1];
1255 }
1256 *ptb = tb->jmp_next[n];
1257 tb->jmp_next[n] = NULL;
1258
1259 /* suppress the jump to next tb in generated code */
1260 tb_reset_jump(tb, n);
1261
1262 /* suppress jumps in the tb on which we could have jumped */
1263 tb_reset_jump_recursive(tb_next);
1264 }
1265 }
1266
1267 static void tb_reset_jump_recursive(TranslationBlock *tb)
1268 {
1269 tb_reset_jump_recursive2(tb, 0);
1270 tb_reset_jump_recursive2(tb, 1);
1271 }
1272
1273 #if defined(TARGET_HAS_ICE)
1274 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1275 {
1276 target_phys_addr_t addr;
1277 target_ulong pd;
1278 ram_addr_t ram_addr;
1279 PhysPageDesc *p;
1280
1281 addr = cpu_get_phys_page_debug(env, pc);
1282 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1283 if (!p) {
1284 pd = IO_MEM_UNASSIGNED;
1285 } else {
1286 pd = p->phys_offset;
1287 }
1288 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1289 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1290 }
1291 #endif
1292
1293 /* Add a watchpoint. */
1294 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1295 {
1296 int i;
1297
1298 for (i = 0; i < env->nb_watchpoints; i++) {
1299 if (addr == env->watchpoint[i].vaddr)
1300 return 0;
1301 }
1302 if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1303 return -1;
1304
1305 i = env->nb_watchpoints++;
1306 env->watchpoint[i].vaddr = addr;
1307 env->watchpoint[i].type = type;
1308 tlb_flush_page(env, addr);
1309 /* FIXME: This flush is needed because of the hack to make memory ops
1310 terminate the TB. It can be removed once the proper IO trap and
1311 re-execute bits are in. */
1312 tb_flush(env);
1313 return i;
1314 }
1315
1316 /* Remove a watchpoint. */
1317 int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1318 {
1319 int i;
1320
1321 for (i = 0; i < env->nb_watchpoints; i++) {
1322 if (addr == env->watchpoint[i].vaddr) {
1323 env->nb_watchpoints--;
1324 env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1325 tlb_flush_page(env, addr);
1326 return 0;
1327 }
1328 }
1329 return -1;
1330 }
1331
1332 /* Remove all watchpoints. */
1333 void cpu_watchpoint_remove_all(CPUState *env) {
1334 int i;
1335
1336 for (i = 0; i < env->nb_watchpoints; i++) {
1337 tlb_flush_page(env, env->watchpoint[i].vaddr);
1338 }
1339 env->nb_watchpoints = 0;
1340 }
1341
1342 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1343 breakpoint is reached */
1344 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1345 {
1346 #if defined(TARGET_HAS_ICE)
1347 int i;
1348
1349 for(i = 0; i < env->nb_breakpoints; i++) {
1350 if (env->breakpoints[i] == pc)
1351 return 0;
1352 }
1353
1354 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1355 return -1;
1356 env->breakpoints[env->nb_breakpoints++] = pc;
1357
1358 breakpoint_invalidate(env, pc);
1359 return 0;
1360 #else
1361 return -1;
1362 #endif
1363 }
1364
1365 /* remove all breakpoints */
1366 void cpu_breakpoint_remove_all(CPUState *env) {
1367 #if defined(TARGET_HAS_ICE)
1368 int i;
1369 for(i = 0; i < env->nb_breakpoints; i++) {
1370 breakpoint_invalidate(env, env->breakpoints[i]);
1371 }
1372 env->nb_breakpoints = 0;
1373 #endif
1374 }
1375
1376 /* remove a breakpoint */
1377 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1378 {
1379 #if defined(TARGET_HAS_ICE)
1380 int i;
1381 for(i = 0; i < env->nb_breakpoints; i++) {
1382 if (env->breakpoints[i] == pc)
1383 goto found;
1384 }
1385 return -1;
1386 found:
1387 env->nb_breakpoints--;
1388 if (i < env->nb_breakpoints)
1389 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1390
1391 breakpoint_invalidate(env, pc);
1392 return 0;
1393 #else
1394 return -1;
1395 #endif
1396 }
1397
1398 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1399 CPU loop after each instruction */
1400 void cpu_single_step(CPUState *env, int enabled)
1401 {
1402 #if defined(TARGET_HAS_ICE)
1403 if (env->singlestep_enabled != enabled) {
1404 env->singlestep_enabled = enabled;
1405 /* must flush all the translated code to avoid inconsistencies */
1406 /* XXX: only flush what is necessary */
1407 tb_flush(env);
1408 }
1409 #endif
1410 }
1411
1412 /* enable or disable low levels log */
1413 void cpu_set_log(int log_flags)
1414 {
1415 loglevel = log_flags;
1416 if (loglevel && !logfile) {
1417 logfile = fopen(logfilename, log_append ? "a" : "w");
1418 if (!logfile) {
1419 perror(logfilename);
1420 _exit(1);
1421 }
1422 #if !defined(CONFIG_SOFTMMU)
1423 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1424 {
1425 static uint8_t logfile_buf[4096];
1426 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1427 }
1428 #else
1429 setvbuf(logfile, NULL, _IOLBF, 0);
1430 #endif
1431 log_append = 1;
1432 }
1433 if (!loglevel && logfile) {
1434 fclose(logfile);
1435 logfile = NULL;
1436 }
1437 }
1438
1439 void cpu_set_log_filename(const char *filename)
1440 {
1441 logfilename = strdup(filename);
1442 if (logfile) {
1443 fclose(logfile);
1444 logfile = NULL;
1445 }
1446 cpu_set_log(loglevel);
1447 }
1448
1449 /* mask must never be zero, except for A20 change call */
1450 void cpu_interrupt(CPUState *env, int mask)
1451 {
1452 #if !defined(USE_NPTL)
1453 TranslationBlock *tb;
1454 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1455 #endif
1456 int old_mask;
1457
1458 old_mask = env->interrupt_request;
1459 /* FIXME: This is probably not threadsafe. A different thread could
1460 be in the middle of a read-modify-write operation. */
1461 env->interrupt_request |= mask;
1462 #if defined(USE_NPTL)
1463 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1464 problem and hope the cpu will stop of its own accord. For userspace
1465 emulation this often isn't actually as bad as it sounds. Often
1466 signals are used primarily to interrupt blocking syscalls. */
1467 #else
1468 if (use_icount) {
1469 env->icount_decr.u16.high = 0xffff;
1470 #ifndef CONFIG_USER_ONLY
1471 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1472 an async event happened and we need to process it. */
1473 if (!can_do_io(env)
1474 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1475 cpu_abort(env, "Raised interrupt while not in I/O function");
1476 }
1477 #endif
1478 } else {
1479 tb = env->current_tb;
1480 /* if the cpu is currently executing code, we must unlink it and
1481 all the potentially executing TB */
1482 if (tb && !testandset(&interrupt_lock)) {
1483 env->current_tb = NULL;
1484 tb_reset_jump_recursive(tb);
1485 resetlock(&interrupt_lock);
1486 }
1487 }
1488 #endif
1489 }
1490
1491 void cpu_reset_interrupt(CPUState *env, int mask)
1492 {
1493 env->interrupt_request &= ~mask;
1494 }
1495
1496 CPULogItem cpu_log_items[] = {
1497 { CPU_LOG_TB_OUT_ASM, "out_asm",
1498 "show generated host assembly code for each compiled TB" },
1499 { CPU_LOG_TB_IN_ASM, "in_asm",
1500 "show target assembly code for each compiled TB" },
1501 { CPU_LOG_TB_OP, "op",
1502 "show micro ops for each compiled TB" },
1503 { CPU_LOG_TB_OP_OPT, "op_opt",
1504 "show micro ops "
1505 #ifdef TARGET_I386
1506 "before eflags optimization and "
1507 #endif
1508 "after liveness analysis" },
1509 { CPU_LOG_INT, "int",
1510 "show interrupts/exceptions in short format" },
1511 { CPU_LOG_EXEC, "exec",
1512 "show trace before each executed TB (lots of logs)" },
1513 { CPU_LOG_TB_CPU, "cpu",
1514 "show CPU state before block translation" },
1515 #ifdef TARGET_I386
1516 { CPU_LOG_PCALL, "pcall",
1517 "show protected mode far calls/returns/exceptions" },
1518 #endif
1519 #ifdef DEBUG_IOPORT
1520 { CPU_LOG_IOPORT, "ioport",
1521 "show all i/o ports accesses" },
1522 #endif
1523 { 0, NULL, NULL },
1524 };
1525
1526 static int cmp1(const char *s1, int n, const char *s2)
1527 {
1528 if (strlen(s2) != n)
1529 return 0;
1530 return memcmp(s1, s2, n) == 0;
1531 }
1532
1533 /* takes a comma separated list of log masks. Return 0 if error. */
1534 int cpu_str_to_log_mask(const char *str)
1535 {
1536 CPULogItem *item;
1537 int mask;
1538 const char *p, *p1;
1539
1540 p = str;
1541 mask = 0;
1542 for(;;) {
1543 p1 = strchr(p, ',');
1544 if (!p1)
1545 p1 = p + strlen(p);
1546 if(cmp1(p,p1-p,"all")) {
1547 for(item = cpu_log_items; item->mask != 0; item++) {
1548 mask |= item->mask;
1549 }
1550 } else {
1551 for(item = cpu_log_items; item->mask != 0; item++) {
1552 if (cmp1(p, p1 - p, item->name))
1553 goto found;
1554 }
1555 return 0;
1556 }
1557 found:
1558 mask |= item->mask;
1559 if (*p1 != ',')
1560 break;
1561 p = p1 + 1;
1562 }
1563 return mask;
1564 }
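/* Example (illustrative): cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while an unknown item name makes
   the whole call return 0. */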
1565
1566 void cpu_abort(CPUState *env, const char *fmt, ...)
1567 {
1568 va_list ap;
1569 va_list ap2;
1570
1571 va_start(ap, fmt);
1572 va_copy(ap2, ap);
1573 fprintf(stderr, "qemu: fatal: ");
1574 vfprintf(stderr, fmt, ap);
1575 fprintf(stderr, "\n");
1576 #ifdef TARGET_I386
1577 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1578 #else
1579 cpu_dump_state(env, stderr, fprintf, 0);
1580 #endif
1581 if (logfile) {
1582 fprintf(logfile, "qemu: fatal: ");
1583 vfprintf(logfile, fmt, ap2);
1584 fprintf(logfile, "\n");
1585 #ifdef TARGET_I386
1586 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1587 #else
1588 cpu_dump_state(env, logfile, fprintf, 0);
1589 #endif
1590 fflush(logfile);
1591 fclose(logfile);
1592 }
1593 va_end(ap2);
1594 va_end(ap);
1595 abort();
1596 }
1597
1598 CPUState *cpu_copy(CPUState *env)
1599 {
1600 CPUState *new_env = cpu_init(env->cpu_model_str);
1601 /* preserve chaining and index */
1602 CPUState *next_cpu = new_env->next_cpu;
1603 int cpu_index = new_env->cpu_index;
1604 memcpy(new_env, env, sizeof(CPUState));
1605 new_env->next_cpu = next_cpu;
1606 new_env->cpu_index = cpu_index;
1607 return new_env;
1608 }
1609
1610 #if !defined(CONFIG_USER_ONLY)
1611
1612 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1613 {
1614 unsigned int i;
1615
1616 /* Discard jump cache entries for any tb which might potentially
1617 overlap the flushed page. */
1618 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1619 memset (&env->tb_jmp_cache[i], 0,
1620 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1621
1622 i = tb_jmp_cache_hash_page(addr);
1623 memset (&env->tb_jmp_cache[i], 0,
1624 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1625 }
1626
1627 /* NOTE: if flush_global is true, also flush global entries (not
1628 implemented yet) */
1629 void tlb_flush(CPUState *env, int flush_global)
1630 {
1631 int i;
1632
1633 #if defined(DEBUG_TLB)
1634 printf("tlb_flush:\n");
1635 #endif
1636 /* must reset current TB so that interrupts cannot modify the
1637 links while we are modifying them */
1638 env->current_tb = NULL;
1639
1640 for(i = 0; i < CPU_TLB_SIZE; i++) {
1641 env->tlb_table[0][i].addr_read = -1;
1642 env->tlb_table[0][i].addr_write = -1;
1643 env->tlb_table[0][i].addr_code = -1;
1644 env->tlb_table[1][i].addr_read = -1;
1645 env->tlb_table[1][i].addr_write = -1;
1646 env->tlb_table[1][i].addr_code = -1;
1647 #if (NB_MMU_MODES >= 3)
1648 env->tlb_table[2][i].addr_read = -1;
1649 env->tlb_table[2][i].addr_write = -1;
1650 env->tlb_table[2][i].addr_code = -1;
1651 #if (NB_MMU_MODES == 4)
1652 env->tlb_table[3][i].addr_read = -1;
1653 env->tlb_table[3][i].addr_write = -1;
1654 env->tlb_table[3][i].addr_code = -1;
1655 #endif
1656 #endif
1657 }
1658
1659 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1660
1661 #ifdef USE_KQEMU
1662 if (env->kqemu_enabled) {
1663 kqemu_flush(env, flush_global);
1664 }
1665 #endif
1666 tlb_flush_count++;
1667 }
1668
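/* Invalidate a single TLB entry if any of its read, write or code
   addresses matches 'addr'. */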
1669 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1670 {
1671 if (addr == (tlb_entry->addr_read &
1672 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1673 addr == (tlb_entry->addr_write &
1674 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1675 addr == (tlb_entry->addr_code &
1676 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1677 tlb_entry->addr_read = -1;
1678 tlb_entry->addr_write = -1;
1679 tlb_entry->addr_code = -1;
1680 }
1681 }
1682
1683 void tlb_flush_page(CPUState *env, target_ulong addr)
1684 {
1685 int i;
1686
1687 #if defined(DEBUG_TLB)
1688 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1689 #endif
1690 /* must reset current TB so that interrupts cannot modify the
1691 links while we are modifying them */
1692 env->current_tb = NULL;
1693
1694 addr &= TARGET_PAGE_MASK;
1695 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1696 tlb_flush_entry(&env->tlb_table[0][i], addr);
1697 tlb_flush_entry(&env->tlb_table[1][i], addr);
1698 #if (NB_MMU_MODES >= 3)
1699 tlb_flush_entry(&env->tlb_table[2][i], addr);
1700 #if (NB_MMU_MODES == 4)
1701 tlb_flush_entry(&env->tlb_table[3][i], addr);
1702 #endif
1703 #endif
1704
1705 tlb_flush_jmp_cache(env, addr);
1706
1707 #ifdef USE_KQEMU
1708 if (env->kqemu_enabled) {
1709 kqemu_flush_page(env, addr);
1710 }
1711 #endif
1712 }
1713
1714 /* update the TLBs so that writes to code in the virtual page 'addr'
1715 can be detected */
1716 static void tlb_protect_code(ram_addr_t ram_addr)
1717 {
1718 cpu_physical_memory_reset_dirty(ram_addr,
1719 ram_addr + TARGET_PAGE_SIZE,
1720 CODE_DIRTY_FLAG);
1721 }
1722
1723 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1724 tested for self modifying code */
1725 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1726 target_ulong vaddr)
1727 {
1728 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1729 }
1730
1731 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1732 unsigned long start, unsigned long length)
1733 {
1734 unsigned long addr;
1735 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1736 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1737 if ((addr - start) < length) {
1738 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1739 }
1740 }
1741 }
1742
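/* Clear the given dirty flags for the physical range [start, end[ and mark
   the matching RAM entries of every CPU TLB as TLB_NOTDIRTY, so that the
   next write goes through the slow path and sets the dirty bits again. */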
1743 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1744 int dirty_flags)
1745 {
1746 CPUState *env;
1747 unsigned long length, start1;
1748 int i, mask, len;
1749 uint8_t *p;
1750
1751 start &= TARGET_PAGE_MASK;
1752 end = TARGET_PAGE_ALIGN(end);
1753
1754 length = end - start;
1755 if (length == 0)
1756 return;
1757 len = length >> TARGET_PAGE_BITS;
1758 #ifdef USE_KQEMU
1759 /* XXX: should not depend on cpu context */
1760 env = first_cpu;
1761 if (env->kqemu_enabled) {
1762 ram_addr_t addr;
1763 addr = start;
1764 for(i = 0; i < len; i++) {
1765 kqemu_set_notdirty(env, addr);
1766 addr += TARGET_PAGE_SIZE;
1767 }
1768 }
1769 #endif
1770 mask = ~dirty_flags;
1771 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1772 for(i = 0; i < len; i++)
1773 p[i] &= mask;
1774
1775 /* we modify the TLB cache so that the dirty bit will be set again
1776 when accessing the range */
1777 start1 = start + (unsigned long)phys_ram_base;
1778 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1779 for(i = 0; i < CPU_TLB_SIZE; i++)
1780 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1781 for(i = 0; i < CPU_TLB_SIZE; i++)
1782 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1783 #if (NB_MMU_MODES >= 3)
1784 for(i = 0; i < CPU_TLB_SIZE; i++)
1785 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1786 #if (NB_MMU_MODES == 4)
1787 for(i = 0; i < CPU_TLB_SIZE; i++)
1788 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1789 #endif
1790 #endif
1791 }
1792 }
1793
1794 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1795 {
1796 ram_addr_t ram_addr;
1797
1798 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1799 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1800 tlb_entry->addend - (unsigned long)phys_ram_base;
1801 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1802 tlb_entry->addr_write |= TLB_NOTDIRTY;
1803 }
1804 }
1805 }
1806
1807 /* update the TLB according to the current state of the dirty bits */
1808 void cpu_tlb_update_dirty(CPUState *env)
1809 {
1810 int i;
1811 for(i = 0; i < CPU_TLB_SIZE; i++)
1812 tlb_update_dirty(&env->tlb_table[0][i]);
1813 for(i = 0; i < CPU_TLB_SIZE; i++)
1814 tlb_update_dirty(&env->tlb_table[1][i]);
1815 #if (NB_MMU_MODES >= 3)
1816 for(i = 0; i < CPU_TLB_SIZE; i++)
1817 tlb_update_dirty(&env->tlb_table[2][i]);
1818 #if (NB_MMU_MODES == 4)
1819 for(i = 0; i < CPU_TLB_SIZE; i++)
1820 tlb_update_dirty(&env->tlb_table[3][i]);
1821 #endif
1822 #endif
1823 }
1824
1825 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1826 {
1827 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1828 tlb_entry->addr_write = vaddr;
1829 }
1830
1831 /* update the TLB corresponding to virtual page vaddr
1832 so that it is no longer dirty */
1833 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1834 {
1835 int i;
1836
1837 vaddr &= TARGET_PAGE_MASK;
1838 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1839 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1840 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1841 #if (NB_MMU_MODES >= 3)
1842 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1843 #if (NB_MMU_MODES == 4)
1844 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1845 #endif
1846 #endif
1847 }
1848
1849 /* add a new TLB entry. At most one entry for a given virtual address
1850 is permitted. Return 0 if OK or 2 if the page could not be mapped
1851 (can only happen in non-SOFTMMU mode for I/O pages or pages
1852 conflicting with the host address space). */
1853 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1854 target_phys_addr_t paddr, int prot,
1855 int mmu_idx, int is_softmmu)
1856 {
1857 PhysPageDesc *p;
1858 unsigned long pd;
1859 unsigned int index;
1860 target_ulong address;
1861 target_ulong code_address;
1862 target_phys_addr_t addend;
1863 int ret;
1864 CPUTLBEntry *te;
1865 int i;
1866 target_phys_addr_t iotlb;
1867
1868 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1869 if (!p) {
1870 pd = IO_MEM_UNASSIGNED;
1871 } else {
1872 pd = p->phys_offset;
1873 }
1874 #if defined(DEBUG_TLB)
1875 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1876 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1877 #endif
1878
1879 ret = 0;
1880 address = vaddr;
1881 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1882 /* IO memory case (romd handled later) */
1883 address |= TLB_MMIO;
1884 }
1885 addend = (target_phys_addr_t)phys_ram_base + (pd & TARGET_PAGE_MASK);
1886 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1887 /* Normal RAM. */
1888 iotlb = pd & TARGET_PAGE_MASK;
1889 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1890 iotlb |= IO_MEM_NOTDIRTY;
1891 else
1892 iotlb |= IO_MEM_ROM;
1893 } else {
1894 /* IO handlers are currently passed a physical address.
1895 It would be nice to pass an offset from the base address
1896 of that region. This would avoid having to special case RAM,
1897 and avoid full address decoding in every device.
1898 We can't use the high bits of pd for this because
1899 IO_MEM_ROMD uses these as a ram address. */
1900 iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1901 }
1902
1903 code_address = address;
1904 /* Make accesses to pages with watchpoints go via the
1905 watchpoint trap routines. */
1906 for (i = 0; i < env->nb_watchpoints; i++) {
1907 if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1908 iotlb = io_mem_watch + paddr;
1909 /* TODO: The memory case can be optimized by not trapping
1910 reads of pages with a write breakpoint. */
1911 address |= TLB_MMIO;
1912 }
1913 }
1914
1915 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1916 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1917 te = &env->tlb_table[mmu_idx][index];
1918 te->addend = addend - vaddr;
1919 if (prot & PAGE_READ) {
1920 te->addr_read = address;
1921 } else {
1922 te->addr_read = -1;
1923 }
1924
1925 if (prot & PAGE_EXEC) {
1926 te->addr_code = code_address;
1927 } else {
1928 te->addr_code = -1;
1929 }
1930 if (prot & PAGE_WRITE) {
1931 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1932 (pd & IO_MEM_ROMD)) {
1933 /* Write access calls the I/O callback. */
1934 te->addr_write = address | TLB_MMIO;
1935 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1936 !cpu_physical_memory_is_dirty(pd)) {
1937 te->addr_write = address | TLB_NOTDIRTY;
1938 } else {
1939 te->addr_write = address;
1940 }
1941 } else {
1942 te->addr_write = -1;
1943 }
1944 return ret;
1945 }
1946
1947 #else
1948
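/* user mode emulation has no softmmu TLB, so the TLB operations are
   no-op stubs here */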
1949 void tlb_flush(CPUState *env, int flush_global)
1950 {
1951 }
1952
1953 void tlb_flush_page(CPUState *env, target_ulong addr)
1954 {
1955 }
1956
1957 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1958 target_phys_addr_t paddr, int prot,
1959 int mmu_idx, int is_softmmu)
1960 {
1961 return 0;
1962 }
1963
1964 /* dump memory mappings */
1965 void page_dump(FILE *f)
1966 {
1967 unsigned long start, end;
1968 int i, j, prot, prot1;
1969 PageDesc *p;
1970
1971 fprintf(f, "%-8s %-8s %-8s %s\n",
1972 "start", "end", "size", "prot");
1973 start = -1;
1974 end = -1;
1975 prot = 0;
1976 for(i = 0; i <= L1_SIZE; i++) {
1977 if (i < L1_SIZE)
1978 p = l1_map[i];
1979 else
1980 p = NULL;
1981 for(j = 0;j < L2_SIZE; j++) {
1982 if (!p)
1983 prot1 = 0;
1984 else
1985 prot1 = p[j].flags;
1986 if (prot1 != prot) {
1987 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1988 if (start != -1) {
1989 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1990 start, end, end - start,
1991 prot & PAGE_READ ? 'r' : '-',
1992 prot & PAGE_WRITE ? 'w' : '-',
1993 prot & PAGE_EXEC ? 'x' : '-');
1994 }
1995 if (prot1 != 0)
1996 start = end;
1997 else
1998 start = -1;
1999 prot = prot1;
2000 }
2001 if (!p)
2002 break;
2003 }
2004 }
2005 }
2006
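/* return the PAGE_* flags of the page containing 'address', or 0 if
   the page is not mapped */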
2007 int page_get_flags(target_ulong address)
2008 {
2009 PageDesc *p;
2010
2011 p = page_find(address >> TARGET_PAGE_BITS);
2012 if (!p)
2013 return 0;
2014 return p->flags;
2015 }
2016
2017 /* modify the flags of a page and invalidate the code if
2018 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2019 depending on PAGE_WRITE */
2020 void page_set_flags(target_ulong start, target_ulong end, int flags)
2021 {
2022 PageDesc *p;
2023 target_ulong addr;
2024
2025 /* mmap_lock should already be held. */
2026 start = start & TARGET_PAGE_MASK;
2027 end = TARGET_PAGE_ALIGN(end);
2028 if (flags & PAGE_WRITE)
2029 flags |= PAGE_WRITE_ORG;
2030 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2031 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2032 /* We may be called for host regions that are outside guest
2033 address space. */
2034 if (!p)
2035 return;
2036 /* if the write protection is set, then we invalidate the code
2037 inside */
2038 if (!(p->flags & PAGE_WRITE) &&
2039 (flags & PAGE_WRITE) &&
2040 p->first_tb) {
2041 tb_invalidate_phys_page(addr, 0, NULL);
2042 }
2043 p->flags = flags;
2044 }
2045 }
2046
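/* check that the guest range [start, start + len) is mapped with the
   requested PAGE_READ/PAGE_WRITE permissions; pages made read-only to
   protect translated code are unprotected on a write check. Return 0
   if OK, -1 otherwise. */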
2047 int page_check_range(target_ulong start, target_ulong len, int flags)
2048 {
2049 PageDesc *p;
2050 target_ulong end;
2051 target_ulong addr;
2052
2053 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2054 start = start & TARGET_PAGE_MASK;
2055
2056 if( end < start )
2057 /* we've wrapped around */
2058 return -1;
2059 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2060 p = page_find(addr >> TARGET_PAGE_BITS);
2061 if( !p )
2062 return -1;
2063 if( !(p->flags & PAGE_VALID) )
2064 return -1;
2065
2066 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2067 return -1;
2068 if (flags & PAGE_WRITE) {
2069 if (!(p->flags & PAGE_WRITE_ORG))
2070 return -1;
2071 /* unprotect the page if it was put read-only because it
2072 contains translated code */
2073 if (!(p->flags & PAGE_WRITE)) {
2074 if (!page_unprotect(addr, 0, NULL))
2075 return -1;
2076 }
2077 return 0;
2078 }
2079 }
2080 return 0;
2081 }
2082
2083 /* called from signal handler: invalidate the code and unprotect the
2084 page. Return TRUE if the fault was successfully handled. */
2085 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2086 {
2087 unsigned int page_index, prot, pindex;
2088 PageDesc *p, *p1;
2089 target_ulong host_start, host_end, addr;
2090
2091 /* Technically this isn't safe inside a signal handler. However we
2092 know this only ever happens in a synchronous SEGV handler, so in
2093 practice it seems to be ok. */
2094 mmap_lock();
2095
2096 host_start = address & qemu_host_page_mask;
2097 page_index = host_start >> TARGET_PAGE_BITS;
2098 p1 = page_find(page_index);
2099 if (!p1) {
2100 mmap_unlock();
2101 return 0;
2102 }
2103 host_end = host_start + qemu_host_page_size;
2104 p = p1;
2105 prot = 0;
2106 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2107 prot |= p->flags;
2108 p++;
2109 }
2110 /* if the page was really writable, then we change its
2111 protection back to writable */
2112 if (prot & PAGE_WRITE_ORG) {
2113 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2114 if (!(p1[pindex].flags & PAGE_WRITE)) {
2115 mprotect((void *)g2h(host_start), qemu_host_page_size,
2116 (prot & PAGE_BITS) | PAGE_WRITE);
2117 p1[pindex].flags |= PAGE_WRITE;
2118 /* and since the content will be modified, we must invalidate
2119 the corresponding translated code. */
2120 tb_invalidate_phys_page(address, pc, puc);
2121 #ifdef DEBUG_TB_CHECK
2122 tb_invalidate_check(address);
2123 #endif
2124 mmap_unlock();
2125 return 1;
2126 }
2127 }
2128 mmap_unlock();
2129 return 0;
2130 }
2131
2132 static inline void tlb_set_dirty(CPUState *env,
2133 unsigned long addr, target_ulong vaddr)
2134 {
2135 }
2136 #endif /* defined(CONFIG_USER_ONLY) */
2137
2138 #if !defined(CONFIG_USER_ONLY)
2139 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2140 ram_addr_t memory);
2141 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2142 ram_addr_t orig_memory);
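/* compute the offsets of the registered region inside the target page
   at 'addr' (start_addr2/end_addr2) and set need_subpage if the region
   does not cover the whole page */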
2143 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2144 need_subpage) \
2145 do { \
2146 if (addr > start_addr) \
2147 start_addr2 = 0; \
2148 else { \
2149 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2150 if (start_addr2 > 0) \
2151 need_subpage = 1; \
2152 } \
2153 \
2154 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2155 end_addr2 = TARGET_PAGE_SIZE - 1; \
2156 else { \
2157 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2158 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2159 need_subpage = 1; \
2160 } \
2161 } while (0)
2162
2163 /* register physical memory. 'size' must be a multiple of the target
2164 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2165 io memory page */
2166 void cpu_register_physical_memory(target_phys_addr_t start_addr,
2167 ram_addr_t size,
2168 ram_addr_t phys_offset)
2169 {
2170 target_phys_addr_t addr, end_addr;
2171 PhysPageDesc *p;
2172 CPUState *env;
2173 ram_addr_t orig_size = size;
2174 void *subpage;
2175
2176 #ifdef USE_KQEMU
2177 /* XXX: should not depend on cpu context */
2178 env = first_cpu;
2179 if (env->kqemu_enabled) {
2180 kqemu_set_phys_mem(start_addr, size, phys_offset);
2181 }
2182 #endif
2183 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2184 end_addr = start_addr + (target_phys_addr_t)size;
2185 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2186 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2187 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2188 ram_addr_t orig_memory = p->phys_offset;
2189 target_phys_addr_t start_addr2, end_addr2;
2190 int need_subpage = 0;
2191
2192 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2193 need_subpage);
2194 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2195 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2196 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2197 &p->phys_offset, orig_memory);
2198 } else {
2199 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2200 >> IO_MEM_SHIFT];
2201 }
2202 subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2203 } else {
2204 p->phys_offset = phys_offset;
2205 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2206 (phys_offset & IO_MEM_ROMD))
2207 phys_offset += TARGET_PAGE_SIZE;
2208 }
2209 } else {
2210 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2211 p->phys_offset = phys_offset;
2212 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2213 (phys_offset & IO_MEM_ROMD))
2214 phys_offset += TARGET_PAGE_SIZE;
2215 else {
2216 target_phys_addr_t start_addr2, end_addr2;
2217 int need_subpage = 0;
2218
2219 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2220 end_addr2, need_subpage);
2221
2222 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2223 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2224 &p->phys_offset, IO_MEM_UNASSIGNED);
2225 subpage_register(subpage, start_addr2, end_addr2,
2226 phys_offset);
2227 }
2228 }
2229 }
2230 }
2231
2232 /* since each CPU stores ram addresses in its TLB cache, we must
2233 reset the modified entries */
2234 /* XXX: slow ! */
2235 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2236 tlb_flush(env, 1);
2237 }
2238 }
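/* Illustrative sketch only (not part of the original file): board init
   code typically allocates guest RAM with qemu_ram_alloc() and then
   maps it with cpu_register_physical_memory(); the base address and
   1 MB size below are arbitrary example values:

       ram_addr_t ram_offset = qemu_ram_alloc(0x100000);
       cpu_register_physical_memory(0x00000000, 0x100000,
                                    ram_offset | IO_MEM_RAM);
*/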
2239
2240 /* XXX: temporary until new memory mapping API */
2241 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2242 {
2243 PhysPageDesc *p;
2244
2245 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2246 if (!p)
2247 return IO_MEM_UNASSIGNED;
2248 return p->phys_offset;
2249 }
2250
2251 /* XXX: better than nothing */
2252 ram_addr_t qemu_ram_alloc(ram_addr_t size)
2253 {
2254 ram_addr_t addr;
2255 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2256 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2257 (uint64_t)size, (uint64_t)phys_ram_size);
2258 abort();
2259 }
2260 addr = phys_ram_alloc_offset;
2261 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2262 return addr;
2263 }
2264
2265 void qemu_ram_free(ram_addr_t addr)
2266 {
2267 }
2268
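/* handlers for unassigned memory: reads return 0 and writes are
   ignored; some targets (sparc, cris) additionally report an
   unassigned access fault */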
2269 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2270 {
2271 #ifdef DEBUG_UNASSIGNED
2272 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2273 #endif
2274 #ifdef TARGET_SPARC
2275 do_unassigned_access(addr, 0, 0, 0);
2276 #elif defined(TARGET_CRIS)
2277 do_unassigned_access(addr, 0, 0, 0);
2278 #endif
2279 return 0;
2280 }
2281
2282 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2283 {
2284 #ifdef DEBUG_UNASSIGNED
2285 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2286 #endif
2287 #ifdef TARGET_SPARC
2288 do_unassigned_access(addr, 1, 0, 0);
2289 #elif defined(TARGET_CRIS)
2290 do_unassigned_access(addr, 1, 0, 0);
2291 #endif
2292 }
2293
2294 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2295 unassigned_mem_readb,
2296 unassigned_mem_readb,
2297 unassigned_mem_readb,
2298 };
2299
2300 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2301 unassigned_mem_writeb,
2302 unassigned_mem_writeb,
2303 unassigned_mem_writeb,
2304 };
2305
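/* slow path used for pages flagged TLB_NOTDIRTY: invalidate any
   translated code on the page, perform the store, update the dirty
   flags and remove the notdirty TLB entry once the page is fully
   dirty again (byte/word/long variants below) */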
2306 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2307 uint32_t val)
2308 {
2309 int dirty_flags;
2310 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2311 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2312 #if !defined(CONFIG_USER_ONLY)
2313 tb_invalidate_phys_page_fast(ram_addr, 1);
2314 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2315 #endif
2316 }
2317 stb_p(phys_ram_base + ram_addr, val);
2318 #ifdef USE_KQEMU
2319 if (cpu_single_env->kqemu_enabled &&
2320 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2321 kqemu_modify_page(cpu_single_env, ram_addr);
2322 #endif
2323 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2324 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2325 /* we remove the notdirty callback only if the code has been
2326 flushed */
2327 if (dirty_flags == 0xff)
2328 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2329 }
2330
2331 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2332 uint32_t val)
2333 {
2334 int dirty_flags;
2335 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2336 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2337 #if !defined(CONFIG_USER_ONLY)
2338 tb_invalidate_phys_page_fast(ram_addr, 2);
2339 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2340 #endif
2341 }
2342 stw_p(phys_ram_base + ram_addr, val);
2343 #ifdef USE_KQEMU
2344 if (cpu_single_env->kqemu_enabled &&
2345 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2346 kqemu_modify_page(cpu_single_env, ram_addr);
2347 #endif
2348 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2349 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2350 /* we remove the notdirty callback only if the code has been
2351 flushed */
2352 if (dirty_flags == 0xff)
2353 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2354 }
2355
2356 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2357 uint32_t val)
2358 {
2359 int dirty_flags;
2360 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2361 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2362 #if !defined(CONFIG_USER_ONLY)
2363 tb_invalidate_phys_page_fast(ram_addr, 4);
2364 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2365 #endif
2366 }
2367 stl_p(phys_ram_base + ram_addr, val);
2368 #ifdef USE_KQEMU
2369 if (cpu_single_env->kqemu_enabled &&
2370 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2371 kqemu_modify_page(cpu_single_env, ram_addr);
2372 #endif
2373 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2374 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2375 /* we remove the notdirty callback only if the code has been
2376 flushed */
2377 if (dirty_flags == 0xff)
2378 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2379 }
2380
2381 static CPUReadMemoryFunc *error_mem_read[3] = {
2382 NULL, /* never used */
2383 NULL, /* never used */
2384 NULL, /* never used */
2385 };
2386
2387 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2388 notdirty_mem_writeb,
2389 notdirty_mem_writew,
2390 notdirty_mem_writel,
2391 };
2392
2393 /* Generate a debug exception if a watchpoint has been hit. */
2394 static void check_watchpoint(int offset, int flags)
2395 {
2396 CPUState *env = cpu_single_env;
2397 target_ulong vaddr;
2398 int i;
2399
2400 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2401 for (i = 0; i < env->nb_watchpoints; i++) {
2402 if (vaddr == env->watchpoint[i].vaddr
2403 && (env->watchpoint[i].type & flags)) {
2404 env->watchpoint_hit = i + 1;
2405 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2406 break;
2407 }
2408 }
2409 }
2410
2411 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2412 so these check for a hit then pass through to the normal out-of-line
2413 phys routines. */
2414 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2415 {
2416 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2417 return ldub_phys(addr);
2418 }
2419
2420 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2421 {
2422 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2423 return lduw_phys(addr);
2424 }
2425
2426 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2427 {
2428 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2429 return ldl_phys(addr);
2430 }
2431
2432 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2433 uint32_t val)
2434 {
2435 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2436 stb_phys(addr, val);
2437 }
2438
2439 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2440 uint32_t val)
2441 {
2442 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2443 stw_phys(addr, val);
2444 }
2445
2446 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2447 uint32_t val)
2448 {
2449 check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2450 stl_phys(addr, val);
2451 }
2452
2453 static CPUReadMemoryFunc *watch_mem_read[3] = {
2454 watch_mem_readb,
2455 watch_mem_readw,
2456 watch_mem_readl,
2457 };
2458
2459 static CPUWriteMemoryFunc *watch_mem_write[3] = {
2460 watch_mem_writeb,
2461 watch_mem_writew,
2462 watch_mem_writel,
2463 };
2464
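/* dispatch an access inside a subpage-divided page to the handler
   registered for that offset; 'len' selects byte (0), word (1) or
   long (2) access */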
2465 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2466 unsigned int len)
2467 {
2468 uint32_t ret;
2469 unsigned int idx;
2470
2471 idx = SUBPAGE_IDX(addr - mmio->base);
2472 #if defined(DEBUG_SUBPAGE)
2473 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2474 mmio, len, addr, idx);
2475 #endif
2476 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2477
2478 return ret;
2479 }
2480
2481 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2482 uint32_t value, unsigned int len)
2483 {
2484 unsigned int idx;
2485
2486 idx = SUBPAGE_IDX(addr - mmio->base);
2487 #if defined(DEBUG_SUBPAGE)
2488 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2489 mmio, len, addr, idx, value);
2490 #endif
2491 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2492 }
2493
2494 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2495 {
2496 #if defined(DEBUG_SUBPAGE)
2497 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2498 #endif
2499
2500 return subpage_readlen(opaque, addr, 0);
2501 }
2502
2503 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2504 uint32_t value)
2505 {
2506 #if defined(DEBUG_SUBPAGE)
2507 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2508 #endif
2509 subpage_writelen(opaque, addr, value, 0);
2510 }
2511
2512 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2513 {
2514 #if defined(DEBUG_SUBPAGE)
2515 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2516 #endif
2517
2518 return subpage_readlen(opaque, addr, 1);
2519 }
2520
2521 static void subpage_writew (void *opaque, target_phys_addr_t addr,
2522 uint32_t value)
2523 {
2524 #if defined(DEBUG_SUBPAGE)
2525 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2526 #endif
2527 subpage_writelen(opaque, addr, value, 1);
2528 }
2529
2530 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2531 {
2532 #if defined(DEBUG_SUBPAGE)
2533 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2534 #endif
2535
2536 return subpage_readlen(opaque, addr, 2);
2537 }
2538
2539 static void subpage_writel (void *opaque,
2540 target_phys_addr_t addr, uint32_t value)
2541 {
2542 #if defined(DEBUG_SUBPAGE)
2543 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2544 #endif
2545 subpage_writelen(opaque, addr, value, 2);
2546 }
2547
2548 static CPUReadMemoryFunc *subpage_read[] = {
2549 &subpage_readb,
2550 &subpage_readw,
2551 &subpage_readl,
2552 };
2553
2554 static CPUWriteMemoryFunc *subpage_write[] = {
2555 &subpage_writeb,
2556 &subpage_writew,
2557 &subpage_writel,
2558 };
2559
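/* register the handlers of io memory 'memory' for the subpage offsets
   [start, end] of 'mmio' */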
2560 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2561 ram_addr_t memory)
2562 {
2563 int idx, eidx;
2564 unsigned int i;
2565
2566 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2567 return -1;
2568 idx = SUBPAGE_IDX(start);
2569 eidx = SUBPAGE_IDX(end);
2570 #if defined(DEBUG_SUBPAGE)
2571 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2572 mmio, start, end, idx, eidx, memory);
2573 #endif
2574 memory >>= IO_MEM_SHIFT;
2575 for (; idx <= eidx; idx++) {
2576 for (i = 0; i < 4; i++) {
2577 if (io_mem_read[memory][i]) {
2578 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2579 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2580 }
2581 if (io_mem_write[memory][i]) {
2582 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2583 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2584 }
2585 }
2586 }
2587
2588 return 0;
2589 }
2590
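/* allocate a subpage descriptor for the page at 'base', register it as
   a new io memory region (stored in *phys with IO_MEM_SUBPAGE set) and
   initialize every offset to 'orig_memory' */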
2591 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2592 ram_addr_t orig_memory)
2593 {
2594 subpage_t *mmio;
2595 int subpage_memory;
2596
2597 mmio = qemu_mallocz(sizeof(subpage_t));
2598 if (mmio != NULL) {
2599 mmio->base = base;
2600 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2601 #if defined(DEBUG_SUBPAGE)
2602 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2603 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2604 #endif
2605 *phys = subpage_memory | IO_MEM_SUBPAGE;
2606 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2607 }
2608
2609 return mmio;
2610 }
2611
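/* register the fixed io memory slots (ROM, unassigned, notdirty), the
   watchpoint handlers and allocate the dirty bitmap (one byte per
   target page, initially all dirty) */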
2612 static void io_mem_init(void)
2613 {
2614 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2615 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2616 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2617 io_mem_nb = 5;
2618
2619 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2620 watch_mem_write, NULL);
2621 /* alloc dirty bits array */
2622 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2623 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2624 }
2625
2626 /* mem_read and mem_write are arrays of functions containing the
2627 function to access byte (index 0), word (index 1) and dword (index
2628 2). Functions can be omitted with a NULL function pointer. The
2629 registered functions may be modified dynamically later.
2630 If io_index is non-zero, the corresponding io zone is
2631 modified. If it is zero, a new io zone is allocated. The return
2632 value can be used with cpu_register_physical_memory(). (-1) is
2633 returned on error. */
2634 int cpu_register_io_memory(int io_index,
2635 CPUReadMemoryFunc **mem_read,
2636 CPUWriteMemoryFunc **mem_write,
2637 void *opaque)
2638 {
2639 int i, subwidth = 0;
2640
2641 if (io_index <= 0) {
2642 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2643 return -1;
2644 io_index = io_mem_nb++;
2645 } else {
2646 if (io_index >= IO_MEM_NB_ENTRIES)
2647 return -1;
2648 }
2649
2650 for(i = 0;i < 3; i++) {
2651 if (!mem_read[i] || !mem_write[i])
2652 subwidth = IO_MEM_SUBWIDTH;
2653 io_mem_read[io_index][i] = mem_read[i];
2654 io_mem_write[io_index][i] = mem_write[i];
2655 }
2656 io_mem_opaque[io_index] = opaque;
2657 return (io_index << IO_MEM_SHIFT) | subwidth;
2658 }
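/* Illustrative sketch only (my_dev_read, my_dev_write and 'opaque' are
   hypothetical names): a device registers its byte/word/long handlers
   and then maps them into the guest physical address space:

       static CPUReadMemoryFunc *my_dev_read[3] = { ... };
       static CPUWriteMemoryFunc *my_dev_write[3] = { ... };
       int io = cpu_register_io_memory(0, my_dev_read, my_dev_write,
                                       opaque);
       cpu_register_physical_memory(base, 0x1000, io);
*/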
2659
2660 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2661 {
2662 return io_mem_write[io_index >> IO_MEM_SHIFT];
2663 }
2664
2665 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2666 {
2667 return io_mem_read[io_index >> IO_MEM_SHIFT];
2668 }
2669
2670 #endif /* !defined(CONFIG_USER_ONLY) */
2671
2672 /* physical memory access (slow version, mainly for debug) */
2673 #if defined(CONFIG_USER_ONLY)
2674 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2675 int len, int is_write)
2676 {
2677 int l, flags;
2678 target_ulong page;
2679 void * p;
2680
2681 while (len > 0) {
2682 page = addr & TARGET_PAGE_MASK;
2683 l = (page + TARGET_PAGE_SIZE) - addr;
2684 if (l > len)
2685 l = len;
2686 flags = page_get_flags(page);
2687 if (!(flags & PAGE_VALID))
2688 return;
2689 if (is_write) {
2690 if (!(flags & PAGE_WRITE))
2691 return;
2692 /* XXX: this code should not depend on lock_user */
2693 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2694 /* FIXME - should this return an error rather than just fail? */
2695 return;
2696 memcpy(p, buf, l);
2697 unlock_user(p, addr, l);
2698 } else {
2699 if (!(flags & PAGE_READ))
2700 return;
2701 /* XXX: this code should not depend on lock_user */
2702 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2703 /* FIXME - should this return an error rather than just fail? */
2704 return;
2705 memcpy(buf, p, l);
2706 unlock_user(p, addr, 0);
2707 }
2708 len -= l;
2709 buf += l;
2710 addr += l;
2711 }
2712 }
2713
2714 #else
2715 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2716 int len, int is_write)
2717 {
2718 int l, io_index;
2719 uint8_t *ptr;
2720 uint32_t val;
2721 target_phys_addr_t page;
2722 unsigned long pd;
2723 PhysPageDesc *p;
2724
2725 while (len > 0) {
2726 page = addr & TARGET_PAGE_MASK;
2727 l = (page + TARGET_PAGE_SIZE) - addr;
2728 if (l > len)
2729 l = len;
2730 p = phys_page_find(page >> TARGET_PAGE_BITS);
2731 if (!p) {
2732 pd = IO_MEM_UNASSIGNED;
2733 } else {
2734 pd = p->phys_offset;
2735 }
2736
2737 if (is_write) {
2738 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2739 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2740 /* XXX: could force cpu_single_env to NULL to avoid
2741 potential bugs */
2742 if (l >= 4 && ((addr & 3) == 0)) {
2743 /* 32 bit write access */
2744 val = ldl_p(buf);
2745 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2746 l = 4;
2747 } else if (l >= 2 && ((addr & 1) == 0)) {
2748 /* 16 bit write access */
2749 val = lduw_p(buf);
2750 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2751 l = 2;
2752 } else {
2753 /* 8 bit write access */
2754 val = ldub_p(buf);
2755 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2756 l = 1;
2757 }
2758 } else {
2759 unsigned long addr1;
2760 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2761 /* RAM case */
2762 ptr = phys_ram_base + addr1;
2763 memcpy(ptr, buf, l);
2764 if (!cpu_physical_memory_is_dirty(addr1)) {
2765 /* invalidate code */
2766 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2767 /* set dirty bit */
2768 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2769 (0xff & ~CODE_DIRTY_FLAG);
2770 }
2771 }
2772 } else {
2773 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2774 !(pd & IO_MEM_ROMD)) {
2775 /* I/O case */
2776 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2777 if (l >= 4 && ((addr & 3) == 0)) {
2778 /* 32 bit read access */
2779 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2780 stl_p(buf, val);
2781 l = 4;
2782 } else if (l >= 2 && ((addr & 1) == 0)) {
2783 /* 16 bit read access */
2784 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2785 stw_p(buf, val);
2786 l = 2;
2787 } else {
2788 /* 8 bit read access */
2789 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2790 stb_p(buf, val);
2791 l = 1;
2792 }
2793 } else {
2794 /* RAM case */
2795 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2796 (addr & ~TARGET_PAGE_MASK);
2797 memcpy(buf, ptr, l);
2798 }
2799 }
2800 len -= l;
2801 buf += l;
2802 addr += l;
2803 }
2804 }
2805
2806 /* used for ROM loading: can write in RAM and ROM */
2807 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2808 const uint8_t *buf, int len)
2809 {
2810 int l;
2811 uint8_t *ptr;
2812 target_phys_addr_t page;
2813 unsigned long pd;
2814 PhysPageDesc *p;
2815
2816 while (len > 0) {
2817 page = addr & TARGET_PAGE_MASK;
2818 l = (page + TARGET_PAGE_SIZE) - addr;
2819 if (l > len)
2820 l = len;
2821 p = phys_page_find(page >> TARGET_PAGE_BITS);
2822 if (!p) {
2823 pd = IO_MEM_UNASSIGNED;
2824 } else {
2825 pd = p->phys_offset;
2826 }
2827
2828 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2829 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2830 !(pd & IO_MEM_ROMD)) {
2831 /* do nothing */
2832 } else {
2833 unsigned long addr1;
2834 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2835 /* ROM/RAM case */
2836 ptr = phys_ram_base + addr1;
2837 memcpy(ptr, buf, l);
2838 }
2839 len -= l;
2840 buf += l;
2841 addr += l;
2842 }
2843 }
2844
2845
2846 /* warning: addr must be aligned */
2847 uint32_t ldl_phys(target_phys_addr_t addr)
2848 {
2849 int io_index;
2850 uint8_t *ptr;
2851 uint32_t val;
2852 unsigned long pd;
2853 PhysPageDesc *p;
2854
2855 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2856 if (!p) {
2857 pd = IO_MEM_UNASSIGNED;
2858 } else {
2859 pd = p->phys_offset;
2860 }
2861
2862 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2863 !(pd & IO_MEM_ROMD)) {
2864 /* I/O case */
2865 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2866 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2867 } else {
2868 /* RAM case */
2869 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2870 (addr & ~TARGET_PAGE_MASK);
2871 val = ldl_p(ptr);
2872 }
2873 return val;
2874 }
2875
2876 /* warning: addr must be aligned */
2877 uint64_t ldq_phys(target_phys_addr_t addr)
2878 {
2879 int io_index;
2880 uint8_t *ptr;
2881 uint64_t val;
2882 unsigned long pd;
2883 PhysPageDesc *p;
2884
2885 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2886 if (!p) {
2887 pd = IO_MEM_UNASSIGNED;
2888 } else {
2889 pd = p->phys_offset;
2890 }
2891
2892 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2893 !(pd & IO_MEM_ROMD)) {
2894 /* I/O case */
2895 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2896 #ifdef TARGET_WORDS_BIGENDIAN
2897 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2898 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2899 #else
2900 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2901 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2902 #endif
2903 } else {
2904 /* RAM case */
2905 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2906 (addr & ~TARGET_PAGE_MASK);
2907 val = ldq_p(ptr);
2908 }
2909 return val;
2910 }
2911
2912 /* XXX: optimize */
2913 uint32_t ldub_phys(target_phys_addr_t addr)
2914 {
2915 uint8_t val;
2916 cpu_physical_memory_read(addr, &val, 1);
2917 return val;
2918 }
2919
2920 /* XXX: optimize */
2921 uint32_t lduw_phys(target_phys_addr_t addr)
2922 {
2923 uint16_t val;
2924 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2925 return tswap16(val);
2926 }
2927
2928 /* warning: addr must be aligned. The ram page is not marked as dirty
2929 and the code inside is not invalidated. It is useful if the dirty
2930 bits are used to track modified PTEs */
2931 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2932 {
2933 int io_index;
2934 uint8_t *ptr;
2935 unsigned long pd;
2936 PhysPageDesc *p;
2937
2938 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2939 if (!p) {
2940 pd = IO_MEM_UNASSIGNED;
2941 } else {
2942 pd = p->phys_offset;
2943 }
2944
2945 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2946 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2947 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2948 } else {
2949 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2950 (addr & ~TARGET_PAGE_MASK);
2951 stl_p(ptr, val);
2952 }
2953 }
2954
2955 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2956 {
2957 int io_index;
2958 uint8_t *ptr;
2959 unsigned long pd;
2960 PhysPageDesc *p;
2961
2962 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2963 if (!p) {
2964 pd = IO_MEM_UNASSIGNED;
2965 } else {
2966 pd = p->phys_offset;
2967 }
2968
2969 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2970 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2971 #ifdef TARGET_WORDS_BIGENDIAN
2972 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2973 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2974 #else
2975 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2976 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2977 #endif
2978 } else {
2979 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2980 (addr & ~TARGET_PAGE_MASK);
2981 stq_p(ptr, val);
2982 }
2983 }
2984
2985 /* warning: addr must be aligned */
2986 void stl_phys(target_phys_addr_t addr, uint32_t val)
2987 {
2988 int io_index;
2989 uint8_t *ptr;
2990 unsigned long pd;
2991 PhysPageDesc *p;
2992
2993 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2994 if (!p) {
2995 pd = IO_MEM_UNASSIGNED;
2996 } else {
2997 pd = p->phys_offset;
2998 }
2999
3000 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3001 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3002 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3003 } else {
3004 unsigned long addr1;
3005 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3006 /* RAM case */
3007 ptr = phys_ram_base + addr1;
3008 stl_p(ptr, val);
3009 if (!cpu_physical_memory_is_dirty(addr1)) {
3010 /* invalidate code */
3011 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3012 /* set dirty bit */
3013 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3014 (0xff & ~CODE_DIRTY_FLAG);
3015 }
3016 }
3017 }
3018
3019 /* XXX: optimize */
3020 void stb_phys(target_phys_addr_t addr, uint32_t val)
3021 {
3022 uint8_t v = val;
3023 cpu_physical_memory_write(addr, &v, 1);
3024 }
3025
3026 /* XXX: optimize */
3027 void stw_phys(target_phys_addr_t addr, uint32_t val)
3028 {
3029 uint16_t v = tswap16(val);
3030 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3031 }
3032
3033 /* XXX: optimize */
3034 void stq_phys(target_phys_addr_t addr, uint64_t val)
3035 {
3036 val = tswap64(val);
3037 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3038 }
3039
3040 #endif
3041
3042 /* virtual memory access for debug */
3043 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3044 uint8_t *buf, int len, int is_write)
3045 {
3046 int l;
3047 target_phys_addr_t phys_addr;
3048 target_ulong page;
3049
3050 while (len > 0) {
3051 page = addr & TARGET_PAGE_MASK;
3052 phys_addr = cpu_get_phys_page_debug(env, page);
3053 /* if no physical page mapped, return an error */
3054 if (phys_addr == -1)
3055 return -1;
3056 l = (page + TARGET_PAGE_SIZE) - addr;
3057 if (l > len)
3058 l = len;
3059 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3060 buf, l, is_write);
3061 len -= l;
3062 buf += l;
3063 addr += l;
3064 }
3065 return 0;
3066 }
3067
3068 /* in deterministic execution mode, instructions doing device I/Os
3069 must be at the end of the TB */
3070 void cpu_io_recompile(CPUState *env, void *retaddr)
3071 {
3072 TranslationBlock *tb;
3073 uint32_t n, cflags;
3074 target_ulong pc, cs_base;
3075 uint64_t flags;
3076
3077 tb = tb_find_pc((unsigned long)retaddr);
3078 if (!tb) {
3079 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3080 retaddr);
3081 }
3082 n = env->icount_decr.u16.low + tb->icount;
3083 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3084 /* Calculate how many instructions had been executed before the fault
3085 occurred. */
3086 n = n - env->icount_decr.u16.low;
3087 /* Generate a new TB ending on the I/O insn. */
3088 n++;
3089 /* On MIPS and SH, delay slot instructions can only be restarted if
3090 they were already the first instruction in the TB. If this is not
3091 the first instruction in a TB then re-execute the preceding
3092 branch. */
3093 #if defined(TARGET_MIPS)
3094 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3095 env->active_tc.PC -= 4;
3096 env->icount_decr.u16.low++;
3097 env->hflags &= ~MIPS_HFLAG_BMASK;
3098 }
3099 #elif defined(TARGET_SH4)
3100 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3101 && n > 1) {
3102 env->pc -= 2;
3103 env->icount_decr.u16.low++;
3104 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3105 }
3106 #endif
3107 /* This should never happen. */
3108 if (n > CF_COUNT_MASK)
3109 cpu_abort(env, "TB too big during recompile");
3110
3111 cflags = n | CF_LAST_IO;
3112 pc = tb->pc;
3113 cs_base = tb->cs_base;
3114 flags = tb->flags;
3115 tb_phys_invalidate(tb, -1);
3116 /* FIXME: In theory this could raise an exception. In practice
3117 we have already translated the block once so it's probably ok. */
3118 tb_gen_code(env, pc, cs_base, flags, cflags);
3119 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3120 the first in the TB) then we end up generating a whole new TB and
3121 repeating the fault, which is horribly inefficient.
3122 Better would be to execute just this insn uncached, or generate a
3123 second new TB. */
3124 cpu_resume_from_signal(env, NULL);
3125 }
3126
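/* print translation buffer statistics (TB counts and sizes, direct
   jump counts, flush and invalidate counters) through cpu_fprintf */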
3127 void dump_exec_info(FILE *f,
3128 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3129 {
3130 int i, target_code_size, max_target_code_size;
3131 int direct_jmp_count, direct_jmp2_count, cross_page;
3132 TranslationBlock *tb;
3133
3134 target_code_size = 0;
3135 max_target_code_size = 0;
3136 cross_page = 0;
3137 direct_jmp_count = 0;
3138 direct_jmp2_count = 0;
3139 for(i = 0; i < nb_tbs; i++) {
3140 tb = &tbs[i];
3141 target_code_size += tb->size;
3142 if (tb->size > max_target_code_size)
3143 max_target_code_size = tb->size;
3144 if (tb->page_addr[1] != -1)
3145 cross_page++;
3146 if (tb->tb_next_offset[0] != 0xffff) {
3147 direct_jmp_count++;
3148 if (tb->tb_next_offset[1] != 0xffff) {
3149 direct_jmp2_count++;
3150 }
3151 }
3152 }
3153 /* XXX: avoid using doubles ? */
3154 cpu_fprintf(f, "Translation buffer state:\n");
3155 cpu_fprintf(f, "gen code size %ld/%ld\n",
3156 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3157 cpu_fprintf(f, "TB count %d/%d\n",
3158 nb_tbs, code_gen_max_blocks);
3159 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3160 nb_tbs ? target_code_size / nb_tbs : 0,
3161 max_target_code_size);
3162 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3163 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3164 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3165 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3166 cross_page,
3167 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3168 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3169 direct_jmp_count,
3170 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3171 direct_jmp2_count,
3172 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3173 cpu_fprintf(f, "\nStatistics:\n");
3174 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3175 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3176 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3177 tcg_dump_info(f, cpu_fprintf);
3178 }
3179
3180 #if !defined(CONFIG_USER_ONLY)
3181
3182 #define MMUSUFFIX _cmmu
3183 #define GETPC() NULL
3184 #define env cpu_single_env
3185 #define SOFTMMU_CODE_ACCESS
3186
3187 #define SHIFT 0
3188 #include "softmmu_template.h"
3189
3190 #define SHIFT 1
3191 #include "softmmu_template.h"
3192
3193 #define SHIFT 2
3194 #include "softmmu_template.h"
3195
3196 #define SHIFT 3
3197 #include "softmmu_template.h"
3198
3199 #undef env
3200
3201 #endif
3202