1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "config.h"
20 #ifdef _WIN32
21 #define WIN32_LEAN_AND_MEAN
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37 #include "qemu-common.h"
38 #include "tcg.h"
39 #include "hw/hw.h"
40 #include "osdep.h"
41 #include "kvm.h"
42 #include "qemu-timer.h"
43 #if defined(CONFIG_USER_ONLY)
44 #include <qemu.h>
45 #endif
46 #ifdef CONFIG_MEMCHECK
47 #include "memcheck/memcheck_api.h"
48 #endif // CONFIG_MEMCHECK
49
50 //#define DEBUG_TB_INVALIDATE
51 //#define DEBUG_FLUSH
52 //#define DEBUG_TLB
53 //#define DEBUG_UNASSIGNED
54
55 /* make various TB consistency checks */
56 //#define DEBUG_TB_CHECK
57 //#define DEBUG_TLB_CHECK
58
59 //#define DEBUG_IOPORT
60 //#define DEBUG_SUBPAGE
61
62 #if !defined(CONFIG_USER_ONLY)
63 /* TB consistency checks only implemented for usermode emulation. */
64 #undef DEBUG_TB_CHECK
65 #endif
66
67 #define SMC_BITMAP_USE_THRESHOLD 10
68
69 #if defined(TARGET_SPARC64)
70 #define TARGET_PHYS_ADDR_SPACE_BITS 41
71 #elif defined(TARGET_SPARC)
72 #define TARGET_PHYS_ADDR_SPACE_BITS 36
73 #elif defined(TARGET_ALPHA)
74 #define TARGET_PHYS_ADDR_SPACE_BITS 42
75 #define TARGET_VIRT_ADDR_SPACE_BITS 42
76 #elif defined(TARGET_PPC64)
77 #define TARGET_PHYS_ADDR_SPACE_BITS 42
78 #elif defined(TARGET_X86_64)
79 #define TARGET_PHYS_ADDR_SPACE_BITS 42
80 #elif defined(TARGET_I386)
81 #define TARGET_PHYS_ADDR_SPACE_BITS 36
82 #else
83 #define TARGET_PHYS_ADDR_SPACE_BITS 32
84 #endif
85
86 static TranslationBlock *tbs;
87 int code_gen_max_blocks;
88 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
89 static int nb_tbs;
90 /* any access to the tbs or the page table must use this lock */
91 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
92
93 #if defined(__arm__) || defined(__sparc_v9__)
94 /* The prologue must be reachable with a direct jump. ARM and Sparc64
95 have limited branch ranges (possibly also PPC) so place it in a
96 section close to the code segment. */
97 #define code_gen_section \
98 __attribute__((__section__(".gen_code"))) \
99 __attribute__((aligned (32)))
100 #elif defined(_WIN32)
101 /* Maximum alignment for Win32 is 16. */
102 #define code_gen_section \
103 __attribute__((aligned (16)))
104 #else
105 #define code_gen_section \
106 __attribute__((aligned (32)))
107 #endif
108
109 uint8_t code_gen_prologue[1024] code_gen_section;
110 static uint8_t *code_gen_buffer;
111 static unsigned long code_gen_buffer_size;
112 /* threshold to flush the translated code buffer */
113 static unsigned long code_gen_buffer_max_size;
114 uint8_t *code_gen_ptr;
115
116 #if !defined(CONFIG_USER_ONLY)
117 int phys_ram_fd;
118 static int in_migration;
119
120 RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
121 #endif
122
123 CPUState *first_cpu;
124 /* current CPU in the current thread. It is only valid inside
125 cpu_exec() */
126 CPUState *cpu_single_env;
127 /* 0 = Do not count executed instructions.
128 1 = Precise instruction counting.
129 2 = Adaptive rate instruction counting. */
130 int use_icount = 0;
131 /* Current instruction counter. While executing translated code this may
132 include some instructions that have not yet been executed. */
133 int64_t qemu_icount;
134
135 typedef struct PageDesc {
136 /* list of TBs intersecting this ram page */
137 TranslationBlock *first_tb;
138 /* to optimize self-modifying code, we count the code write
139 invalidations on a given page and switch to a bitmap past a threshold */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142 #if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144 #endif
145 } PageDesc;
146
147 typedef struct PhysPageDesc {
148 /* offset in host memory of the page + io_index in the low bits */
149 ram_addr_t phys_offset;
150 ram_addr_t region_offset;
151 } PhysPageDesc;
152
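/* Both page tables below are two-level: a page index (address >>
   TARGET_PAGE_BITS) is split into a high part that indexes l1_map /
   l1_phys_map and a low L2_BITS part that indexes a leaf array of
   L2_SIZE descriptors.  The physical map gains one extra level when
   TARGET_PHYS_ADDR_SPACE_BITS exceeds 32. */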
153 #define L2_BITS 10
154 #if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
155 /* XXX: this is a temporary hack for alpha target.
156 * In the future, this is to be replaced by a multi-level table
157 * to actually be able to handle the complete 64 bits address space.
158 */
159 #define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
160 #else
161 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
162 #endif
163
164 #define L1_SIZE (1 << L1_BITS)
165 #define L2_SIZE (1 << L2_BITS)
166
167 unsigned long qemu_real_host_page_size;
168 unsigned long qemu_host_page_bits;
169 unsigned long qemu_host_page_size;
170 unsigned long qemu_host_page_mask;
171
172 /* XXX: for system emulation, it could just be an array */
173 static PageDesc *l1_map[L1_SIZE];
174 static PhysPageDesc **l1_phys_map;
175
176 #if !defined(CONFIG_USER_ONLY)
177 static void io_mem_init(void);
178
179 /* io memory support */
180 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
181 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
182 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
183 static char io_mem_used[IO_MEM_NB_ENTRIES];
184 static int io_mem_watch;
185 #endif
186
187 /* log support */
188 #ifdef WIN32
189 static const char *logfilename = "qemu.log";
190 #else
191 static const char *logfilename = "/tmp/qemu.log";
192 #endif
193 FILE *logfile;
194 int loglevel;
195 static int log_append = 0;
196
197 /* statistics */
198 static int tlb_flush_count;
199 static int tb_flush_count;
200 static int tb_phys_invalidate_count;
201
202 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
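/* A subpage splits a single target page between several I/O regions:
   read/write handlers, opaque pointers and region offsets are recorded
   per offset within the page and per access size. */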
203 typedef struct subpage_t {
204 target_phys_addr_t base;
205 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
206 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
207 void *opaque[TARGET_PAGE_SIZE][2][4];
208 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
209 } subpage_t;
210
211 #ifdef _WIN32
212 static void map_exec(void *addr, long size)
213 {
214 DWORD old_protect;
215 VirtualProtect(addr, size,
216 PAGE_EXECUTE_READWRITE, &old_protect);
217
218 }
219 #else
220 static void map_exec(void *addr, long size)
221 {
222 unsigned long start, end, page_size;
223
224 page_size = getpagesize();
225 start = (unsigned long)addr;
226 start &= ~(page_size - 1);
227
228 end = (unsigned long)addr + size;
229 end += page_size - 1;
230 end &= ~(page_size - 1);
231
232 mprotect((void *)start, end - start,
233 PROT_READ | PROT_WRITE | PROT_EXEC);
234 }
235 #endif
236
237 static void page_init(void)
238 {
239 /* NOTE: we can always suppose that qemu_host_page_size >=
240 TARGET_PAGE_SIZE */
241 #ifdef _WIN32
242 {
243 SYSTEM_INFO system_info;
244
245 GetSystemInfo(&system_info);
246 qemu_real_host_page_size = system_info.dwPageSize;
247 }
248 #else
249 qemu_real_host_page_size = getpagesize();
250 #endif
251 if (qemu_host_page_size == 0)
252 qemu_host_page_size = qemu_real_host_page_size;
253 if (qemu_host_page_size < TARGET_PAGE_SIZE)
254 qemu_host_page_size = TARGET_PAGE_SIZE;
255 qemu_host_page_bits = 0;
256 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
257 qemu_host_page_bits++;
258 qemu_host_page_mask = ~(qemu_host_page_size - 1);
259 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
260 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
261
262 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
263 {
264 long long startaddr, endaddr;
265 FILE *f;
266 int n;
267
268 mmap_lock();
269 last_brk = (unsigned long)sbrk(0);
270 f = fopen("/proc/self/maps", "r");
271 if (f) {
272 do {
273 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
274 if (n == 2) {
275 startaddr = MIN(startaddr,
276 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
277 endaddr = MIN(endaddr,
278 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
279 page_set_flags(startaddr & TARGET_PAGE_MASK,
280 TARGET_PAGE_ALIGN(endaddr),
281 PAGE_RESERVED);
282 }
283 } while (!feof(f));
284 fclose(f);
285 }
286 mmap_unlock();
287 }
288 #endif
289 }
290
291 static inline PageDesc **page_l1_map(target_ulong index)
292 {
293 #if TARGET_LONG_BITS > 32
294 /* Host memory outside guest VM. For 32-bit targets we have already
295 excluded high addresses. */
296 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
297 return NULL;
298 #endif
299 return &l1_map[index >> L2_BITS];
300 }
301
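/* Return the PageDesc for a target page index, allocating the L2 leaf
   array on demand; page_find() below is the non-allocating variant. */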
302 static inline PageDesc *page_find_alloc(target_ulong index)
303 {
304 PageDesc **lp, *p;
305 lp = page_l1_map(index);
306 if (!lp)
307 return NULL;
308
309 p = *lp;
310 if (!p) {
311 /* allocate if not found */
312 #if defined(CONFIG_USER_ONLY)
313 size_t len = sizeof(PageDesc) * L2_SIZE;
314 /* Don't use qemu_malloc because it may recurse. */
315 p = mmap(NULL, len, PROT_READ | PROT_WRITE,
316 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
317 *lp = p;
318 if (h2g_valid(p)) {
319 unsigned long addr = h2g(p);
320 page_set_flags(addr & TARGET_PAGE_MASK,
321 TARGET_PAGE_ALIGN(addr + len),
322 PAGE_RESERVED);
323 }
324 #else
325 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
326 *lp = p;
327 #endif
328 }
329 return p + (index & (L2_SIZE - 1));
330 }
331
332 static inline PageDesc *page_find(target_ulong index)
333 {
334 PageDesc **lp, *p;
335 lp = page_l1_map(index);
336 if (!lp)
337 return NULL;
338
339 p = *lp;
340 if (!p) {
341 return NULL;
342 }
343 return p + (index & (L2_SIZE - 1));
344 }
345
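/* Look up the PhysPageDesc for a physical page index.  If 'alloc' is
   set, missing intermediate and leaf tables are allocated and fresh
   entries are initialized to IO_MEM_UNASSIGNED. */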
346 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347 {
348 void **lp, **p;
349 PhysPageDesc *pd;
350
351 p = (void **)l1_phys_map;
352 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
353
354 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356 #endif
357 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358 p = *lp;
359 if (!p) {
360 /* allocate if not found */
361 if (!alloc)
362 return NULL;
363 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364 memset(p, 0, sizeof(void *) * L1_SIZE);
365 *lp = p;
366 }
367 #endif
368 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369 pd = *lp;
370 if (!pd) {
371 int i;
372 /* allocate if not found */
373 if (!alloc)
374 return NULL;
375 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376 *lp = pd;
377 for (i = 0; i < L2_SIZE; i++) {
378 pd[i].phys_offset = IO_MEM_UNASSIGNED;
379 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380 }
381 }
382 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383 }
384
385 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386 {
387 return phys_page_find_alloc(index, 0);
388 }
389
390 #if !defined(CONFIG_USER_ONLY)
391 static void tlb_protect_code(ram_addr_t ram_addr);
392 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393 target_ulong vaddr);
394 #define mmap_lock() do { } while(0)
395 #define mmap_unlock() do { } while(0)
396 #endif
397
398 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399
400 #if defined(CONFIG_USER_ONLY)
401 /* Currently it is not recommended to allocate big chunks of data in
402 user mode. It will change when a dedicated libc is used. */
403 #define USE_STATIC_CODE_GEN_BUFFER
404 #endif
405
406 #ifdef USE_STATIC_CODE_GEN_BUFFER
407 static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408 #endif
409
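/* Allocate the buffer that receives generated host code: either the
   static array above or an mmap()ed region.  On several hosts the
   region is placed low in the address space so the generated code can
   use direct calls and branches. */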
410 static void code_gen_alloc(unsigned long tb_size)
411 {
412 #ifdef USE_STATIC_CODE_GEN_BUFFER
413 code_gen_buffer = static_code_gen_buffer;
414 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415 map_exec(code_gen_buffer, code_gen_buffer_size);
416 #else
417 code_gen_buffer_size = tb_size;
418 if (code_gen_buffer_size == 0) {
419 #if defined(CONFIG_USER_ONLY)
420 /* in user mode, phys_ram_size is not meaningful */
421 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422 #else
423 /* XXX: needs adjustments */
424 code_gen_buffer_size = (unsigned long)(ram_size / 4);
425 #endif
426 }
427 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429 /* The code gen buffer location may have constraints depending on
430 the host cpu and OS */
431 #if defined(__linux__)
432 {
433 int flags;
434 void *start = NULL;
435
436 flags = MAP_PRIVATE | MAP_ANONYMOUS;
437 #if defined(__x86_64__)
438 flags |= MAP_32BIT;
439 /* Cannot map more than that */
440 if (code_gen_buffer_size > (800 * 1024 * 1024))
441 code_gen_buffer_size = (800 * 1024 * 1024);
442 #elif defined(__sparc_v9__)
443 // Map the buffer below 2G, so we can use direct calls and branches
444 flags |= MAP_FIXED;
445 start = (void *) 0x60000000UL;
446 if (code_gen_buffer_size > (512 * 1024 * 1024))
447 code_gen_buffer_size = (512 * 1024 * 1024);
448 #elif defined(__arm__)
449 /* Map the buffer below 32M, so we can use direct calls and branches */
450 flags |= MAP_FIXED;
451 start = (void *) 0x01000000UL;
452 if (code_gen_buffer_size > 16 * 1024 * 1024)
453 code_gen_buffer_size = 16 * 1024 * 1024;
454 #elif defined(__s390x__)
455 /* Map the buffer so that we can use direct calls and branches. */
456 /* We have a +- 4GB range on the branches; leave some slop. */
457 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
458 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
459 }
460 start = (void *)0x90000000UL;
461 #endif
462 code_gen_buffer = mmap(start, code_gen_buffer_size,
463 PROT_WRITE | PROT_READ | PROT_EXEC,
464 flags, -1, 0);
465 if (code_gen_buffer == MAP_FAILED) {
466 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467 exit(1);
468 }
469 }
470 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
471 || defined(__DragonFly__) || defined(__OpenBSD__)
472 {
473 int flags;
474 void *addr = NULL;
475 flags = MAP_PRIVATE | MAP_ANONYMOUS;
476 #if defined(__x86_64__)
477 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
478 * 0x40000000 is free */
479 flags |= MAP_FIXED;
480 addr = (void *)0x40000000;
481 /* Cannot map more than that */
482 if (code_gen_buffer_size > (800 * 1024 * 1024))
483 code_gen_buffer_size = (800 * 1024 * 1024);
484 #elif defined(__sparc_v9__)
485 // Map the buffer below 2G, so we can use direct calls and branches
486 flags |= MAP_FIXED;
487 addr = (void *) 0x60000000UL;
488 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
489 code_gen_buffer_size = (512 * 1024 * 1024);
490 }
491 #endif
492 code_gen_buffer = mmap(addr, code_gen_buffer_size,
493 PROT_WRITE | PROT_READ | PROT_EXEC,
494 flags, -1, 0);
495 if (code_gen_buffer == MAP_FAILED) {
496 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
497 exit(1);
498 }
499 }
500 #else
501 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
502 map_exec(code_gen_buffer, code_gen_buffer_size);
503 #endif
504 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
505 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
506 code_gen_buffer_max_size = code_gen_buffer_size -
507 code_gen_max_block_size();
508 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
509 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
510 }
511
512 /* Must be called before using the QEMU cpus. 'tb_size' is the size
513 (in bytes) allocated to the translation buffer. Zero means default
514 size. */
515 void cpu_exec_init_all(unsigned long tb_size)
516 {
517 cpu_gen_init();
518 code_gen_alloc(tb_size);
519 code_gen_ptr = code_gen_buffer;
520 page_init();
521 #if !defined(CONFIG_USER_ONLY)
522 io_mem_init();
523 #endif
524 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
525 /* There's no guest base to take into account, so go ahead and
526 initialize the prologue now. */
527 tcg_prologue_init(&tcg_ctx);
528 #endif
529 }
530
531 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
532
533 #define CPU_COMMON_SAVE_VERSION 1
534
535 static void cpu_common_save(QEMUFile *f, void *opaque)
536 {
537 CPUState *env = opaque;
538
539 cpu_synchronize_state(env, 0);
540
541 qemu_put_be32s(f, &env->halted);
542 qemu_put_be32s(f, &env->interrupt_request);
543 }
544
545 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
546 {
547 CPUState *env = opaque;
548
549 if (version_id != CPU_COMMON_SAVE_VERSION)
550 return -EINVAL;
551
552 qemu_get_be32s(f, &env->halted);
553 qemu_get_be32s(f, &env->interrupt_request);
554 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
555 version_id is increased. */
556 env->interrupt_request &= ~0x01;
557 tlb_flush(env, 1);
558 cpu_synchronize_state(env, 1);
559
560 return 0;
561 }
562 #endif
563
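/* Return the CPUState whose cpu_index equals 'cpu', or NULL if there
   is no such CPU. */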
564 CPUState *qemu_get_cpu(int cpu)
565 {
566 CPUState *env = first_cpu;
567
568 while (env) {
569 if (env->cpu_index == cpu)
570 break;
571 env = env->next_cpu;
572 }
573
574 return env;
575 }
576
577 void cpu_exec_init(CPUState *env)
578 {
579 CPUState **penv;
580 int cpu_index;
581
582 #if defined(CONFIG_USER_ONLY)
583 cpu_list_lock();
584 #endif
585 env->next_cpu = NULL;
586 penv = &first_cpu;
587 cpu_index = 0;
588 while (*penv != NULL) {
589 penv = &(*penv)->next_cpu;
590 cpu_index++;
591 }
592 env->cpu_index = cpu_index;
593 env->numa_node = 0;
594 QTAILQ_INIT(&env->breakpoints);
595 QTAILQ_INIT(&env->watchpoints);
596 *penv = env;
597 #if defined(CONFIG_USER_ONLY)
598 cpu_list_unlock();
599 #endif
600 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
602 cpu_common_save, cpu_common_load, env);
603 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
604 cpu_save, cpu_load, env);
605 #endif
606 }
607
608 static inline void invalidate_page_bitmap(PageDesc *p)
609 {
610 if (p->code_bitmap) {
611 qemu_free(p->code_bitmap);
612 p->code_bitmap = NULL;
613 }
614 p->code_write_count = 0;
615 }
616
617 /* set to NULL all the 'first_tb' fields in all PageDescs */
618 static void page_flush_tb(void)
619 {
620 int i, j;
621 PageDesc *p;
622
623 for(i = 0; i < L1_SIZE; i++) {
624 p = l1_map[i];
625 if (p) {
626 for(j = 0; j < L2_SIZE; j++) {
627 p->first_tb = NULL;
628 invalidate_page_bitmap(p);
629 p++;
630 }
631 }
632 }
633 }
634
635 /* flush all the translation blocks */
636 /* XXX: tb_flush is currently not thread safe */
637 void tb_flush(CPUState *env1)
638 {
639 CPUState *env;
640 #if defined(DEBUG_FLUSH)
641 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
642 (unsigned long)(code_gen_ptr - code_gen_buffer),
643 nb_tbs, nb_tbs > 0 ?
644 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
645 #endif
646 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
647 cpu_abort(env1, "Internal error: code buffer overflow\n");
648
649 nb_tbs = 0;
650
651 for(env = first_cpu; env != NULL; env = env->next_cpu) {
652 #ifdef CONFIG_MEMCHECK
653 int tb_to_clean;
654 for (tb_to_clean = 0; tb_to_clean < TB_JMP_CACHE_SIZE; tb_to_clean++) {
655 if (env->tb_jmp_cache[tb_to_clean] != NULL &&
656 env->tb_jmp_cache[tb_to_clean]->tpc2gpc != NULL) {
657 qemu_free(env->tb_jmp_cache[tb_to_clean]->tpc2gpc);
658 env->tb_jmp_cache[tb_to_clean]->tpc2gpc = NULL;
659 env->tb_jmp_cache[tb_to_clean]->tpc2gpc_pairs = 0;
660 }
661 }
662 #endif // CONFIG_MEMCHECK
663 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
664 }
665
666 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
667 page_flush_tb();
668
669 code_gen_ptr = code_gen_buffer;
670 /* XXX: flush processor icache at this point if cache flush is
671 expensive */
672 tb_flush_count++;
673 }
674
675 #ifdef DEBUG_TB_CHECK
676
677 static void tb_invalidate_check(target_ulong address)
678 {
679 TranslationBlock *tb;
680 int i;
681 address &= TARGET_PAGE_MASK;
682 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
683 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
684 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
685 address >= tb->pc + tb->size)) {
686 printf("ERROR invalidate: address=" TARGET_FMT_lx
687 " PC=%08lx size=%04x\n",
688 address, (long)tb->pc, tb->size);
689 }
690 }
691 }
692 }
693
694 /* verify that all the pages have correct rights for code */
695 static void tb_page_check(void)
696 {
697 TranslationBlock *tb;
698 int i, flags1, flags2;
699
700 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
701 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
702 flags1 = page_get_flags(tb->pc);
703 flags2 = page_get_flags(tb->pc + tb->size - 1);
704 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
705 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
706 (long)tb->pc, tb->size, flags1, flags2);
707 }
708 }
709 }
710 }
711
712 #endif
713
714 /* invalidate one TB */
715 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
716 int next_offset)
717 {
718 TranslationBlock *tb1;
719 for(;;) {
720 tb1 = *ptb;
721 if (tb1 == tb) {
722 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
723 break;
724 }
725 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
726 }
727 }
728
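/* TB list pointers carry a tag in their two low bits: in the per-page
   lists the tag (0 or 1) selects which of the TB's two pages the link
   belongs to, and the value 2 marks the head of the circular jump
   list, so pointers must be masked with ~3 before being dereferenced. */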
729 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
730 {
731 TranslationBlock *tb1;
732 unsigned int n1;
733
734 for(;;) {
735 tb1 = *ptb;
736 n1 = (long)tb1 & 3;
737 tb1 = (TranslationBlock *)((long)tb1 & ~3);
738 if (tb1 == tb) {
739 *ptb = tb1->page_next[n1];
740 break;
741 }
742 ptb = &tb1->page_next[n1];
743 }
744 }
745
746 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
747 {
748 TranslationBlock *tb1, **ptb;
749 unsigned int n1;
750
751 ptb = &tb->jmp_next[n];
752 tb1 = *ptb;
753 if (tb1) {
754 /* find tb(n) in circular list */
755 for(;;) {
756 tb1 = *ptb;
757 n1 = (long)tb1 & 3;
758 tb1 = (TranslationBlock *)((long)tb1 & ~3);
759 if (n1 == n && tb1 == tb)
760 break;
761 if (n1 == 2) {
762 ptb = &tb1->jmp_first;
763 } else {
764 ptb = &tb1->jmp_next[n1];
765 }
766 }
767 /* now we can suppress tb(n) from the list */
768 *ptb = tb->jmp_next[n];
769
770 tb->jmp_next[n] = NULL;
771 }
772 }
773
774 /* reset the jump entry 'n' of a TB so that it is not chained to
775 another TB */
776 static inline void tb_reset_jump(TranslationBlock *tb, int n)
777 {
778 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
779 }
780
781 void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
782 {
783 CPUState *env;
784 PageDesc *p;
785 unsigned int h, n1;
786 target_phys_addr_t phys_pc;
787 TranslationBlock *tb1, *tb2;
788
789 /* remove the TB from the hash list */
790 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
791 h = tb_phys_hash_func(phys_pc);
792 tb_remove(&tb_phys_hash[h], tb,
793 offsetof(TranslationBlock, phys_hash_next));
794
795 /* remove the TB from the page list */
796 if (tb->page_addr[0] != page_addr) {
797 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
798 tb_page_remove(&p->first_tb, tb);
799 invalidate_page_bitmap(p);
800 }
801 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
802 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
803 tb_page_remove(&p->first_tb, tb);
804 invalidate_page_bitmap(p);
805 }
806
807 tb_invalidated_flag = 1;
808
809 /* remove the TB from each CPU's tb_jmp_cache */
810 h = tb_jmp_cache_hash_func(tb->pc);
811 for(env = first_cpu; env != NULL; env = env->next_cpu) {
812 if (env->tb_jmp_cache[h] == tb)
813 env->tb_jmp_cache[h] = NULL;
814 }
815
816 /* suppress this TB from the two jump lists */
817 tb_jmp_remove(tb, 0);
818 tb_jmp_remove(tb, 1);
819
820 /* suppress any remaining jumps to this TB */
821 tb1 = tb->jmp_first;
822 for(;;) {
823 n1 = (long)tb1 & 3;
824 if (n1 == 2)
825 break;
826 tb1 = (TranslationBlock *)((long)tb1 & ~3);
827 tb2 = tb1->jmp_next[n1];
828 tb_reset_jump(tb1, n1);
829 tb1->jmp_next[n1] = NULL;
830 tb1 = tb2;
831 }
832 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
833
834 #ifdef CONFIG_MEMCHECK
835 if (tb->tpc2gpc != NULL) {
836 qemu_free(tb->tpc2gpc);
837 tb->tpc2gpc = NULL;
838 tb->tpc2gpc_pairs = 0;
839 }
840 #endif // CONFIG_MEMCHECK
841
842 tb_phys_invalidate_count++;
843 }
844
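/* set bits [start, start + len) in the bitmap 'tab' */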
845 static inline void set_bits(uint8_t *tab, int start, int len)
846 {
847 int end, mask, end1;
848
849 end = start + len;
850 tab += start >> 3;
851 mask = 0xff << (start & 7);
852 if ((start & ~7) == (end & ~7)) {
853 if (start < end) {
854 mask &= ~(0xff << (end & 7));
855 *tab |= mask;
856 }
857 } else {
858 *tab++ |= mask;
859 start = (start + 8) & ~7;
860 end1 = end & ~7;
861 while (start < end1) {
862 *tab++ = 0xff;
863 start += 8;
864 }
865 if (start < end) {
866 mask = ~(0xff << (end & 7));
867 *tab |= mask;
868 }
869 }
870 }
871
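/* Build p->code_bitmap from the TBs intersecting the page, marking the
   bytes covered by translated code; tb_invalidate_phys_page_fast()
   consults the bitmap to skip writes that hit no TB. */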
872 static void build_page_bitmap(PageDesc *p)
873 {
874 int n, tb_start, tb_end;
875 TranslationBlock *tb;
876
877 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
878
879 tb = p->first_tb;
880 while (tb != NULL) {
881 n = (long)tb & 3;
882 tb = (TranslationBlock *)((long)tb & ~3);
883 /* NOTE: this is subtle as a TB may span two physical pages */
884 if (n == 0) {
885 /* NOTE: tb_end may be after the end of the page, but
886 it is not a problem */
887 tb_start = tb->pc & ~TARGET_PAGE_MASK;
888 tb_end = tb_start + tb->size;
889 if (tb_end > TARGET_PAGE_SIZE)
890 tb_end = TARGET_PAGE_SIZE;
891 } else {
892 tb_start = 0;
893 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
894 }
895 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
896 tb = tb->page_next[n];
897 }
898 }
899
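/* Translate a new TB for (pc, cs_base, flags, cflags).  If the TB
   array or the code buffer is exhausted, everything is flushed first,
   so the retried allocation cannot fail. */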
900 TranslationBlock *tb_gen_code(CPUState *env,
901 target_ulong pc, target_ulong cs_base,
902 int flags, int cflags)
903 {
904 TranslationBlock *tb;
905 uint8_t *tc_ptr;
906 target_ulong phys_pc, phys_page2, virt_page2;
907 int code_gen_size;
908
909 phys_pc = get_phys_addr_code(env, pc);
910 tb = tb_alloc(pc);
911 if (!tb) {
912 /* flush must be done */
913 tb_flush(env);
914 /* cannot fail at this point */
915 tb = tb_alloc(pc);
916 /* Don't forget to invalidate previous TB info. */
917 tb_invalidated_flag = 1;
918 }
919 tc_ptr = code_gen_ptr;
920 tb->tc_ptr = tc_ptr;
921 tb->cs_base = cs_base;
922 tb->flags = flags;
923 tb->cflags = cflags;
924 #ifdef CONFIG_TRACE
925 tb->bb_rec = NULL;
926 tb->prev_time = 0;
927 #endif
928 cpu_gen_code(env, tb, &code_gen_size);
929 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
930
931 /* check next page if needed */
932 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
933 phys_page2 = -1;
934 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
935 phys_page2 = get_phys_addr_code(env, virt_page2);
936 }
937 tb_link_phys(tb, phys_pc, phys_page2);
938 return tb;
939 }
940
941 /* invalidate all TBs which intersect with the target physical page
942 starting in range [start, end). NOTE: start and end must refer to
943 the same physical page. 'is_cpu_write_access' should be true if called
944 from a real cpu write access: the virtual CPU will exit the current
945 TB if code is modified inside this TB. */
946 void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
947 int is_cpu_write_access)
948 {
949 TranslationBlock *tb, *tb_next, *saved_tb;
950 CPUState *env = cpu_single_env;
951 target_ulong tb_start, tb_end;
952 PageDesc *p;
953 int n;
954 #ifdef TARGET_HAS_PRECISE_SMC
955 int current_tb_not_found = is_cpu_write_access;
956 TranslationBlock *current_tb = NULL;
957 int current_tb_modified = 0;
958 target_ulong current_pc = 0;
959 target_ulong current_cs_base = 0;
960 int current_flags = 0;
961 #endif /* TARGET_HAS_PRECISE_SMC */
962
963 p = page_find(start >> TARGET_PAGE_BITS);
964 if (!p)
965 return;
966 if (!p->code_bitmap &&
967 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
968 is_cpu_write_access) {
969 /* build code bitmap */
970 build_page_bitmap(p);
971 }
972
973 /* we remove all the TBs in the range [start, end) */
974 /* XXX: see if in some cases it could be faster to invalidate all the code */
975 tb = p->first_tb;
976 while (tb != NULL) {
977 n = (long)tb & 3;
978 tb = (TranslationBlock *)((long)tb & ~3);
979 tb_next = tb->page_next[n];
980 /* NOTE: this is subtle as a TB may span two physical pages */
981 if (n == 0) {
982 /* NOTE: tb_end may be after the end of the page, but
983 it is not a problem */
984 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
985 tb_end = tb_start + tb->size;
986 } else {
987 tb_start = tb->page_addr[1];
988 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
989 }
990 if (!(tb_end <= start || tb_start >= end)) {
991 #ifdef TARGET_HAS_PRECISE_SMC
992 if (current_tb_not_found) {
993 current_tb_not_found = 0;
994 current_tb = NULL;
995 if (env->mem_io_pc) {
996 /* now we have a real cpu fault */
997 current_tb = tb_find_pc(env->mem_io_pc);
998 }
999 }
1000 if (current_tb == tb &&
1001 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1002 /* If we are modifying the current TB, we must stop
1003 its execution. We could be more precise by checking
1004 that the modification is after the current PC, but it
1005 would require a specialized function to partially
1006 restore the CPU state */
1007
1008 current_tb_modified = 1;
1009 cpu_restore_state(current_tb, env, env->mem_io_pc);
1010 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1011 &current_flags);
1012 }
1013 #endif /* TARGET_HAS_PRECISE_SMC */
1014 /* we need to do that to handle the case where a signal
1015 occurs while doing tb_phys_invalidate() */
1016 saved_tb = NULL;
1017 if (env) {
1018 saved_tb = env->current_tb;
1019 env->current_tb = NULL;
1020 }
1021 tb_phys_invalidate(tb, -1);
1022 if (env) {
1023 env->current_tb = saved_tb;
1024 if (env->interrupt_request && env->current_tb)
1025 cpu_interrupt(env, env->interrupt_request);
1026 }
1027 }
1028 tb = tb_next;
1029 }
1030 #if !defined(CONFIG_USER_ONLY)
1031 /* if no code remaining, no need to continue to use slow writes */
1032 if (!p->first_tb) {
1033 invalidate_page_bitmap(p);
1034 if (is_cpu_write_access) {
1035 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1036 }
1037 }
1038 #endif
1039 #ifdef TARGET_HAS_PRECISE_SMC
1040 if (current_tb_modified) {
1041 /* we generate a block containing just the instruction
1042 modifying the memory. It will ensure that it cannot modify
1043 itself */
1044 env->current_tb = NULL;
1045 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1046 cpu_resume_from_signal(env, NULL);
1047 }
1048 #endif
1049 }
1050
1051 /* len must be <= 8 and start must be a multiple of len */
1052 static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1053 {
1054 PageDesc *p;
1055 int offset, b;
1056 #if 0
1057 if (1) {
1058 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1059 cpu_single_env->mem_io_vaddr, len,
1060 cpu_single_env->eip,
1061 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1062 }
1063 #endif
1064 p = page_find(start >> TARGET_PAGE_BITS);
1065 if (!p)
1066 return;
1067 if (p->code_bitmap) {
1068 offset = start & ~TARGET_PAGE_MASK;
1069 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1070 if (b & ((1 << len) - 1))
1071 goto do_invalidate;
1072 } else {
1073 do_invalidate:
1074 tb_invalidate_phys_page_range(start, start + len, 1);
1075 }
1076 }
1077
1078 #if !defined(CONFIG_SOFTMMU)
1079 static void tb_invalidate_phys_page(target_phys_addr_t addr,
1080 unsigned long pc, void *puc)
1081 {
1082 TranslationBlock *tb;
1083 PageDesc *p;
1084 int n;
1085 #ifdef TARGET_HAS_PRECISE_SMC
1086 TranslationBlock *current_tb = NULL;
1087 CPUState *env = cpu_single_env;
1088 int current_tb_modified = 0;
1089 target_ulong current_pc = 0;
1090 target_ulong current_cs_base = 0;
1091 int current_flags = 0;
1092 #endif
1093
1094 addr &= TARGET_PAGE_MASK;
1095 p = page_find(addr >> TARGET_PAGE_BITS);
1096 if (!p)
1097 return;
1098 tb = p->first_tb;
1099 #ifdef TARGET_HAS_PRECISE_SMC
1100 if (tb && pc != 0) {
1101 current_tb = tb_find_pc(pc);
1102 }
1103 #endif
1104 while (tb != NULL) {
1105 n = (long)tb & 3;
1106 tb = (TranslationBlock *)((long)tb & ~3);
1107 #ifdef TARGET_HAS_PRECISE_SMC
1108 if (current_tb == tb &&
1109 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1110 /* If we are modifying the current TB, we must stop
1111 its execution. We could be more precise by checking
1112 that the modification is after the current PC, but it
1113 would require a specialized function to partially
1114 restore the CPU state */
1115
1116 current_tb_modified = 1;
1117 cpu_restore_state(current_tb, env, pc);
1118 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1119 &current_flags);
1120 }
1121 #endif /* TARGET_HAS_PRECISE_SMC */
1122 tb_phys_invalidate(tb, addr);
1123 tb = tb->page_next[n];
1124 }
1125 p->first_tb = NULL;
1126 #ifdef TARGET_HAS_PRECISE_SMC
1127 if (current_tb_modified) {
1128 /* we generate a block containing just the instruction
1129 modifying the memory. It will ensure that it cannot modify
1130 itself */
1131 env->current_tb = NULL;
1132 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1133 cpu_resume_from_signal(env, puc);
1134 }
1135 #endif
1136 }
1137 #endif
1138
1139 /* add the tb in the target page and protect it if necessary */
1140 static inline void tb_alloc_page(TranslationBlock *tb,
1141 unsigned int n, target_ulong page_addr)
1142 {
1143 PageDesc *p;
1144 TranslationBlock *last_first_tb;
1145
1146 tb->page_addr[n] = page_addr;
1147 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1148 tb->page_next[n] = p->first_tb;
1149 last_first_tb = p->first_tb;
1150 p->first_tb = (TranslationBlock *)((long)tb | n);
1151 invalidate_page_bitmap(p);
1152
1153 #if defined(TARGET_HAS_SMC) || 1
1154
1155 #if defined(CONFIG_USER_ONLY)
1156 if (p->flags & PAGE_WRITE) {
1157 target_ulong addr;
1158 PageDesc *p2;
1159 int prot;
1160
1161 /* force the host page as non writable (writes will have a
1162 page fault + mprotect overhead) */
1163 page_addr &= qemu_host_page_mask;
1164 prot = 0;
1165 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1166 addr += TARGET_PAGE_SIZE) {
1167
1168 p2 = page_find (addr >> TARGET_PAGE_BITS);
1169 if (!p2)
1170 continue;
1171 prot |= p2->flags;
1172 p2->flags &= ~PAGE_WRITE;
1173 page_get_flags(addr);
1174 }
1175 mprotect(g2h(page_addr), qemu_host_page_size,
1176 (prot & PAGE_BITS) & ~PAGE_WRITE);
1177 #ifdef DEBUG_TB_INVALIDATE
1178 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1179 page_addr);
1180 #endif
1181 }
1182 #else
1183 /* if some code is already present, then the pages are already
1184 protected. So we handle the case where only the first TB is
1185 allocated in a physical page */
1186 if (!last_first_tb) {
1187 tlb_protect_code(page_addr);
1188 }
1189 #endif
1190
1191 #endif /* TARGET_HAS_SMC */
1192 }
1193
1194 /* Allocate a new translation block. Flush the translation buffer if
1195 too many translation blocks or too much generated code. */
1196 TranslationBlock *tb_alloc(target_ulong pc)
1197 {
1198 TranslationBlock *tb;
1199
1200 if (nb_tbs >= code_gen_max_blocks ||
1201 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1202 return NULL;
1203 tb = &tbs[nb_tbs++];
1204 tb->pc = pc;
1205 tb->cflags = 0;
1206 #ifdef CONFIG_MEMCHECK
1207 tb->tpc2gpc = NULL;
1208 tb->tpc2gpc_pairs = 0;
1209 #endif // CONFIG_MEMCHECK
1210 return tb;
1211 }
1212
1213 void tb_free(TranslationBlock *tb)
1214 {
1215 /* In practice this is mostly used for single use temporary TBs.
1216 Ignore the hard cases and just back up if this TB happens to
1217 be the last one generated. */
1218 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1219 code_gen_ptr = tb->tc_ptr;
1220 nb_tbs--;
1221 }
1222 }
1223
1224 /* add a new TB and link it to the physical page tables. phys_page2 is
1225 (-1) to indicate that only one page contains the TB. */
1226 void tb_link_phys(TranslationBlock *tb,
1227 target_ulong phys_pc, target_ulong phys_page2)
1228 {
1229 unsigned int h;
1230 TranslationBlock **ptb;
1231
1232 /* Grab the mmap lock to stop another thread invalidating this TB
1233 before we are done. */
1234 mmap_lock();
1235 /* add in the physical hash table */
1236 h = tb_phys_hash_func(phys_pc);
1237 ptb = &tb_phys_hash[h];
1238 tb->phys_hash_next = *ptb;
1239 *ptb = tb;
1240
1241 /* add in the page list */
1242 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1243 if (phys_page2 != -1)
1244 tb_alloc_page(tb, 1, phys_page2);
1245 else
1246 tb->page_addr[1] = -1;
1247
1248 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1249 tb->jmp_next[0] = NULL;
1250 tb->jmp_next[1] = NULL;
1251
1252 /* init original jump addresses */
1253 if (tb->tb_next_offset[0] != 0xffff)
1254 tb_reset_jump(tb, 0);
1255 if (tb->tb_next_offset[1] != 0xffff)
1256 tb_reset_jump(tb, 1);
1257
1258 #ifdef DEBUG_TB_CHECK
1259 tb_page_check();
1260 #endif
1261 mmap_unlock();
1262 }
1263
1264 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1265 tb[1].tc_ptr. Return NULL if not found */
1266 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1267 {
1268 int m_min, m_max, m;
1269 unsigned long v;
1270 TranslationBlock *tb;
1271
1272 if (nb_tbs <= 0)
1273 return NULL;
1274 if (tc_ptr < (unsigned long)code_gen_buffer ||
1275 tc_ptr >= (unsigned long)code_gen_ptr)
1276 return NULL;
1277 /* binary search (cf Knuth) */
1278 m_min = 0;
1279 m_max = nb_tbs - 1;
1280 while (m_min <= m_max) {
1281 m = (m_min + m_max) >> 1;
1282 tb = &tbs[m];
1283 v = (unsigned long)tb->tc_ptr;
1284 if (v == tc_ptr)
1285 return tb;
1286 else if (tc_ptr < v) {
1287 m_max = m - 1;
1288 } else {
1289 m_min = m + 1;
1290 }
1291 }
1292 return &tbs[m_max];
1293 }
1294
1295 static void tb_reset_jump_recursive(TranslationBlock *tb);
1296
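/* Unchain direction 'n' of 'tb': locate the TB it jumps to, remove
   'tb' from that TB's circular jmp_first list, reset the generated
   jump, then recursively unchain the destination TB. */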
1297 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1298 {
1299 TranslationBlock *tb1, *tb_next, **ptb;
1300 unsigned int n1;
1301
1302 tb1 = tb->jmp_next[n];
1303 if (tb1 != NULL) {
1304 /* find head of list */
1305 for(;;) {
1306 n1 = (long)tb1 & 3;
1307 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1308 if (n1 == 2)
1309 break;
1310 tb1 = tb1->jmp_next[n1];
1311 }
1312 /* we are now sure that tb jumps to tb1 */
1313 tb_next = tb1;
1314
1315 /* remove tb from the jmp_first list */
1316 ptb = &tb_next->jmp_first;
1317 for(;;) {
1318 tb1 = *ptb;
1319 n1 = (long)tb1 & 3;
1320 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1321 if (n1 == n && tb1 == tb)
1322 break;
1323 ptb = &tb1->jmp_next[n1];
1324 }
1325 *ptb = tb->jmp_next[n];
1326 tb->jmp_next[n] = NULL;
1327
1328 /* suppress the jump to next tb in generated code */
1329 tb_reset_jump(tb, n);
1330
1331 /* suppress jumps in the tb on which we could have jumped */
1332 tb_reset_jump_recursive(tb_next);
1333 }
1334 }
1335
1336 static void tb_reset_jump_recursive(TranslationBlock *tb)
1337 {
1338 tb_reset_jump_recursive2(tb, 0);
1339 tb_reset_jump_recursive2(tb, 1);
1340 }
1341
1342 #if defined(TARGET_HAS_ICE)
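/* Invalidate the translated code containing the breakpoint address so
   that it is retranslated with the breakpoint taken into account. */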
1343 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1344 {
1345 target_phys_addr_t addr;
1346 target_ulong pd;
1347 ram_addr_t ram_addr;
1348 PhysPageDesc *p;
1349
1350 addr = cpu_get_phys_page_debug(env, pc);
1351 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1352 if (!p) {
1353 pd = IO_MEM_UNASSIGNED;
1354 } else {
1355 pd = p->phys_offset;
1356 }
1357 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1358 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1359 }
1360 #endif
1361
1362 /* Add a watchpoint. */
1363 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1364 int flags, CPUWatchpoint **watchpoint)
1365 {
1366 target_ulong len_mask = ~(len - 1);
1367 CPUWatchpoint *wp;
1368
1369 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1370 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1371 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1372 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1373 return -EINVAL;
1374 }
1375 wp = qemu_malloc(sizeof(*wp));
1376
1377 wp->vaddr = addr;
1378 wp->len_mask = len_mask;
1379 wp->flags = flags;
1380
1381 /* keep all GDB-injected watchpoints in front */
1382 if (flags & BP_GDB)
1383 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1384 else
1385 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1386
1387 tlb_flush_page(env, addr);
1388
1389 if (watchpoint)
1390 *watchpoint = wp;
1391 return 0;
1392 }
1393
1394 /* Remove a specific watchpoint. */
1395 int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1396 int flags)
1397 {
1398 target_ulong len_mask = ~(len - 1);
1399 CPUWatchpoint *wp;
1400
1401 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1402 if (addr == wp->vaddr && len_mask == wp->len_mask
1403 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1404 cpu_watchpoint_remove_by_ref(env, wp);
1405 return 0;
1406 }
1407 }
1408 return -ENOENT;
1409 }
1410
1411 /* Remove a specific watchpoint by reference. */
1412 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1413 {
1414 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1415
1416 tlb_flush_page(env, watchpoint->vaddr);
1417
1418 qemu_free(watchpoint);
1419 }
1420
1421 /* Remove all matching watchpoints. */
1422 void cpu_watchpoint_remove_all(CPUState *env, int mask)
1423 {
1424 CPUWatchpoint *wp, *next;
1425
1426 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1427 if (wp->flags & mask)
1428 cpu_watchpoint_remove_by_ref(env, wp);
1429 }
1430 }
1431
1432 /* Add a breakpoint. */
1433 int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1434 CPUBreakpoint **breakpoint)
1435 {
1436 #if defined(TARGET_HAS_ICE)
1437 CPUBreakpoint *bp;
1438
1439 bp = qemu_malloc(sizeof(*bp));
1440
1441 bp->pc = pc;
1442 bp->flags = flags;
1443
1444 /* keep all GDB-injected breakpoints in front */
1445 if (flags & BP_GDB)
1446 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1447 else
1448 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1449
1450 breakpoint_invalidate(env, pc);
1451
1452 if (breakpoint)
1453 *breakpoint = bp;
1454 return 0;
1455 #else
1456 return -ENOSYS;
1457 #endif
1458 }
1459
1460 /* Remove a specific breakpoint. */
1461 int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1462 {
1463 #if defined(TARGET_HAS_ICE)
1464 CPUBreakpoint *bp;
1465
1466 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1467 if (bp->pc == pc && bp->flags == flags) {
1468 cpu_breakpoint_remove_by_ref(env, bp);
1469 return 0;
1470 }
1471 }
1472 return -ENOENT;
1473 #else
1474 return -ENOSYS;
1475 #endif
1476 }
1477
1478 /* Remove a specific breakpoint by reference. */
1479 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1480 {
1481 #if defined(TARGET_HAS_ICE)
1482 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1483
1484 breakpoint_invalidate(env, breakpoint->pc);
1485
1486 qemu_free(breakpoint);
1487 #endif
1488 }
1489
1490 /* Remove all matching breakpoints. */
1491 void cpu_breakpoint_remove_all(CPUState *env, int mask)
1492 {
1493 #if defined(TARGET_HAS_ICE)
1494 CPUBreakpoint *bp, *next;
1495
1496 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1497 if (bp->flags & mask)
1498 cpu_breakpoint_remove_by_ref(env, bp);
1499 }
1500 #endif
1501 }
1502
1503 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1504 CPU loop after each instruction */
1505 void cpu_single_step(CPUState *env, int enabled)
1506 {
1507 #if defined(TARGET_HAS_ICE)
1508 if (env->singlestep_enabled != enabled) {
1509 env->singlestep_enabled = enabled;
1510 if (kvm_enabled())
1511 kvm_update_guest_debug(env, 0);
1512 else {
1513 /* must flush all the translated code to avoid inconsistencies */
1514 /* XXX: only flush what is necessary */
1515 tb_flush(env);
1516 }
1517 }
1518 #endif
1519 }
1520
1521 /* enable or disable low-level logging */
1522 void cpu_set_log(int log_flags)
1523 {
1524 loglevel = log_flags;
1525 if (loglevel && !logfile) {
1526 logfile = fopen(logfilename, log_append ? "a" : "w");
1527 if (!logfile) {
1528 perror(logfilename);
1529 _exit(1);
1530 }
1531 #if !defined(CONFIG_SOFTMMU)
1532 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1533 {
1534 static char logfile_buf[4096];
1535 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1536 }
1537 #elif !defined(_WIN32)
1538 /* Win32 doesn't support line-buffering and requires size >= 2 */
1539 setvbuf(logfile, NULL, _IOLBF, 0);
1540 #endif
1541 log_append = 1;
1542 }
1543 if (!loglevel && logfile) {
1544 fclose(logfile);
1545 logfile = NULL;
1546 }
1547 }
1548
1549 void cpu_set_log_filename(const char *filename)
1550 {
1551 logfilename = strdup(filename);
1552 if (logfile) {
1553 fclose(logfile);
1554 logfile = NULL;
1555 }
1556 cpu_set_log(loglevel);
1557 }
1558
1559 static void cpu_unlink_tb(CPUState *env)
1560 {
1561 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1562 problem and hope the cpu will stop of its own accord. For userspace
1563 emulation this often isn't actually as bad as it sounds. Often
1564 signals are used primarily to interrupt blocking syscalls. */
1565 TranslationBlock *tb;
1566 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1567
1568 spin_lock(&interrupt_lock);
1569 tb = env->current_tb;
1570 /* if the cpu is currently executing code, we must unlink it and
1571 all the potentially executing TBs */
1572 if (tb) {
1573 env->current_tb = NULL;
1574 tb_reset_jump_recursive(tb);
1575 }
1576 spin_unlock(&interrupt_lock);
1577 }
1578
1579 /* mask must never be zero, except for A20 change call */
1580 void cpu_interrupt(CPUState *env, int mask)
1581 {
1582 int old_mask;
1583
1584 old_mask = env->interrupt_request;
1585 env->interrupt_request |= mask;
1586
1587 #ifndef CONFIG_USER_ONLY
1588 /*
1589 * If called from iothread context, wake the target cpu in
1590 * case it's halted.
1591 */
1592 if (!qemu_cpu_self(env)) {
1593 qemu_cpu_kick(env);
1594 return;
1595 }
1596 #endif
1597
1598 if (use_icount) {
1599 env->icount_decr.u16.high = 0xffff;
1600 #ifndef CONFIG_USER_ONLY
1601 if (!can_do_io(env)
1602 && (mask & ~old_mask) != 0) {
1603 cpu_abort(env, "Raised interrupt while not in I/O function");
1604 }
1605 #endif
1606 } else {
1607 cpu_unlink_tb(env);
1608 }
1609 }
1610
1611 void cpu_reset_interrupt(CPUState *env, int mask)
1612 {
1613 env->interrupt_request &= ~mask;
1614 }
1615
1616 void cpu_exit(CPUState *env)
1617 {
1618 env->exit_request = 1;
1619 cpu_unlink_tb(env);
1620 }
1621
1622 const CPULogItem cpu_log_items[] = {
1623 { CPU_LOG_TB_OUT_ASM, "out_asm",
1624 "show generated host assembly code for each compiled TB" },
1625 { CPU_LOG_TB_IN_ASM, "in_asm",
1626 "show target assembly code for each compiled TB" },
1627 { CPU_LOG_TB_OP, "op",
1628 "show micro ops for each compiled TB" },
1629 { CPU_LOG_TB_OP_OPT, "op_opt",
1630 "show micro ops "
1631 #ifdef TARGET_I386
1632 "before eflags optimization and "
1633 #endif
1634 "after liveness analysis" },
1635 { CPU_LOG_INT, "int",
1636 "show interrupts/exceptions in short format" },
1637 { CPU_LOG_EXEC, "exec",
1638 "show trace before each executed TB (lots of logs)" },
1639 { CPU_LOG_TB_CPU, "cpu",
1640 "show CPU state before block translation" },
1641 #ifdef TARGET_I386
1642 { CPU_LOG_PCALL, "pcall",
1643 "show protected mode far calls/returns/exceptions" },
1644 { CPU_LOG_RESET, "cpu_reset",
1645 "show CPU state before CPU resets" },
1646 #endif
1647 #ifdef DEBUG_IOPORT
1648 { CPU_LOG_IOPORT, "ioport",
1649 "show all i/o ports accesses" },
1650 #endif
1651 { 0, NULL, NULL },
1652 };
1653
1654 static int cmp1(const char *s1, int n, const char *s2)
1655 {
1656 if (strlen(s2) != n)
1657 return 0;
1658 return memcmp(s1, s2, n) == 0;
1659 }
1660
1661 /* takes a comma-separated list of log masks. Returns 0 on error. */
1662 int cpu_str_to_log_mask(const char *str)
1663 {
1664 const CPULogItem *item;
1665 int mask;
1666 const char *p, *p1;
1667
1668 p = str;
1669 mask = 0;
1670 for(;;) {
1671 p1 = strchr(p, ',');
1672 if (!p1)
1673 p1 = p + strlen(p);
1674 if(cmp1(p,p1-p,"all")) {
1675 for(item = cpu_log_items; item->mask != 0; item++) {
1676 mask |= item->mask;
1677 }
1678 } else {
1679 for(item = cpu_log_items; item->mask != 0; item++) {
1680 if (cmp1(p, p1 - p, item->name))
1681 goto found;
1682 }
1683 return 0;
1684 }
1685 found:
1686 mask |= item->mask;
1687 if (*p1 != ',')
1688 break;
1689 p = p1 + 1;
1690 }
1691 return mask;
1692 }
1693
1694 void cpu_abort(CPUState *env, const char *fmt, ...)
1695 {
1696 va_list ap;
1697 va_list ap2;
1698
1699 va_start(ap, fmt);
1700 va_copy(ap2, ap);
1701 fprintf(stderr, "qemu: fatal: ");
1702 vfprintf(stderr, fmt, ap);
1703 fprintf(stderr, "\n");
1704 #ifdef TARGET_I386
1705 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1706 #else
1707 cpu_dump_state(env, stderr, fprintf, 0);
1708 #endif
1709 if (qemu_log_enabled()) {
1710 qemu_log("qemu: fatal: ");
1711 qemu_log_vprintf(fmt, ap2);
1712 qemu_log("\n");
1713 #ifdef TARGET_I386
1714 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1715 #else
1716 log_cpu_state(env, 0);
1717 #endif
1718 qemu_log_flush();
1719 qemu_log_close();
1720 }
1721 va_end(ap2);
1722 va_end(ap);
1723 #if defined(CONFIG_USER_ONLY)
1724 {
1725 struct sigaction act;
1726 sigfillset(&act.sa_mask);
1727 act.sa_handler = SIG_DFL;
1728 sigaction(SIGABRT, &act, NULL);
1729 }
1730 #endif
1731 abort();
1732 }
1733
1734 CPUState *cpu_copy(CPUState *env)
1735 {
1736 CPUState *new_env = cpu_init(env->cpu_model_str);
1737 CPUState *next_cpu = new_env->next_cpu;
1738 int cpu_index = new_env->cpu_index;
1739 #if defined(TARGET_HAS_ICE)
1740 CPUBreakpoint *bp;
1741 CPUWatchpoint *wp;
1742 #endif
1743
1744 memcpy(new_env, env, sizeof(CPUState));
1745
1746 /* Preserve chaining and index. */
1747 new_env->next_cpu = next_cpu;
1748 new_env->cpu_index = cpu_index;
1749
1750 /* Clone all break/watchpoints.
1751 Note: Once we support ptrace with hw-debug register access, make sure
1752 BP_CPU break/watchpoints are handled correctly on clone. */
1753 QTAILQ_INIT(&env->breakpoints);
1754 QTAILQ_INIT(&env->watchpoints);
1755 #if defined(TARGET_HAS_ICE)
1756 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1757 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1758 }
1759 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1760 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1761 wp->flags, NULL);
1762 }
1763 #endif
1764
1765 return new_env;
1766 }
1767
1768 #if !defined(CONFIG_USER_ONLY)
1769
1770 static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1771 {
1772 unsigned int i;
1773
1774 /* Discard jump cache entries for any tb which might potentially
1775 overlap the flushed page. */
1776 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1777 memset (&env->tb_jmp_cache[i], 0,
1778 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1779
1780 i = tb_jmp_cache_hash_page(addr);
1781 memset (&env->tb_jmp_cache[i], 0,
1782 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1783 }
1784
1785 /* NOTE: if flush_global is true, also flush global entries (not
1786 implemented yet) */
1787 void tlb_flush(CPUState *env, int flush_global)
1788 {
1789 int i;
1790
1791 #if defined(DEBUG_TLB)
1792 printf("tlb_flush:\n");
1793 #endif
1794 /* must reset current TB so that interrupts cannot modify the
1795 links while we are modifying them */
1796 env->current_tb = NULL;
1797
1798 for(i = 0; i < CPU_TLB_SIZE; i++) {
1799 int mmu_idx;
1800 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1801 env->tlb_table[mmu_idx][i].addr_read = -1;
1802 env->tlb_table[mmu_idx][i].addr_write = -1;
1803 env->tlb_table[mmu_idx][i].addr_code = -1;
1804 }
1805 }
1806
1807 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1808
1809 #ifdef CONFIG_KQEMU
1810 if (env->kqemu_enabled) {
1811 kqemu_flush(env, flush_global);
1812 }
1813 #endif
1814 tlb_flush_count++;
1815 }
1816
1817 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1818 {
1819 if (addr == (tlb_entry->addr_read &
1820 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1821 addr == (tlb_entry->addr_write &
1822 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1823 addr == (tlb_entry->addr_code &
1824 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1825 tlb_entry->addr_read = -1;
1826 tlb_entry->addr_write = -1;
1827 tlb_entry->addr_code = -1;
1828 }
1829 }
1830
1831 void tlb_flush_page(CPUState *env, target_ulong addr)
1832 {
1833 int i;
1834 int mmu_idx;
1835
1836 #if defined(DEBUG_TLB)
1837 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1838 #endif
1839 /* must reset current TB so that interrupts cannot modify the
1840 links while we are modifying them */
1841 env->current_tb = NULL;
1842
1843 addr &= TARGET_PAGE_MASK;
1844 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1845 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1846 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1847
1848 tlb_flush_jmp_cache(env, addr);
1849 }
1850
1851 /* update the TLBs so that writes to code in the virtual page 'addr'
1852 can be detected */
1853 static void tlb_protect_code(ram_addr_t ram_addr)
1854 {
1855 cpu_physical_memory_reset_dirty(ram_addr,
1856 ram_addr + TARGET_PAGE_SIZE,
1857 CODE_DIRTY_FLAG);
1858 }
1859
1860 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1861 tested for self modifying code */
1862 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1863 target_ulong vaddr)
1864 {
1865 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1866 }
1867
1868 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1869 unsigned long start, unsigned long length)
1870 {
1871 unsigned long addr;
1872 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1873 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1874 if ((addr - start) < length) {
1875 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1876 }
1877 }
1878 }
1879
1880 /* Note: start and end must be within the same ram block. */
1881 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1882 int dirty_flags)
1883 {
1884 CPUState *env;
1885 unsigned long length, start1;
1886 int i;
1887
1888 start &= TARGET_PAGE_MASK;
1889 end = TARGET_PAGE_ALIGN(end);
1890
1891 length = end - start;
1892 if (length == 0)
1893 return;
1894 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1895
1896 /* we modify the TLB cache so that the dirty bit will be set again
1897 when accessing the range */
1898 start1 = (unsigned long)qemu_safe_ram_ptr(start);
1899 /* Check that we don't span multiple blocks - this breaks the
1900 address comparisons below. */
1901 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
1902 != (end - 1) - start) {
1903 abort();
1904 }
1905
1906 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1907 int mmu_idx;
1908 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1909 for(i = 0; i < CPU_TLB_SIZE; i++)
1910 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1911 start1, length);
1912 }
1913 }
1914 }
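/* Usage note (illustration only): tlb_protect_code() above relies on this to
   clear CODE_DIRTY_FLAG for a page that holds translated code, e.g.

       cpu_physical_memory_reset_dirty(page, page + TARGET_PAGE_SIZE,
                                       CODE_DIRTY_FLAG);

   Afterwards the write TLB entries covering that page carry TLB_NOTDIRTY, so
   the first guest write goes through the notdirty handlers further down and
   can invalidate the affected translation blocks. */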
1915
1916 int cpu_physical_memory_set_dirty_tracking(int enable)
1917 {
1918 in_migration = enable;
1919 if (kvm_enabled()) {
1920 return kvm_set_migration_log(enable);
1921 }
1922 return 0;
1923 }
1924
1925 int cpu_physical_memory_get_dirty_tracking(void)
1926 {
1927 return in_migration;
1928 }
1929
1930 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1931 target_phys_addr_t end_addr)
1932 {
1933 int ret = 0;
1934
1935 if (kvm_enabled())
1936 ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1937 return ret;
1938 }
1939
1940 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1941 {
1942 ram_addr_t ram_addr;
1943 void *p;
1944
1945 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1946 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1947 + tlb_entry->addend);
1948 ram_addr = qemu_ram_addr_from_host_nofail(p);
1949 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1950 tlb_entry->addr_write |= TLB_NOTDIRTY;
1951 }
1952 }
1953 }
1954
1955 /* update the TLB according to the current state of the dirty bits */
1956 void cpu_tlb_update_dirty(CPUState *env)
1957 {
1958 int i;
1959 int mmu_idx;
1960 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1961 for(i = 0; i < CPU_TLB_SIZE; i++)
1962 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1963 }
1964 }
1965
1966 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1967 {
1968 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1969 tlb_entry->addr_write = vaddr;
1970 }
1971
1972 /* update the TLB corresponding to virtual page vaddr
1973 so that it is no longer dirty */
1974 static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1975 {
1976 int i;
1977 int mmu_idx;
1978
1979 vaddr &= TARGET_PAGE_MASK;
1980 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1981 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1982 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1983 }
1984
1985 /* add a new TLB entry. At most one entry for a given virtual address
1986 is permitted. Return 0 if OK or 2 if the page could not be mapped
1987 (can only happen in non SOFTMMU mode for I/O pages or pages
1988 conflicting with the host address space). */
1989 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1990 target_phys_addr_t paddr, int prot,
1991 int mmu_idx, int is_softmmu)
1992 {
1993 PhysPageDesc *p;
1994 unsigned long pd;
1995 unsigned int index;
1996 target_ulong address;
1997 target_ulong code_address;
1998 ptrdiff_t addend;
1999 int ret;
2000 CPUTLBEntry *te;
2001 CPUWatchpoint *wp;
2002 target_phys_addr_t iotlb;
2003
2004 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2005 if (!p) {
2006 pd = IO_MEM_UNASSIGNED;
2007 } else {
2008 pd = p->phys_offset;
2009 }
2010 #if defined(DEBUG_TLB)
2011 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2012 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2013 #endif
2014
2015 ret = 0;
2016 address = vaddr;
2017 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2018 /* IO memory case (romd handled later) */
2019 address |= TLB_MMIO;
2020 }
2021 addend = (ptrdiff_t)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2022 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2023 /* Normal RAM. */
2024 iotlb = pd & TARGET_PAGE_MASK;
2025 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2026 iotlb |= IO_MEM_NOTDIRTY;
2027 else
2028 iotlb |= IO_MEM_ROM;
2029 } else {
2030 /* IO handlers are currently passed a physical address.
2031 It would be nice to pass an offset from the base address
2032 of that region. This would avoid having to special case RAM,
2033 and avoid full address decoding in every device.
2034 We can't use the high bits of pd for this because
2035 IO_MEM_ROMD uses these as a ram address. */
2036 iotlb = (pd & ~TARGET_PAGE_MASK);
2037 if (p) {
2038 iotlb += p->region_offset;
2039 } else {
2040 iotlb += paddr;
2041 }
2042 }
2043
2044 code_address = address;
2045 /* Make accesses to pages with watchpoints go via the
2046 watchpoint trap routines. */
2047 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2048 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2049 iotlb = io_mem_watch + paddr;
2050 /* TODO: The memory case can be optimized by not trapping
2051 reads of pages with a write breakpoint. */
2052 address |= TLB_MMIO;
2053 }
2054 }
2055
2056 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2057 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2058 te = &env->tlb_table[mmu_idx][index];
2059 te->addend = addend - vaddr;
2060 if (prot & PAGE_READ) {
2061 te->addr_read = address;
2062 } else {
2063 te->addr_read = -1;
2064 }
2065
2066 if (prot & PAGE_EXEC) {
2067 te->addr_code = code_address;
2068 } else {
2069 te->addr_code = -1;
2070 }
2071 if (prot & PAGE_WRITE) {
2072 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2073 (pd & IO_MEM_ROMD)) {
2074 /* Write access calls the I/O callback. */
2075 te->addr_write = address | TLB_MMIO;
2076 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2077 !cpu_physical_memory_is_dirty(pd)) {
2078 te->addr_write = address | TLB_NOTDIRTY;
2079 } else {
2080 te->addr_write = address;
2081 }
2082 } else {
2083 te->addr_write = -1;
2084 }
2085
2086 #ifdef CONFIG_MEMCHECK
2087 /*
2088 * If the memory checker is running, we need to make sure that the page
2089 * cached into the TLB as the result of this operation complies with our
2090 * requirement to have __ld/__stx_mmu called for memory accesses on pages
2091 * containing memory blocks that require access violation checks.
2092 *
2093 * We need to ask the memory checker whether this page should be
2094 * invalidated, and we do so only if:
2095 * - Memchecking is enabled.
2096 * - The cached page belongs to user space.
2097 * - The request to cache this page didn't come from the softmmu. We're
2098 *   covered there, because after the page is cached here we will
2099 *   invalidate it in the __ld/__stx_mmu wrapper.
2100 * - The cached page belongs to RAM, not an I/O area.
2101 * - The page is cached for read or write access.
2102 */
2103 if (memcheck_instrument_mmu && mmu_idx == 1 && !is_softmmu &&
2104 (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2105 (prot & (PAGE_READ | PAGE_WRITE)) &&
2106 memcheck_is_checked(vaddr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE)) {
2107 if (prot & PAGE_READ) {
2108 te->addr_read ^= TARGET_PAGE_MASK;
2109 }
2110 if (prot & PAGE_WRITE) {
2111 te->addr_write ^= TARGET_PAGE_MASK;
2112 }
2113 }
2114 #endif // CONFIG_MEMCHECK
2115
2116 return ret;
2117 }
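/* Usage sketch (hypothetical caller, for illustration only): a target's MMU
   fault handler, once it has resolved a guest virtual address to a physical
   page and protection bits, would typically refill the TLB with

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         prot, mmu_idx, is_softmmu);

   so that subsequent accesses to the page hit the fast path instead of
   faulting again. */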
2118
2119 #else
2120
2121 void tlb_flush(CPUState *env, int flush_global)
2122 {
2123 }
2124
2125 void tlb_flush_page(CPUState *env, target_ulong addr)
2126 {
2127 }
2128
2129 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2130 target_phys_addr_t paddr, int prot,
2131 int mmu_idx, int is_softmmu)
2132 {
2133 return 0;
2134 }
2135
2136 /*
2137 * Walks guest process memory "regions" one by one
2138 * and calls callback function 'fn' for each region.
2139 */
2140 int walk_memory_regions(void *priv,
2141 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2142 {
2143 unsigned long start, end;
2144 PageDesc *p = NULL;
2145 int i, j, prot, prot1;
2146 int rc = 0;
2147
2148 start = end = -1;
2149 prot = 0;
2150
2151 for (i = 0; i <= L1_SIZE; i++) {
2152 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2153 for (j = 0; j < L2_SIZE; j++) {
2154 prot1 = (p == NULL) ? 0 : p[j].flags;
2155 /*
2156 * "region" is one continuous chunk of memory
2157 * that has same protection flags set.
2158 */
2159 if (prot1 != prot) {
2160 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2161 if (start != -1) {
2162 rc = (*fn)(priv, start, end, prot);
2163 /* callback can stop iteration by returning != 0 */
2164 if (rc != 0)
2165 return (rc);
2166 }
2167 if (prot1 != 0)
2168 start = end;
2169 else
2170 start = -1;
2171 prot = prot1;
2172 }
2173 if (p == NULL)
2174 break;
2175 }
2176 }
2177 return (rc);
2178 }
2179
2180 static int dump_region(void *priv, unsigned long start,
2181 unsigned long end, unsigned long prot)
2182 {
2183 FILE *f = (FILE *)priv;
2184
2185 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2186 start, end, end - start,
2187 ((prot & PAGE_READ) ? 'r' : '-'),
2188 ((prot & PAGE_WRITE) ? 'w' : '-'),
2189 ((prot & PAGE_EXEC) ? 'x' : '-'));
2190
2191 return (0);
2192 }
2193
2194 /* dump memory mappings */
2195 void page_dump(FILE *f)
2196 {
2197 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2198 "start", "end", "size", "prot");
2199 walk_memory_regions(f, dump_region);
2200 }
2201
2202 int page_get_flags(target_ulong address)
2203 {
2204 PageDesc *p;
2205
2206 p = page_find(address >> TARGET_PAGE_BITS);
2207 if (!p)
2208 return 0;
2209 return p->flags;
2210 }
2211
2212 /* Modify the flags of a page and invalidate the code if necessary.
2213 The flag PAGE_WRITE_ORG is positioned automatically depending
2214 on PAGE_WRITE. The mmap_lock should already be held. */
2215 void page_set_flags(target_ulong start, target_ulong end, int flags)
2216 {
2217 PageDesc *p;
2218 target_ulong addr;
2219
2220 /* mmap_lock should already be held. */
2221 start = start & TARGET_PAGE_MASK;
2222 end = TARGET_PAGE_ALIGN(end);
2223 if (flags & PAGE_WRITE)
2224 flags |= PAGE_WRITE_ORG;
2225 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2226 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2227 /* We may be called for host regions that are outside guest
2228 address space. */
2229 if (!p)
2230 return;
2231 /* if the write protection is set, then we invalidate the code
2232 inside */
2233 if (!(p->flags & PAGE_WRITE) &&
2234 (flags & PAGE_WRITE) &&
2235 p->first_tb) {
2236 tb_invalidate_phys_page(addr, 0, NULL);
2237 }
2238 p->flags = flags;
2239 }
2240 }
2241
2242 int page_check_range(target_ulong start, target_ulong len, int flags)
2243 {
2244 PageDesc *p;
2245 target_ulong end;
2246 target_ulong addr;
2247
2248 if (start + len < start)
2249 /* we've wrapped around */
2250 return -1;
2251
2252 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2253 start = start & TARGET_PAGE_MASK;
2254
2255 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2256 p = page_find(addr >> TARGET_PAGE_BITS);
2257 if( !p )
2258 return -1;
2259 if( !(p->flags & PAGE_VALID) )
2260 return -1;
2261
2262 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2263 return -1;
2264 if (flags & PAGE_WRITE) {
2265 if (!(p->flags & PAGE_WRITE_ORG))
2266 return -1;
2267 /* unprotect the page if it was put read-only because it
2268 contains translated code */
2269 if (!(p->flags & PAGE_WRITE)) {
2270 if (!page_unprotect(addr, 0, NULL))
2271 return -1;
2272 }
2273 return 0;
2274 }
2275 }
2276 return 0;
2277 }
2278
2279 /* called from signal handler: invalidate the code and unprotect the
2280 page. Return TRUE if the fault was successfully handled. */
2281 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2282 {
2283 unsigned int page_index, prot, pindex;
2284 PageDesc *p, *p1;
2285 target_ulong host_start, host_end, addr;
2286
2287 /* Technically this isn't safe inside a signal handler. However we
2288 know this only ever happens in a synchronous SEGV handler, so in
2289 practice it seems to be ok. */
2290 mmap_lock();
2291
2292 host_start = address & qemu_host_page_mask;
2293 page_index = host_start >> TARGET_PAGE_BITS;
2294 p1 = page_find(page_index);
2295 if (!p1) {
2296 mmap_unlock();
2297 return 0;
2298 }
2299 host_end = host_start + qemu_host_page_size;
2300 p = p1;
2301 prot = 0;
2302 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2303 prot |= p->flags;
2304 p++;
2305 }
2306 /* if the page was really writable, then we change its
2307 protection back to writable */
2308 if (prot & PAGE_WRITE_ORG) {
2309 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2310 if (!(p1[pindex].flags & PAGE_WRITE)) {
2311 mprotect((void *)g2h(host_start), qemu_host_page_size,
2312 (prot & PAGE_BITS) | PAGE_WRITE);
2313 p1[pindex].flags |= PAGE_WRITE;
2314 /* and since the content will be modified, we must invalidate
2315 the corresponding translated code. */
2316 tb_invalidate_phys_page(address, pc, puc);
2317 #ifdef DEBUG_TB_CHECK
2318 tb_invalidate_check(address);
2319 #endif
2320 mmap_unlock();
2321 return 1;
2322 }
2323 }
2324 mmap_unlock();
2325 return 0;
2326 }
2327
2328 static inline void tlb_set_dirty(CPUState *env,
2329 unsigned long addr, target_ulong vaddr)
2330 {
2331 }
2332 #endif /* defined(CONFIG_USER_ONLY) */
2333
2334 #if !defined(CONFIG_USER_ONLY)
2335
2336 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2337 ram_addr_t memory, ram_addr_t region_offset);
2338 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2339 ram_addr_t orig_memory, ram_addr_t region_offset);
2340 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2341 need_subpage) \
2342 do { \
2343 if (addr > start_addr) \
2344 start_addr2 = 0; \
2345 else { \
2346 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2347 if (start_addr2 > 0) \
2348 need_subpage = 1; \
2349 } \
2350 \
2351 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
2352 end_addr2 = TARGET_PAGE_SIZE - 1; \
2353 else { \
2354 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2355 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2356 need_subpage = 1; \
2357 } \
2358 } while (0)
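/* Worked example (assuming a 4KB target page): registering a region with
   start_addr = 0x1100 and orig_size = 0x200, then evaluating the macro for
   the page addr = 0x1000, yields start_addr2 = 0x100 and end_addr2 = 0x2ff
   with need_subpage set by both tests: only offsets 0x100..0x2ff of that page
   belong to the new region, so a subpage must be used. */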
2359
2360 /* register physical memory.
2361 For RAM, 'size' must be a multiple of the target page size.
2362 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2363 io memory page. The address used when calling the IO function is
2364 the offset from the start of the region, plus region_offset. Both
2365 start_addr and region_offset are rounded down to a page boundary
2366 before calculating this offset. This should not be a problem unless
2367 the low bits of start_addr and region_offset differ. */
2368 void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
2369 ram_addr_t size,
2370 ram_addr_t phys_offset,
2371 ram_addr_t region_offset,
2372 bool log_dirty)
2373 {
2374 target_phys_addr_t addr, end_addr;
2375 PhysPageDesc *p;
2376 CPUState *env;
2377 ram_addr_t orig_size = size;
2378 subpage_t *subpage;
2379
2380 if (kvm_enabled())
2381 kvm_set_phys_mem(start_addr, size, phys_offset);
2382
2383 if (phys_offset == IO_MEM_UNASSIGNED) {
2384 region_offset = start_addr;
2385 }
2386 region_offset &= TARGET_PAGE_MASK;
2387 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2388 end_addr = start_addr + (target_phys_addr_t)size;
2389
2390 addr = start_addr;
2391 do {
2392 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2393 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2394 ram_addr_t orig_memory = p->phys_offset;
2395 target_phys_addr_t start_addr2, end_addr2;
2396 int need_subpage = 0;
2397
2398 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2399 need_subpage);
2400 if (need_subpage) {
2401 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2402 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2403 &p->phys_offset, orig_memory,
2404 p->region_offset);
2405 } else {
2406 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2407 >> IO_MEM_SHIFT];
2408 }
2409 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2410 region_offset);
2411 p->region_offset = 0;
2412 } else {
2413 p->phys_offset = phys_offset;
2414 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2415 (phys_offset & IO_MEM_ROMD))
2416 phys_offset += TARGET_PAGE_SIZE;
2417 }
2418 } else {
2419 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2420 p->phys_offset = phys_offset;
2421 p->region_offset = region_offset;
2422 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2423 (phys_offset & IO_MEM_ROMD)) {
2424 phys_offset += TARGET_PAGE_SIZE;
2425 } else {
2426 target_phys_addr_t start_addr2, end_addr2;
2427 int need_subpage = 0;
2428
2429 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2430 end_addr2, need_subpage);
2431
2432 if (need_subpage) {
2433 subpage = subpage_init((addr & TARGET_PAGE_MASK),
2434 &p->phys_offset, IO_MEM_UNASSIGNED,
2435 addr & TARGET_PAGE_MASK);
2436 subpage_register(subpage, start_addr2, end_addr2,
2437 phys_offset, region_offset);
2438 p->region_offset = 0;
2439 }
2440 }
2441 }
2442 region_offset += TARGET_PAGE_SIZE;
2443 addr += TARGET_PAGE_SIZE;
2444 } while (addr != end_addr);
2445
2446 /* since each CPU stores ram addresses in its TLB cache, we must
2447 reset the modified entries */
2448 /* XXX: slow ! */
2449 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2450 tlb_flush(env, 1);
2451 }
2452 }
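/* Usage sketch (illustration only, assuming the usual wrappers declared in
   the public headers): board code typically allocates RAM and maps it with

       ram_addr_t off = qemu_ram_alloc(NULL, "board.ram", ram_size);
       cpu_register_physical_memory(base, ram_size, off | IO_MEM_RAM);

   while an MMIO region passes the index returned by cpu_register_io_memory()
   as phys_offset instead. */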
2453
2454 /* XXX: temporary until new memory mapping API */
2455 ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2456 {
2457 PhysPageDesc *p;
2458
2459 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2460 if (!p)
2461 return IO_MEM_UNASSIGNED;
2462 return p->phys_offset;
2463 }
2464
2465 void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2466 {
2467 if (kvm_enabled())
2468 kvm_coalesce_mmio_region(addr, size);
2469 }
2470
2471 void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2472 {
2473 if (kvm_enabled())
2474 kvm_uncoalesce_mmio_region(addr, size);
2475 }
2476
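/* Pick an offset for a new RAM block: scan the registered blocks and return
   the start of the smallest gap that is large enough (best fit), or 0 when no
   block has been registered yet. */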
2477 static ram_addr_t find_ram_offset(ram_addr_t size)
2478 {
2479 RAMBlock *block, *next_block;
2480 ram_addr_t offset = 0, mingap = ULONG_MAX;
2481
2482 if (QLIST_EMPTY(&ram_list.blocks))
2483 return 0;
2484
2485 QLIST_FOREACH(block, &ram_list.blocks, next) {
2486 ram_addr_t end, next = ULONG_MAX;
2487
2488 end = block->offset + block->length;
2489
2490 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2491 if (next_block->offset >= end) {
2492 next = MIN(next, next_block->offset);
2493 }
2494 }
2495 if (next - end >= size && next - end < mingap) {
2496 offset = end;
2497 mingap = next - end;
2498 }
2499 }
2500 return offset;
2501 }
2502
2503 static ram_addr_t last_ram_offset(void)
2504 {
2505 RAMBlock *block;
2506 ram_addr_t last = 0;
2507
2508 QLIST_FOREACH(block, &ram_list.blocks, next)
2509 last = MAX(last, block->offset + block->length);
2510
2511 return last;
2512 }
2513
2514 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2515 ram_addr_t size, void *host)
2516 {
2517 RAMBlock *new_block, *block;
2518
2519 size = TARGET_PAGE_ALIGN(size);
2520 new_block = qemu_mallocz(sizeof(*new_block));
2521
2522 #if 0
2523 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2524 char *id = dev->parent_bus->info->get_dev_path(dev);
2525 if (id) {
2526 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2527 qemu_free(id);
2528 }
2529 }
2530 #endif
2531 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2532
2533 QLIST_FOREACH(block, &ram_list.blocks, next) {
2534 if (!strcmp(block->idstr, new_block->idstr)) {
2535 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2536 new_block->idstr);
2537 abort();
2538 }
2539 }
2540
2541 if (host) {
2542 new_block->host = host;
2543 new_block->flags |= RAM_PREALLOC_MASK;
2544 } else {
2545 if (mem_path) {
2546 #if 0 && defined (__linux__) && !defined(TARGET_S390X)
2547 new_block->host = file_ram_alloc(new_block, size, mem_path);
2548 if (!new_block->host) {
2549 new_block->host = qemu_vmalloc(size);
2550 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2551 }
2552 #else
2553 fprintf(stderr, "-mem-path option unsupported\n");
2554 exit(1);
2555 #endif
2556 } else {
2557 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2558 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2559 new_block->host = mmap((void*)0x1000000, size,
2560 PROT_EXEC|PROT_READ|PROT_WRITE,
2561 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2562 #else
2563 new_block->host = qemu_vmalloc(size);
2564 #endif
2565 #ifdef MADV_MERGEABLE
2566 madvise(new_block->host, size, MADV_MERGEABLE);
2567 #endif
2568 }
2569 }
2570
2571 new_block->offset = find_ram_offset(size);
2572 new_block->length = size;
2573
2574 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2575
2576 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2577 last_ram_offset() >> TARGET_PAGE_BITS);
2578 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2579 0xff, size >> TARGET_PAGE_BITS);
2580
2581 if (kvm_enabled())
2582 kvm_setup_guest_memory(new_block->host, size);
2583
2584 return new_block->offset;
2585 }
2586
2587 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2588 {
2589 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2590 }
2591
2592 void qemu_ram_free(ram_addr_t addr)
2593 {
2594 RAMBlock *block;
2595
2596 QLIST_FOREACH(block, &ram_list.blocks, next) {
2597 if (addr == block->offset) {
2598 QLIST_REMOVE(block, next);
2599 if (block->flags & RAM_PREALLOC_MASK) {
2600 ;
2601 } else if (mem_path) {
2602 #if defined (__linux__) && !defined(TARGET_S390X)
2603 if (block->fd) {
2604 munmap(block->host, block->length);
2605 close(block->fd);
2606 } else {
2607 qemu_vfree(block->host);
2608 }
2609 #else
2610 abort();
2611 #endif
2612 } else {
2613 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2614 munmap(block->host, block->length);
2615 #else
2616 qemu_vfree(block->host);
2617 #endif
2618 }
2619 qemu_free(block);
2620 return;
2621 }
2622 }
2623
2624 }
2625
2626 #ifndef _WIN32
2627 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2628 {
2629 #ifndef CONFIG_ANDROID
2630 RAMBlock *block;
2631 ram_addr_t offset;
2632 int flags;
2633 void *area, *vaddr;
2634
2635 QLIST_FOREACH(block, &ram_list.blocks, next) {
2636 offset = addr - block->offset;
2637 if (offset < block->length) {
2638 vaddr = block->host + offset;
2639 if (block->flags & RAM_PREALLOC_MASK) {
2640 ;
2641 } else {
2642 flags = MAP_FIXED;
2643 munmap(vaddr, length);
2644 if (mem_path) {
2645 #if defined(__linux__) && !defined(TARGET_S390X)
2646 if (block->fd) {
2647 #ifdef MAP_POPULATE
2648 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2649 MAP_PRIVATE;
2650 #else
2651 flags |= MAP_PRIVATE;
2652 #endif
2653 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2654 flags, block->fd, offset);
2655 } else {
2656 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2657 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2658 flags, -1, 0);
2659 }
2660 #else
2661 abort();
2662 #endif
2663 } else {
2664 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2665 flags |= MAP_SHARED | MAP_ANONYMOUS;
2666 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2667 flags, -1, 0);
2668 #else
2669 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2670 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2671 flags, -1, 0);
2672 #endif
2673 }
2674 if (area != vaddr) {
2675 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
2676 length, addr);
2677 exit(1);
2678 }
2679 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2680 }
2681 return;
2682 }
2683 }
2684 #endif /* !CONFIG_ANDROID */
2685 }
2686 #endif /* !_WIN32 */
2687
2688 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2689 With the exception of the softmmu code in this file, this should
2690 only be used for local memory (e.g. video ram) that the device owns,
2691 and knows it isn't going to access beyond the end of the block.
2692
2693 It should not be used for general purpose DMA.
2694 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2695 */
2696 void *qemu_get_ram_ptr(ram_addr_t addr)
2697 {
2698 RAMBlock *block;
2699
2700 QLIST_FOREACH(block, &ram_list.blocks, next) {
2701 if (addr - block->offset < block->length) {
2702 /* Move this entry to the start of the list.  */
2703 if (block != QLIST_FIRST(&ram_list.blocks)) {
2704 QLIST_REMOVE(block, next);
2705 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2706 }
2707 return block->host + (addr - block->offset);
2708 }
2709 }
2710
2711 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2712 abort();
2713
2714 return NULL;
2715 }
2716
2717 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2718 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2719 */
2720 void *qemu_safe_ram_ptr(ram_addr_t addr)
2721 {
2722 RAMBlock *block;
2723
2724 QLIST_FOREACH(block, &ram_list.blocks, next) {
2725 if (addr - block->offset < block->length) {
2726 return block->host + (addr - block->offset);
2727 }
2728 }
2729
2730 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2731 abort();
2732
2733 return NULL;
2734 }
2735
2736 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2737 {
2738 RAMBlock *block;
2739 uint8_t *host = ptr;
2740
2741 QLIST_FOREACH(block, &ram_list.blocks, next) {
2742 if (host - block->host < block->length) {
2743 *ram_addr = block->offset + (host - block->host);
2744 return 0;
2745 }
2746 }
2747 return -1;
2748 }
2749
2750 /* Some of the softmmu routines need to translate from a host pointer
2751 (typically a TLB entry) back to a ram offset. */
2752 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2753 {
2754 ram_addr_t ram_addr;
2755
2756 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2757 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2758 abort();
2759 }
2760 return ram_addr;
2761 }
2762
2763 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2764 {
2765 #ifdef DEBUG_UNASSIGNED
2766 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2767 #endif
2768 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2769 do_unassigned_access(addr, 0, 0, 0, 1);
2770 #endif
2771 return 0;
2772 }
2773
2774 static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2775 {
2776 #ifdef DEBUG_UNASSIGNED
2777 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2778 #endif
2779 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2780 do_unassigned_access(addr, 0, 0, 0, 2);
2781 #endif
2782 return 0;
2783 }
2784
2785 static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2786 {
2787 #ifdef DEBUG_UNASSIGNED
2788 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2789 #endif
2790 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2791 do_unassigned_access(addr, 0, 0, 0, 4);
2792 #endif
2793 return 0;
2794 }
2795
2796 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2797 {
2798 #ifdef DEBUG_UNASSIGNED
2799 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2800 #endif
2801 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2802 do_unassigned_access(addr, 1, 0, 0, 1);
2803 #endif
2804 }
2805
2806 static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2807 {
2808 #ifdef DEBUG_UNASSIGNED
2809 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2810 #endif
2811 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2812 do_unassigned_access(addr, 1, 0, 0, 2);
2813 #endif
2814 }
2815
2816 static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2817 {
2818 #ifdef DEBUG_UNASSIGNED
2819 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2820 #endif
2821 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2822 do_unassigned_access(addr, 1, 0, 0, 4);
2823 #endif
2824 }
2825
2826 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2827 unassigned_mem_readb,
2828 unassigned_mem_readw,
2829 unassigned_mem_readl,
2830 };
2831
2832 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2833 unassigned_mem_writeb,
2834 unassigned_mem_writew,
2835 unassigned_mem_writel,
2836 };
2837
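/* Self-modifying code path (descriptive note): pages containing translated
   code have CODE_DIRTY_FLAG cleared (see tlb_protect_code()), so their write
   TLB entries carry TLB_NOTDIRTY and guest stores are routed to the handlers
   below.  They invalidate the translations on the page, set the dirty flags
   again and, once all flags are set, restore the direct RAM write path via
   tlb_set_dirty(). */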
2838 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2839 uint32_t val)
2840 {
2841 int dirty_flags;
2842 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2843 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2844 #if !defined(CONFIG_USER_ONLY)
2845 tb_invalidate_phys_page_fast(ram_addr, 1);
2846 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2847 #endif
2848 }
2849 stb_p(qemu_get_ram_ptr(ram_addr), val);
2850 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2851 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2852 /* we remove the notdirty callback only if the code has been
2853 flushed */
2854 if (dirty_flags == 0xff)
2855 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2856 }
2857
2858 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2859 uint32_t val)
2860 {
2861 int dirty_flags;
2862 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2863 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2864 #if !defined(CONFIG_USER_ONLY)
2865 tb_invalidate_phys_page_fast(ram_addr, 2);
2866 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2867 #endif
2868 }
2869 stw_p(qemu_get_ram_ptr(ram_addr), val);
2870 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2871 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2872 /* we remove the notdirty callback only if the code has been
2873 flushed */
2874 if (dirty_flags == 0xff)
2875 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2876 }
2877
2878 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2879 uint32_t val)
2880 {
2881 int dirty_flags;
2882 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2883 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2884 #if !defined(CONFIG_USER_ONLY)
2885 tb_invalidate_phys_page_fast(ram_addr, 4);
2886 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
2887 #endif
2888 }
2889 stl_p(qemu_get_ram_ptr(ram_addr), val);
2890 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2891 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
2892 /* we remove the notdirty callback only if the code has been
2893 flushed */
2894 if (dirty_flags == 0xff)
2895 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2896 }
2897
2898 static CPUReadMemoryFunc * const error_mem_read[3] = {
2899 NULL, /* never used */
2900 NULL, /* never used */
2901 NULL, /* never used */
2902 };
2903
2904 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2905 notdirty_mem_writeb,
2906 notdirty_mem_writew,
2907 notdirty_mem_writel,
2908 };
2909
2910 /* Generate a debug exception if a watchpoint has been hit. */
2911 static void check_watchpoint(int offset, int len_mask, int flags)
2912 {
2913 CPUState *env = cpu_single_env;
2914 target_ulong pc, cs_base;
2915 TranslationBlock *tb;
2916 target_ulong vaddr;
2917 CPUWatchpoint *wp;
2918 int cpu_flags;
2919
2920 if (env->watchpoint_hit) {
2921 /* We re-entered the check after replacing the TB. Now raise
2922 * the debug interrupt so that it will trigger after the
2923 * current instruction. */
2924 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2925 return;
2926 }
2927 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2928 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2929 if ((vaddr == (wp->vaddr & len_mask) ||
2930 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2931 wp->flags |= BP_WATCHPOINT_HIT;
2932 if (!env->watchpoint_hit) {
2933 env->watchpoint_hit = wp;
2934 tb = tb_find_pc(env->mem_io_pc);
2935 if (!tb) {
2936 cpu_abort(env, "check_watchpoint: could not find TB for "
2937 "pc=%p", (void *)env->mem_io_pc);
2938 }
2939 cpu_restore_state(tb, env, env->mem_io_pc);
2940 tb_phys_invalidate(tb, -1);
2941 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2942 env->exception_index = EXCP_DEBUG;
2943 } else {
2944 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2945 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2946 }
2947 cpu_resume_from_signal(env, NULL);
2948 }
2949 } else {
2950 wp->flags &= ~BP_WATCHPOINT_HIT;
2951 }
2952 }
2953 }
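/* Note on the recovery path above: when the watchpoint does not stop before
   the access, the current TB is invalidated and tb_gen_code() is called with
   a cflags value of 1, which as used here requests a block covering just the
   current instruction; execution then resumes so the access completes and the
   pending debug exception is raised immediately afterwards. */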
2954
2955 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2956 so these check for a hit then pass through to the normal out-of-line
2957 phys routines. */
2958 static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2959 {
2960 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2961 return ldub_phys(addr);
2962 }
2963
2964 static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2965 {
2966 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2967 return lduw_phys(addr);
2968 }
2969
2970 static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2971 {
2972 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2973 return ldl_phys(addr);
2974 }
2975
2976 static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2977 uint32_t val)
2978 {
2979 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2980 stb_phys(addr, val);
2981 }
2982
2983 static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2984 uint32_t val)
2985 {
2986 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2987 stw_phys(addr, val);
2988 }
2989
2990 static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2991 uint32_t val)
2992 {
2993 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2994 stl_phys(addr, val);
2995 }
2996
2997 static CPUReadMemoryFunc * const watch_mem_read[3] = {
2998 watch_mem_readb,
2999 watch_mem_readw,
3000 watch_mem_readl,
3001 };
3002
3003 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3004 watch_mem_writeb,
3005 watch_mem_writew,
3006 watch_mem_writel,
3007 };
3008
3009 static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
3010 unsigned int len)
3011 {
3012 uint32_t ret;
3013 unsigned int idx;
3014
3015 idx = SUBPAGE_IDX(addr);
3016 #if defined(DEBUG_SUBPAGE)
3017 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3018 mmio, len, addr, idx);
3019 #endif
3020 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3021 addr + mmio->region_offset[idx][0][len]);
3022
3023 return ret;
3024 }
3025
3026 static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3027 uint32_t value, unsigned int len)
3028 {
3029 unsigned int idx;
3030
3031 idx = SUBPAGE_IDX(addr);
3032 #if defined(DEBUG_SUBPAGE)
3033 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3034 mmio, len, addr, idx, value);
3035 #endif
3036 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3037 addr + mmio->region_offset[idx][1][len],
3038 value);
3039 }
3040
3041 static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3042 {
3043 #if defined(DEBUG_SUBPAGE)
3044 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3045 #endif
3046
3047 return subpage_readlen(opaque, addr, 0);
3048 }
3049
3050 static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3051 uint32_t value)
3052 {
3053 #if defined(DEBUG_SUBPAGE)
3054 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3055 #endif
3056 subpage_writelen(opaque, addr, value, 0);
3057 }
3058
3059 static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3060 {
3061 #if defined(DEBUG_SUBPAGE)
3062 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3063 #endif
3064
3065 return subpage_readlen(opaque, addr, 1);
3066 }
3067
3068 static void subpage_writew (void *opaque, target_phys_addr_t addr,
3069 uint32_t value)
3070 {
3071 #if defined(DEBUG_SUBPAGE)
3072 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3073 #endif
3074 subpage_writelen(opaque, addr, value, 1);
3075 }
3076
3077 static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3078 {
3079 #if defined(DEBUG_SUBPAGE)
3080 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3081 #endif
3082
3083 return subpage_readlen(opaque, addr, 2);
3084 }
3085
3086 static void subpage_writel (void *opaque,
3087 target_phys_addr_t addr, uint32_t value)
3088 {
3089 #if defined(DEBUG_SUBPAGE)
3090 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3091 #endif
3092 subpage_writelen(opaque, addr, value, 2);
3093 }
3094
3095 static CPUReadMemoryFunc * const subpage_read[] = {
3096 &subpage_readb,
3097 &subpage_readw,
3098 &subpage_readl,
3099 };
3100
3101 static CPUWriteMemoryFunc * const subpage_write[] = {
3102 &subpage_writeb,
3103 &subpage_writew,
3104 &subpage_writel,
3105 };
3106
3107 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3108 ram_addr_t memory, ram_addr_t region_offset)
3109 {
3110 int idx, eidx;
3111 unsigned int i;
3112
3113 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3114 return -1;
3115 idx = SUBPAGE_IDX(start);
3116 eidx = SUBPAGE_IDX(end);
3117 #if defined(DEBUG_SUBPAGE)
3118 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3119 mmio, start, end, idx, eidx, memory);
3120 #endif
3121 memory >>= IO_MEM_SHIFT;
3122 for (; idx <= eidx; idx++) {
3123 for (i = 0; i < 4; i++) {
3124 if (io_mem_read[memory][i]) {
3125 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3126 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
3127 mmio->region_offset[idx][0][i] = region_offset;
3128 }
3129 if (io_mem_write[memory][i]) {
3130 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3131 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
3132 mmio->region_offset[idx][1][i] = region_offset;
3133 }
3134 }
3135 }
3136
3137 return 0;
3138 }
3139
3140 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3141 ram_addr_t orig_memory, ram_addr_t region_offset)
3142 {
3143 subpage_t *mmio;
3144 int subpage_memory;
3145
3146 mmio = qemu_mallocz(sizeof(subpage_t));
3147
3148 mmio->base = base;
3149 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3150 #if defined(DEBUG_SUBPAGE)
3151 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3152 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3153 #endif
3154 *phys = subpage_memory | IO_MEM_SUBPAGE;
3155 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3156 region_offset);
3157
3158 return mmio;
3159 }
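/* Subpage dispatch overview (descriptive note): when a page is only partially
   covered by a memory region, cpu_register_physical_memory_log() installs a
   subpage_t via subpage_init() and tags the page's phys_offset with
   IO_MEM_SUBPAGE.  Accesses to the page then reach subpage_readlen() and
   subpage_writelen(), which use SUBPAGE_IDX(addr) to select the per-offset
   handler recorded by subpage_register(). */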
3160
3161 static int get_free_io_mem_idx(void)
3162 {
3163 int i;
3164
3165 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3166 if (!io_mem_used[i]) {
3167 io_mem_used[i] = 1;
3168 return i;
3169 }
3170 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3171 return -1;
3172 }
3173
3174 /* mem_read and mem_write are arrays of functions containing the
3175 function to access byte (index 0), word (index 1) and dword (index
3176 2). Functions can be omitted with a NULL function pointer.
3177 If io_index is non-zero, the corresponding io zone is
3178 modified. If it is zero, a new io zone is allocated. The return
3179 value can be used with cpu_register_physical_memory(). (-1) is
3180 returned on error. */
3181 static int cpu_register_io_memory_fixed(int io_index,
3182 CPUReadMemoryFunc * const *mem_read,
3183 CPUWriteMemoryFunc * const *mem_write,
3184 void *opaque)
3185 {
3186 int i, subwidth = 0;
3187
3188 if (io_index <= 0) {
3189 io_index = get_free_io_mem_idx();
3190 if (io_index == -1)
3191 return io_index;
3192 } else {
3193 io_index >>= IO_MEM_SHIFT;
3194 if (io_index >= IO_MEM_NB_ENTRIES)
3195 return -1;
3196 }
3197
3198 for(i = 0;i < 3; i++) {
3199 if (!mem_read[i] || !mem_write[i])
3200 subwidth = IO_MEM_SUBWIDTH;
3201 io_mem_read[io_index][i] = mem_read[i];
3202 io_mem_write[io_index][i] = mem_write[i];
3203 }
3204 io_mem_opaque[io_index] = opaque;
3205 return (io_index << IO_MEM_SHIFT) | subwidth;
3206 }
3207
3208 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3209 CPUWriteMemoryFunc * const *mem_write,
3210 void *opaque)
3211 {
3212 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3213 }
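/* Usage sketch (hypothetical device, for illustration only):

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   The mydev_* handlers are hypothetical; the returned index is what the
   physical memory registration code stores in phys_offset for the page. */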
3214
3215 void cpu_unregister_io_memory(int io_table_address)
3216 {
3217 int i;
3218 int io_index = io_table_address >> IO_MEM_SHIFT;
3219
3220 for (i=0;i < 3; i++) {
3221 io_mem_read[io_index][i] = unassigned_mem_read[i];
3222 io_mem_write[io_index][i] = unassigned_mem_write[i];
3223 }
3224 io_mem_opaque[io_index] = NULL;
3225 io_mem_used[io_index] = 0;
3226 }
3227
3228 static void io_mem_init(void)
3229 {
3230 int i;
3231
3232 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3233 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3234 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3235 for (i=0; i<5; i++)
3236 io_mem_used[i] = 1;
3237
3238 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3239 watch_mem_write, NULL);
3240 }
3241
3242 #endif /* !defined(CONFIG_USER_ONLY) */
3243
3244 /* physical memory access (slow version, mainly for debug) */
3245 #if defined(CONFIG_USER_ONLY)
3246 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3247 int len, int is_write)
3248 {
3249 int l, flags;
3250 target_ulong page;
3251 void * p;
3252
3253 while (len > 0) {
3254 page = addr & TARGET_PAGE_MASK;
3255 l = (page + TARGET_PAGE_SIZE) - addr;
3256 if (l > len)
3257 l = len;
3258 flags = page_get_flags(page);
3259 if (!(flags & PAGE_VALID))
3260 return;
3261 if (is_write) {
3262 if (!(flags & PAGE_WRITE))
3263 return;
3264 /* XXX: this code should not depend on lock_user */
3265 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3266 /* FIXME - should this return an error rather than just fail? */
3267 return;
3268 memcpy(p, buf, l);
3269 unlock_user(p, addr, l);
3270 } else {
3271 if (!(flags & PAGE_READ))
3272 return;
3273 /* XXX: this code should not depend on lock_user */
3274 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3275 /* FIXME - should this return an error rather than just fail? */
3276 return;
3277 memcpy(buf, p, l);
3278 unlock_user(p, addr, 0);
3279 }
3280 len -= l;
3281 buf += l;
3282 addr += l;
3283 }
3284 }
3285
3286 #else
3287 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3288 int len, int is_write)
3289 {
3290 int l, io_index;
3291 uint8_t *ptr;
3292 uint32_t val;
3293 target_phys_addr_t page;
3294 unsigned long pd;
3295 PhysPageDesc *p;
3296
3297 while (len > 0) {
3298 page = addr & TARGET_PAGE_MASK;
3299 l = (page + TARGET_PAGE_SIZE) - addr;
3300 if (l > len)
3301 l = len;
3302 p = phys_page_find(page >> TARGET_PAGE_BITS);
3303 if (!p) {
3304 pd = IO_MEM_UNASSIGNED;
3305 } else {
3306 pd = p->phys_offset;
3307 }
3308
3309 if (is_write) {
3310 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3311 target_phys_addr_t addr1 = addr;
3312 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3313 if (p)
3314 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3315 /* XXX: could force cpu_single_env to NULL to avoid
3316 potential bugs */
3317 if (l >= 4 && ((addr1 & 3) == 0)) {
3318 /* 32 bit write access */
3319 val = ldl_p(buf);
3320 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3321 l = 4;
3322 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3323 /* 16 bit write access */
3324 val = lduw_p(buf);
3325 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3326 l = 2;
3327 } else {
3328 /* 8 bit write access */
3329 val = ldub_p(buf);
3330 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3331 l = 1;
3332 }
3333 } else {
3334 unsigned long addr1;
3335 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3336 /* RAM case */
3337 ptr = qemu_get_ram_ptr(addr1);
3338 memcpy(ptr, buf, l);
3339 if (!cpu_physical_memory_is_dirty(addr1)) {
3340 /* invalidate code */
3341 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3342 /* set dirty bit */
3343 cpu_physical_memory_set_dirty_flags(
3344 addr1, (0xff & ~CODE_DIRTY_FLAG));
3345 }
3346 }
3347 } else {
3348 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3349 !(pd & IO_MEM_ROMD)) {
3350 target_phys_addr_t addr1 = addr;
3351 /* I/O case */
3352 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3353 if (p)
3354 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3355 if (l >= 4 && ((addr1 & 3) == 0)) {
3356 /* 32 bit read access */
3357 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3358 stl_p(buf, val);
3359 l = 4;
3360 } else if (l >= 2 && ((addr1 & 1) == 0)) {
3361 /* 16 bit read access */
3362 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3363 stw_p(buf, val);
3364 l = 2;
3365 } else {
3366 /* 8 bit read access */
3367 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3368 stb_p(buf, val);
3369 l = 1;
3370 }
3371 } else {
3372 /* RAM case */
3373 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3374 (addr & ~TARGET_PAGE_MASK);
3375 memcpy(buf, ptr, l);
3376 }
3377 }
3378 len -= l;
3379 buf += l;
3380 addr += l;
3381 }
3382 }
3383
3384 /* used for ROM loading : can write in RAM and ROM */
3385 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3386 const uint8_t *buf, int len)
3387 {
3388 int l;
3389 uint8_t *ptr;
3390 target_phys_addr_t page;
3391 unsigned long pd;
3392 PhysPageDesc *p;
3393
3394 while (len > 0) {
3395 page = addr & TARGET_PAGE_MASK;
3396 l = (page + TARGET_PAGE_SIZE) - addr;
3397 if (l > len)
3398 l = len;
3399 p = phys_page_find(page >> TARGET_PAGE_BITS);
3400 if (!p) {
3401 pd = IO_MEM_UNASSIGNED;
3402 } else {
3403 pd = p->phys_offset;
3404 }
3405
3406 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3407 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3408 !(pd & IO_MEM_ROMD)) {
3409 /* do nothing */
3410 } else {
3411 unsigned long addr1;
3412 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3413 /* ROM/RAM case */
3414 ptr = qemu_get_ram_ptr(addr1);
3415 memcpy(ptr, buf, l);
3416 }
3417 len -= l;
3418 buf += l;
3419 addr += l;
3420 }
3421 }
3422
3423 typedef struct {
3424 void *buffer;
3425 target_phys_addr_t addr;
3426 target_phys_addr_t len;
3427 } BounceBuffer;
3428
3429 static BounceBuffer bounce;
3430
3431 typedef struct MapClient {
3432 void *opaque;
3433 void (*callback)(void *opaque);
3434 QLIST_ENTRY(MapClient) link;
3435 } MapClient;
3436
3437 static QLIST_HEAD(map_client_list, MapClient) map_client_list
3438 = QLIST_HEAD_INITIALIZER(map_client_list);
3439
3440 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3441 {
3442 MapClient *client = qemu_malloc(sizeof(*client));
3443
3444 client->opaque = opaque;
3445 client->callback = callback;
3446 QLIST_INSERT_HEAD(&map_client_list, client, link);
3447 return client;
3448 }
3449
3450 void cpu_unregister_map_client(void *_client)
3451 {
3452 MapClient *client = (MapClient *)_client;
3453
3454 QLIST_REMOVE(client, link);
3455 qemu_free(client);
3456 }
3457
3458 static void cpu_notify_map_clients(void)
3459 {
3460 MapClient *client;
3461
3462 while (!QLIST_EMPTY(&map_client_list)) {
3463 client = QLIST_FIRST(&map_client_list);
3464 client->callback(client->opaque);
3465 QLIST_REMOVE(client, link);
3466 }
3467 }
3468
3469 /* Map a physical memory region into a host virtual address.
3470 * May map a subset of the requested range, given by and returned in *plen.
3471 * May return NULL if resources needed to perform the mapping are exhausted.
3472 * Use only for reads OR writes - not for read-modify-write operations.
3473 * Use cpu_register_map_client() to know when retrying the map operation is
3474 * likely to succeed.
3475 */
3476 void *cpu_physical_memory_map(target_phys_addr_t addr,
3477 target_phys_addr_t *plen,
3478 int is_write)
3479 {
3480 target_phys_addr_t len = *plen;
3481 target_phys_addr_t done = 0;
3482 int l;
3483 uint8_t *ret = NULL;
3484 uint8_t *ptr;
3485 target_phys_addr_t page;
3486 unsigned long pd;
3487 PhysPageDesc *p;
3488 unsigned long addr1;
3489
3490 while (len > 0) {
3491 page = addr & TARGET_PAGE_MASK;
3492 l = (page + TARGET_PAGE_SIZE) - addr;
3493 if (l > len)
3494 l = len;
3495 p = phys_page_find(page >> TARGET_PAGE_BITS);
3496 if (!p) {
3497 pd = IO_MEM_UNASSIGNED;
3498 } else {
3499 pd = p->phys_offset;
3500 }
3501
3502 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3503 if (done || bounce.buffer) {
3504 break;
3505 }
3506 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3507 bounce.addr = addr;
3508 bounce.len = l;
3509 if (!is_write) {
3510 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3511 }
3512 ptr = bounce.buffer;
3513 } else {
3514 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3515 ptr = qemu_get_ram_ptr(addr1);
3516 }
3517 if (!done) {
3518 ret = ptr;
3519 } else if (ret + done != ptr) {
3520 break;
3521 }
3522
3523 len -= l;
3524 addr += l;
3525 done += l;
3526 }
3527 *plen = done;
3528 return ret;
3529 }
3530
3531 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3532 * Will also mark the memory as dirty if is_write == 1. access_len gives
3533 * the amount of memory that was actually read or written by the caller.
3534 */
3535 void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3536 int is_write, target_phys_addr_t access_len)
3537 {
3538 if (buffer != bounce.buffer) {
3539 if (is_write) {
3540 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
3541 while (access_len) {
3542 unsigned l;
3543 l = TARGET_PAGE_SIZE;
3544 if (l > access_len)
3545 l = access_len;
3546 if (!cpu_physical_memory_is_dirty(addr1)) {
3547 /* invalidate code */
3548 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3549 /* set dirty bit */
3550 cpu_physical_memory_set_dirty_flags(
3551 addr1, (0xff & ~CODE_DIRTY_FLAG));
3552 }
3553 addr1 += l;
3554 access_len -= l;
3555 }
3556 }
3557 return;
3558 }
3559 if (is_write) {
3560 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3561 }
3562 qemu_vfree(bounce.buffer);
3563 bounce.buffer = NULL;
3564 cpu_notify_map_clients();
3565 }
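/* Usage sketch (illustration only): a DMA-capable device would typically do

       target_phys_addr_t plen = len;
       void *buf = cpu_physical_memory_map(addr, &plen, is_write);
       if (buf) {
           ... access at most plen bytes directly ...
           cpu_physical_memory_unmap(buf, plen, is_write, plen);
       } else {
           ... register a map client and retry later, or fall back to
           cpu_physical_memory_rw() ...
       }

   Only one bounce buffer mapping can be outstanding at a time, which is why
   the mapping may fail and cpu_register_map_client() exists. */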
3566
3567 /* warning: addr must be aligned */
3568 uint32_t ldl_phys(target_phys_addr_t addr)
3569 {
3570 int io_index;
3571 uint8_t *ptr;
3572 uint32_t val;
3573 unsigned long pd;
3574 PhysPageDesc *p;
3575
3576 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3577 if (!p) {
3578 pd = IO_MEM_UNASSIGNED;
3579 } else {
3580 pd = p->phys_offset;
3581 }
3582
3583 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3584 !(pd & IO_MEM_ROMD)) {
3585 /* I/O case */
3586 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3587 if (p)
3588 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3589 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3590 } else {
3591 /* RAM case */
3592 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3593 (addr & ~TARGET_PAGE_MASK);
3594 val = ldl_p(ptr);
3595 }
3596 return val;
3597 }
3598
3599 /* warning: addr must be aligned */
3600 uint64_t ldq_phys(target_phys_addr_t addr)
3601 {
3602 int io_index;
3603 uint8_t *ptr;
3604 uint64_t val;
3605 unsigned long pd;
3606 PhysPageDesc *p;
3607
3608 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3609 if (!p) {
3610 pd = IO_MEM_UNASSIGNED;
3611 } else {
3612 pd = p->phys_offset;
3613 }
3614
3615 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3616 !(pd & IO_MEM_ROMD)) {
3617 /* I/O case */
3618 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3619 if (p)
3620 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3621 #ifdef TARGET_WORDS_BIGENDIAN
3622 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3623 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3624 #else
3625 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3626 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3627 #endif
3628 } else {
3629 /* RAM case */
3630 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3631 (addr & ~TARGET_PAGE_MASK);
3632 val = ldq_p(ptr);
3633 }
3634 return val;
3635 }
3636
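/* The helpers marked "XXX: optimize" below take the simple route: they bounce
 * through cpu_physical_memory_read()/write() and, for multi-byte values,
 * convert to or from target byte order with tswap16()/tswap64(). */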
3637 /* XXX: optimize */
3638 uint32_t ldub_phys(target_phys_addr_t addr)
3639 {
3640 uint8_t val;
3641 cpu_physical_memory_read(addr, &val, 1);
3642 return val;
3643 }
3644
3645 /* XXX: optimize */
3646 uint32_t lduw_phys(target_phys_addr_t addr)
3647 {
3648 uint16_t val;
3649 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3650 return tswap16(val);
3651 }
3652
3653 /* warning: addr must be aligned. The ram page is not marked as dirty
3654 and the code inside is not invalidated. It is useful if the dirty
3655 bits are used to track modified PTEs */
3656 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3657 {
3658 int io_index;
3659 uint8_t *ptr;
3660 unsigned long pd;
3661 PhysPageDesc *p;
3662
3663 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3664 if (!p) {
3665 pd = IO_MEM_UNASSIGNED;
3666 } else {
3667 pd = p->phys_offset;
3668 }
3669
3670 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3671 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3672 if (p)
3673 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3674 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3675 } else {
3676 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3677 ptr = qemu_get_ram_ptr(addr1);
3678 stl_p(ptr, val);
3679
3680 if (unlikely(in_migration)) {
3681 if (!cpu_physical_memory_is_dirty(addr1)) {
3682 /* invalidate code */
3683 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3684 /* set dirty bit */
3685 cpu_physical_memory_set_dirty_flags(
3686 addr1, (0xff & ~CODE_DIRTY_FLAG));
3687 }
3688 }
3689 }
3690 }
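
/* Illustrative use of the _notdirty variant (sketch only; names such as
 * pte_addr and PG_ACCESSED_MASK are hypothetical): a target MMU walker that
 * updates accessed/dirty bits in a guest page table entry.  Using
 * stl_phys_notdirty() keeps the emulator's own bookkeeping write from being
 * counted as a modification of the page-table page when the dirty bitmap is
 * used to detect PTE changes.
 *
 *   uint32_t pte = ldl_phys(pte_addr);
 *   stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */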
3691
3692 void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3693 {
3694 int io_index;
3695 uint8_t *ptr;
3696 unsigned long pd;
3697 PhysPageDesc *p;
3698
3699 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3700 if (!p) {
3701 pd = IO_MEM_UNASSIGNED;
3702 } else {
3703 pd = p->phys_offset;
3704 }
3705
3706 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3707 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3708 if (p)
3709 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3710 #ifdef TARGET_WORDS_BIGENDIAN
3711 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3712 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3713 #else
3714 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3715 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3716 #endif
3717 } else {
3718 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3719 (addr & ~TARGET_PAGE_MASK);
3720 stq_p(ptr, val);
3721 }
3722 }
3723
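/* In contrast to the _notdirty variants above, stl_phys() below does the full
 * bookkeeping for a guest-visible RAM store: if the page was clean, any
 * translated blocks covering the written word are invalidated and the page's
 * dirty flags (all except CODE_DIRTY_FLAG) are set, mirroring what
 * cpu_physical_memory_rw() does for ordinary writes. */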
3724 /* warning: addr must be aligned */
3725 void stl_phys(target_phys_addr_t addr, uint32_t val)
3726 {
3727 int io_index;
3728 uint8_t *ptr;
3729 unsigned long pd;
3730 PhysPageDesc *p;
3731
3732 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3733 if (!p) {
3734 pd = IO_MEM_UNASSIGNED;
3735 } else {
3736 pd = p->phys_offset;
3737 }
3738
3739 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3740 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3741 if (p)
3742 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3743 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3744 } else {
3745 unsigned long addr1;
3746 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3747 /* RAM case */
3748 ptr = qemu_get_ram_ptr(addr1);
3749 stl_p(ptr, val);
3750 if (!cpu_physical_memory_is_dirty(addr1)) {
3751 /* invalidate code */
3752 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3753 /* set dirty bit */
3754 cpu_physical_memory_set_dirty_flags(addr1,
3755 (0xff & ~CODE_DIRTY_FLAG));
3756 }
3757 }
3758 }
3759
3760 /* XXX: optimize */
3761 void stb_phys(target_phys_addr_t addr, uint32_t val)
3762 {
3763 uint8_t v = val;
3764 cpu_physical_memory_write(addr, &v, 1);
3765 }
3766
3767 /* XXX: optimize */
3768 void stw_phys(target_phys_addr_t addr, uint32_t val)
3769 {
3770 uint16_t v = tswap16(val);
3771 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3772 }
3773
3774 /* XXX: optimize */
3775 void stq_phys(target_phys_addr_t addr, uint64_t val)
3776 {
3777 val = tswap64(val);
3778 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3779 }
3780
3781 #endif
3782
3783 /* virtual memory access for debug (includes writing to ROM) */
3784 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3785 uint8_t *buf, int len, int is_write)
3786 {
3787 int l;
3788 target_phys_addr_t phys_addr;
3789 target_ulong page;
3790
3791 while (len > 0) {
3792 page = addr & TARGET_PAGE_MASK;
3793 phys_addr = cpu_get_phys_page_debug(env, page);
3794 /* if no physical page mapped, return an error */
3795 if (phys_addr == -1)
3796 return -1;
3797 l = (page + TARGET_PAGE_SIZE) - addr;
3798 if (l > len)
3799 l = len;
3800 phys_addr += (addr & ~TARGET_PAGE_MASK);
3801 #if !defined(CONFIG_USER_ONLY)
3802 if (is_write)
3803 cpu_physical_memory_write_rom(phys_addr, buf, l);
3804 else
3805 #endif
3806 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3807 len -= l;
3808 buf += l;
3809 addr += l;
3810 }
3811 return 0;
3812 }
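
/* Typical caller (illustrative sketch only; env/addr stand in for whatever
 * the debugger front end has at hand): the gdb stub reads guest memory
 * through the current CPU's virtual address space.
 *
 *   uint8_t buf[64];
 *   if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) < 0) {
 *       // no physical page is mapped at this virtual address
 *   }
 */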
3813
3814 /* in deterministic execution mode, instructions doing device I/Os
3815 must be at the end of the TB */
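/* With icount enabled, a TB's whole instruction budget is charged when the
 * block is entered, so a device access in the middle of a TB would be seen at
 * the wrong virtual time.  cpu_io_recompile() is invoked from the softmmu I/O
 * slow path: it locates the offending TB from the host return address, works
 * out how many guest instructions had actually completed, and regenerates the
 * block with CF_LAST_IO so the I/O instruction ends the TB and can be
 * replayed deterministically. */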
3816 void cpu_io_recompile(CPUState *env, void *retaddr)
3817 {
3818 TranslationBlock *tb;
3819 uint32_t n, cflags;
3820 target_ulong pc, cs_base;
3821 uint64_t flags;
3822
3823 tb = tb_find_pc((unsigned long)retaddr);
3824 if (!tb) {
3825 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3826 retaddr);
3827 }
3828 n = env->icount_decr.u16.low + tb->icount;
3829 cpu_restore_state(tb, env, (unsigned long)retaddr);
3830 /* Calculate how many instructions had been executed before the fault
3831 occurred. */
3832 n = n - env->icount_decr.u16.low;
3833 /* Generate a new TB ending on the I/O insn. */
3834 n++;
3835 /* On MIPS and SH, delay slot instructions can only be restarted if
3836 they were already the first instruction in the TB. If this is not
3837 the first instruction in a TB then re-execute the preceding
3838 branch. */
3839 #if defined(TARGET_MIPS)
3840 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3841 env->active_tc.PC -= 4;
3842 env->icount_decr.u16.low++;
3843 env->hflags &= ~MIPS_HFLAG_BMASK;
3844 }
3845 #elif defined(TARGET_SH4)
3846 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3847 && n > 1) {
3848 env->pc -= 2;
3849 env->icount_decr.u16.low++;
3850 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3851 }
3852 #endif
3853 /* This should never happen. */
3854 if (n > CF_COUNT_MASK)
3855 cpu_abort(env, "TB too big during recompile");
3856
3857 cflags = n | CF_LAST_IO;
3858 pc = tb->pc;
3859 cs_base = tb->cs_base;
3860 flags = tb->flags;
3861 tb_phys_invalidate(tb, -1);
3862 /* FIXME: In theory this could raise an exception. In practice
3863 we have already translated the block once so it's probably ok. */
3864 tb_gen_code(env, pc, cs_base, flags, cflags);
3865 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3866 the first in the TB) then we end up generating a whole new TB and
3867 repeating the fault, which is horribly inefficient.
3868 Better would be to execute just this insn uncached, or generate a
3869 second new TB. */
3870 cpu_resume_from_signal(env, NULL);
3871 }
3872
3873 #if !defined(CONFIG_USER_ONLY)
3874
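/* Summarize the state of the translation cache: code buffer usage, average
 * and maximum TB sizes, cross-page TBs and direct-jump chaining, plus the
 * flush/invalidate counters maintained elsewhere in this file.  The
 * monitor's "info jit" command is the usual consumer of this output. */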
3875 void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
3876 {
3877 int i, target_code_size, max_target_code_size;
3878 int direct_jmp_count, direct_jmp2_count, cross_page;
3879 TranslationBlock *tb;
3880
3881 target_code_size = 0;
3882 max_target_code_size = 0;
3883 cross_page = 0;
3884 direct_jmp_count = 0;
3885 direct_jmp2_count = 0;
3886 for(i = 0; i < nb_tbs; i++) {
3887 tb = &tbs[i];
3888 target_code_size += tb->size;
3889 if (tb->size > max_target_code_size)
3890 max_target_code_size = tb->size;
3891 if (tb->page_addr[1] != -1)
3892 cross_page++;
3893 if (tb->tb_next_offset[0] != 0xffff) {
3894 direct_jmp_count++;
3895 if (tb->tb_next_offset[1] != 0xffff) {
3896 direct_jmp2_count++;
3897 }
3898 }
3899 }
3900 /* XXX: avoid using doubles? */
3901 cpu_fprintf(f, "Translation buffer state:\n");
3902 cpu_fprintf(f, "gen code size %td/%ld\n",
3903 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3904 cpu_fprintf(f, "TB count %d/%d\n",
3905 nb_tbs, code_gen_max_blocks);
3906 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3907 nb_tbs ? target_code_size / nb_tbs : 0,
3908 max_target_code_size);
3909 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
3910 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3911 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3912 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3913 cross_page,
3914 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3915 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
3916 direct_jmp_count,
3917 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3918 direct_jmp2_count,
3919 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3920 cpu_fprintf(f, "\nStatistics:\n");
3921 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3922 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3923 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
3924 tcg_dump_info(f, cpu_fprintf);
3925 }
3926
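/* Instantiate the software-MMU accessors used for code fetch at translation
 * time.  Each inclusion of softmmu_template.h with a different SHIFT
 * generates the loader for one access width (1, 2, 4 and 8 bytes);
 * MMUSUFFIX/_cmmu plus SOFTMMU_CODE_ACCESS select the code-read variants,
 * GETPC() is stubbed to NULL and env is aliased to cpu_single_env because
 * these helpers run from the translator rather than from generated code. */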
3927 #define MMUSUFFIX _cmmu
3928 #define GETPC() NULL
3929 #define env cpu_single_env
3930 #define SOFTMMU_CODE_ACCESS
3931
3932 #define SHIFT 0
3933 #include "softmmu_template.h"
3934
3935 #define SHIFT 1
3936 #include "softmmu_template.h"
3937
3938 #define SHIFT 2
3939 #include "softmmu_template.h"
3940
3941 #define SHIFT 3
3942 #include "softmmu_template.h"
3943
3944 #undef env
3945
3946 #endif
3947