1 /*
2  *  virtual page mapping and translated block handling
3  *
4  *  Copyright (c) 2003 Fabrice Bellard
5  *
6  * This library is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2 of the License, or (at your option) any later version.
10  *
11  * This library is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18  */
19 #include "config.h"
20 #ifdef _WIN32
21 #define WIN32_LEAN_AND_MEAN
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34 
35 #include "cpu.h"
36 #include "exec/exec-all.h"
37 #include "qemu-common.h"
38 #include "tcg.h"
39 #include "hw/hw.h"
40 #include "hw/qdev.h"
41 #include "hw/xen/xen.h"
42 #include "qemu/osdep.h"
43 #include "qemu/tls.h"
44 #include "sysemu/kvm.h"
45 #include "exec/cputlb.h"
46 #include "exec/hax.h"
47 #include "qemu/timer.h"
48 #if defined(CONFIG_USER_ONLY)
49 #include <qemu.h>
50 #endif
51 
52 //#define DEBUG_SUBPAGE
53 
54 #if !defined(CONFIG_USER_ONLY)
55 int phys_ram_fd;
56 static int in_migration;
57 
58 RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) };
59 #endif
60 
61 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
62 DEFINE_TLS(CPUState *, current_cpu);
63 
64 /* 0 = Do not count executed instructions.
65    1 = Precise instruction counting.
66    2 = Adaptive rate instruction counting.  */
67 int use_icount = 0;
68 /* Current instruction counter.  While executing translated code this may
69    include some instructions that have not yet been executed.  */
70 int64_t qemu_icount;
71 
72 #if !defined(CONFIG_USER_ONLY)
73 static void io_mem_init(void);
74 
75 /* io memory support */
76 CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
77 CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
78 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
79 static char io_mem_used[IO_MEM_NB_ENTRIES];
80 int io_mem_watch;
81 #endif
82 
83 /* log support */
84 #ifdef WIN32
85 static const char *logfilename = "qemu.log";
86 #else
87 static const char *logfilename = "/tmp/qemu.log";
88 #endif
89 FILE *logfile;
90 int loglevel;
91 static int log_append = 0;
92 
93 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
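/* A subpage_t describes one target page that is split into multiple I/O
   regions: per-offset read/write handler tables, opaque pointers and region
   offsets (see subpage_register()/subpage_init() below). */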
94 typedef struct subpage_t {
95     hwaddr base;
96     CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
97     CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
98     void *opaque[TARGET_PAGE_SIZE][2][4];
99     ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
100 } subpage_t;
101 
102 /* Must be called before using the QEMU cpus. 'tb_size' is the size
103    (in bytes) allocated to the translation buffer. Zero means default
104    size. */
105 void cpu_exec_init_all(unsigned long tb_size)
106 {
107     //cpu_gen_init();
108     //code_gen_alloc(tb_size);
109     //code_gen_ptr = code_gen_buffer;
110     //page_init();
111     tcg_exec_init(tb_size);
112 #if !defined(CONFIG_USER_ONLY)
113     qemu_mutex_init(&ram_list.mutex);
114     io_mem_init();
115 #endif
116 }
117 
118 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
119 
120 #define CPU_COMMON_SAVE_VERSION 1
121 
122 static void cpu_common_save(QEMUFile *f, void *opaque)
123 {
124     CPUOldState *env = opaque;
125     CPUState *cpu = ENV_GET_CPU(env);
126 
127     cpu_synchronize_state(cpu, 0);
128 
129     qemu_put_be32s(f, &cpu->halted);
130     qemu_put_be32s(f, &cpu->interrupt_request);
131 }
132 
133 static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
134 {
135     CPUOldState *env = opaque;
136     CPUState *cpu = ENV_GET_CPU(env);
137 
138     if (version_id != CPU_COMMON_SAVE_VERSION)
139         return -EINVAL;
140 
141     qemu_get_be32s(f, &cpu->halted);
142     qemu_get_be32s(f, &cpu->interrupt_request);
143     /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
144        version_id is increased. */
145     cpu->interrupt_request &= ~0x01;
146     tlb_flush(env, 1);
147     cpu_synchronize_state(cpu, 1);
148 
149     return 0;
150 }
151 #endif
152 
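/* Return the CPUState whose cpu_index matches, or NULL if there is none. */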
153 CPUState *qemu_get_cpu(int cpu_index)
154 {
155     CPUState *cpu;
156 
157     CPU_FOREACH(cpu) {
158         if (cpu->cpu_index == cpu_index)
159             return cpu;
160     }
161     return NULL;
162 }
163 
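/* Register a freshly created CPU: give it the next free cpu_index, link it
   into the global 'cpus' list and, for system emulation, register its
   savevm handlers. */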
164 void cpu_exec_init(CPUArchState *env)
165 {
166     CPUState *cpu = ENV_GET_CPU(env);
167 
168 #if defined(CONFIG_USER_ONLY)
169     cpu_list_lock();
170 #endif
171     // Compute CPU index from list position.
172     int cpu_index = 0;
173     CPUState *cpu1;
174     CPU_FOREACH(cpu1) {
175         cpu_index++;
176     }
177     cpu->cpu_index = cpu_index;
178     QTAILQ_INSERT_TAIL(&cpus, cpu, node);
179 
180     cpu->numa_node = 0;
181     QTAILQ_INIT(&env->breakpoints);
182     QTAILQ_INIT(&env->watchpoints);
183 #if defined(CONFIG_USER_ONLY)
184     cpu_list_unlock();
185 #endif
186 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
187     register_savevm(NULL,
188                     "cpu_common",
189                     cpu_index,
190                     CPU_COMMON_SAVE_VERSION,
191                     cpu_common_save,
192                     cpu_common_load,
193                     env);
194     register_savevm(NULL,
195                     "cpu",
196                     cpu_index,
197                     CPU_SAVE_VERSION,
198                     cpu_save,
199                     cpu_load,
200                     env);
201 #endif
202 }
203 
204 #if defined(TARGET_HAS_ICE)
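/* Invalidate any translated code containing 'pc' so that the breakpoint is
   honoured the next time the guest code at that address is translated. */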
205 static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
206 {
207     hwaddr addr;
208     target_ulong pd;
209     ram_addr_t ram_addr;
210     PhysPageDesc *p;
211 
212     addr = cpu_get_phys_page_debug(env, pc);
213     p = phys_page_find(addr >> TARGET_PAGE_BITS);
214     if (!p) {
215         pd = IO_MEM_UNASSIGNED;
216     } else {
217         pd = p->phys_offset;
218     }
219     ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
220     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
221 }
222 #endif
223 
224 #if defined(CONFIG_USER_ONLY)
225 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
226 
227 {
228 }
229 
230 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
231                           int flags, CPUWatchpoint **watchpoint)
232 {
233     return -ENOSYS;
234 }
235 #else
236 /* Add a watchpoint.  */
237 int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
238                           int flags, CPUWatchpoint **watchpoint)
239 {
240     target_ulong len_mask = ~(len - 1);
241     CPUWatchpoint *wp;
242 
243     /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
244     if ((len & (len - 1)) || (addr & ~len_mask) ||
245             len == 0 || len > TARGET_PAGE_SIZE) {
246         fprintf(stderr, "qemu: tried to set invalid watchpoint at "
247                 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
248         return -EINVAL;
249     }
250     wp = g_malloc(sizeof(*wp));
251 
252     wp->vaddr = addr;
253     wp->len_mask = len_mask;
254     wp->flags = flags;
255 
256     /* keep all GDB-injected watchpoints in front */
257     if (flags & BP_GDB)
258         QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
259     else
260         QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
261 
262     tlb_flush_page(env, addr);
263 
264     if (watchpoint)
265         *watchpoint = wp;
266     return 0;
267 }
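/*
 * Hypothetical caller (sketch only), e.g. a gdbstub-style front end watching
 * a 4-byte, naturally aligned location for writes:
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         ... report the error (e.g. -EINVAL) back to the debugger ...
 *     }
 */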
268 
269 /* Remove a specific watchpoint.  */
270 int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
271                           int flags)
272 {
273     target_ulong len_mask = ~(len - 1);
274     CPUWatchpoint *wp;
275 
276     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
277         if (addr == wp->vaddr && len_mask == wp->len_mask
278                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
279             cpu_watchpoint_remove_by_ref(env, wp);
280             return 0;
281         }
282     }
283     return -ENOENT;
284 }
285 
286 /* Remove a specific watchpoint by reference.  */
287 void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
288 {
289     QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
290 
291     tlb_flush_page(env, watchpoint->vaddr);
292 
293     g_free(watchpoint);
294 }
295 
296 /* Remove all matching watchpoints.  */
297 void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
298 {
299     CPUWatchpoint *wp, *next;
300 
301     QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
302         if (wp->flags & mask)
303             cpu_watchpoint_remove_by_ref(env, wp);
304     }
305 }
306 #endif
307 
308 /* Add a breakpoint.  */
309 int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
310                           CPUBreakpoint **breakpoint)
311 {
312 #if defined(TARGET_HAS_ICE)
313     CPUBreakpoint *bp;
314 
315     bp = g_malloc(sizeof(*bp));
316 
317     bp->pc = pc;
318     bp->flags = flags;
319 
320     /* keep all GDB-injected breakpoints in front */
321     if (flags & BP_GDB) {
322         QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
323     } else {
324         QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
325     }
326 
327     breakpoint_invalidate(env, pc);
328 
329     if (breakpoint) {
330         *breakpoint = bp;
331     }
332     return 0;
333 #else
334     return -ENOSYS;
335 #endif
336 }
337 
338 /* Remove a specific breakpoint.  */
339 int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
340 {
341 #if defined(TARGET_HAS_ICE)
342     CPUBreakpoint *bp;
343 
344     QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
345         if (bp->pc == pc && bp->flags == flags) {
346             cpu_breakpoint_remove_by_ref(env, bp);
347             return 0;
348         }
349     }
350     return -ENOENT;
351 #else
352     return -ENOSYS;
353 #endif
354 }
355 
356 /* Remove a specific breakpoint by reference.  */
357 void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
358 {
359 #if defined(TARGET_HAS_ICE)
360     QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
361 
362     breakpoint_invalidate(env, breakpoint->pc);
363 
364     g_free(breakpoint);
365 #endif
366 }
367 
368 /* Remove all matching breakpoints. */
369 void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
370 {
371 #if defined(TARGET_HAS_ICE)
372     CPUBreakpoint *bp, *next;
373 
374     QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
375         if (bp->flags & mask)
376             cpu_breakpoint_remove_by_ref(env, bp);
377     }
378 #endif
379 }
380 
381 /* enable or disable single-step mode. EXCP_DEBUG is returned by the
382    CPU loop after each instruction */
383 void cpu_single_step(CPUState *cpu, int enabled)
384 {
385 #if defined(TARGET_HAS_ICE)
386     if (cpu->singlestep_enabled != enabled) {
387         cpu->singlestep_enabled = enabled;
388         if (kvm_enabled()) {
389             kvm_update_guest_debug(cpu->env_ptr, 0);
390         } else {
391             /* must flush all the translated code to avoid inconsistencies */
392             /* XXX: only flush what is necessary */
393             tb_flush(cpu->env_ptr);
394         }
395     }
396 #endif
397 }
398 
399 /* enable or disable low-level logging */
400 void cpu_set_log(int log_flags)
401 {
402     loglevel = log_flags;
403     if (loglevel && !logfile) {
404         logfile = fopen(logfilename, log_append ? "a" : "w");
405         if (!logfile) {
406             perror(logfilename);
407             exit(1);
408         }
409 #if !defined(CONFIG_SOFTMMU)
410         /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
411         {
412             static char logfile_buf[4096];
413             setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
414         }
415 #elif !defined(_WIN32)
416         /* Win32 doesn't support line-buffering and requires size >= 2 */
417         setvbuf(logfile, NULL, _IOLBF, 0);
418 #endif
419         log_append = 1;
420     }
421     if (!loglevel && logfile) {
422         fclose(logfile);
423         logfile = NULL;
424     }
425 }
426 
427 void cpu_set_log_filename(const char *filename)
428 {
429     logfilename = strdup(filename);
430     if (logfile) {
431         fclose(logfile);
432         logfile = NULL;
433     }
434     cpu_set_log(loglevel);
435 }
436 
437 void cpu_unlink_tb(CPUOldState *env)
438 {
439     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
440        problem and hope the cpu will stop of its own accord.  For userspace
441        emulation this often isn't actually as bad as it sounds.  Often
442        signals are used primarily to interrupt blocking syscalls.  */
443     TranslationBlock *tb;
444     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
445 
446     spin_lock(&interrupt_lock);
447     tb = env->current_tb;
448     /* if the cpu is currently executing code, we must unlink it and
449        all the potentially executing TB */
450     if (tb) {
451         env->current_tb = NULL;
452         tb_reset_jump_recursive(tb);
453     }
454     spin_unlock(&interrupt_lock);
455 }
456 
457 void cpu_reset_interrupt(CPUState *cpu, int mask)
458 {
459     cpu->interrupt_request &= ~mask;
460 }
461 
462 void cpu_exit(CPUState *cpu)
463 {
464     cpu->exit_request = 1;
465     cpu_unlink_tb(cpu->env_ptr);
466 }
467 
468 void cpu_abort(CPUArchState *env, const char *fmt, ...)
469 {
470     CPUState *cpu = ENV_GET_CPU(env);
471 
472     va_list ap;
473     va_list ap2;
474 
475     va_start(ap, fmt);
476     va_copy(ap2, ap);
477     fprintf(stderr, "qemu: fatal: ");
478     vfprintf(stderr, fmt, ap);
479     fprintf(stderr, "\n");
480 #ifdef TARGET_I386
481     cpu_dump_state(cpu, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
482 #else
483     cpu_dump_state(cpu, stderr, fprintf, 0);
484 #endif
485     if (qemu_log_enabled()) {
486         qemu_log("qemu: fatal: ");
487         qemu_log_vprintf(fmt, ap2);
488         qemu_log("\n");
489 #ifdef TARGET_I386
490         log_cpu_state(cpu, X86_DUMP_FPU | X86_DUMP_CCOP);
491 #else
492         log_cpu_state(cpu, 0);
493 #endif
494         qemu_log_flush();
495         qemu_log_close();
496     }
497     va_end(ap2);
498     va_end(ap);
499 #if defined(CONFIG_USER_ONLY)
500     {
501         struct sigaction act;
502         sigfillset(&act.sa_mask);
503         act.sa_handler = SIG_DFL;
504         sigaction(SIGABRT, &act, NULL);
505     }
506 #endif
507     abort();
508 }
509 
510 #if !defined(CONFIG_USER_ONLY)
511 static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
512 {
513     RAMBlock *block;
514 
515     /* The list is protected by the iothread lock here.  */
516     block = ram_list.mru_block;
517     if (block && addr - block->offset < block->length) {
518         goto found;
519     }
520     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
521         if (addr - block->offset < block->length) {
522             goto found;
523         }
524     }
525 
526     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
527     abort();
528 
529 found:
530     ram_list.mru_block = block;
531     return block;
532 }
533 
534 /* Note: start and end must be within the same ram block.  */
535 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
536                                      int dirty_flags)
537 {
538     unsigned long length, start1;
539     int i;
540 
541     start &= TARGET_PAGE_MASK;
542     end = TARGET_PAGE_ALIGN(end);
543 
544     length = end - start;
545     if (length == 0)
546         return;
547     cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
548 
549     /* we modify the TLB cache so that the dirty bit will be set again
550        when accessing the range */
551     start1 = (unsigned long)qemu_safe_ram_ptr(start);
552     /* Check that we don't span multiple blocks - this breaks the
553        address comparisons below.  */
554     if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
555             != (end - 1) - start) {
556         abort();
557     }
558 
559     CPUState *cpu;
560     CPU_FOREACH(cpu) {
561         int mmu_idx;
562         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
563             for(i = 0; i < CPU_TLB_SIZE; i++) {
564                 CPUArchState* env = cpu->env_ptr;
565                 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
566                                       start1, length);
567             }
568         }
569     }
570 }
571 
572 int cpu_physical_memory_set_dirty_tracking(int enable)
573 {
574     in_migration = enable;
575     if (kvm_enabled()) {
576         return kvm_set_migration_log(enable);
577     }
578     return 0;
579 }
580 
581 int cpu_physical_memory_get_dirty_tracking(void)
582 {
583     return in_migration;
584 }
585 
586 int cpu_physical_sync_dirty_bitmap(hwaddr start_addr,
587                                    hwaddr end_addr)
588 {
589     int ret = 0;
590 
591     if (kvm_enabled())
592         ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
593     return ret;
594 }
595 
596 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
597 {
598     ram_addr_t ram_addr;
599     void *p;
600 
601     if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
602         p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
603             + tlb_entry->addend);
604         ram_addr = qemu_ram_addr_from_host_nofail(p);
605         if (!cpu_physical_memory_is_dirty(ram_addr)) {
606             tlb_entry->addr_write |= TLB_NOTDIRTY;
607         }
608     }
609 }
610 
611 /* update the TLB according to the current state of the dirty bits */
612 void cpu_tlb_update_dirty(CPUArchState *env)
613 {
614     int i;
615     int mmu_idx;
616     for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
617         for(i = 0; i < CPU_TLB_SIZE; i++)
618             tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
619     }
620 }
621 
622 
623 #else
624 
625 void tlb_flush(CPUArchState *env, int flush_global)
626 {
627 }
628 
629 void tlb_flush_page(CPUArchState *env, target_ulong addr)
630 {
631 }
632 
633 int tlb_set_page_exec(CPUArchState *env, target_ulong vaddr,
634                       hwaddr paddr, int prot,
635                       int mmu_idx, int is_softmmu)
636 {
637     return 0;
638 }
639 
640 static inline void tlb_set_dirty(CPUOldState *env,
641                                  unsigned long addr, target_ulong vaddr)
642 {
643 }
644 #endif /* defined(CONFIG_USER_ONLY) */
645 
646 #if !defined(CONFIG_USER_ONLY)
647 
648 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
649                              ram_addr_t memory, ram_addr_t region_offset);
650 static void *subpage_init (hwaddr base, ram_addr_t *phys,
651                            ram_addr_t orig_memory, ram_addr_t region_offset);
652 
653 static void *(*phys_mem_alloc)(size_t size) = qemu_anon_ram_alloc;
654 
655 /*
656  * Set a custom physical guest memory allocator.
657  * Accelerators with unusual needs may need this.  Hopefully, we can
658  * get rid of it eventually.
659  */
660 void phys_mem_set_alloc(void *(*alloc)(size_t))
661 {
662     phys_mem_alloc = alloc;
663 }
664 
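/* Helper for cpu_register_physical_memory_log(): compute the intra-page
   range [start_addr2, end_addr2] that the registration covers within the
   page at 'addr', and set need_subpage when it does not span the whole
   page. */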
665 #define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
666                       need_subpage)                                     \
667     do {                                                                \
668         if (addr > start_addr)                                          \
669             start_addr2 = 0;                                            \
670         else {                                                          \
671             start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
672             if (start_addr2 > 0)                                        \
673                 need_subpage = 1;                                       \
674         }                                                               \
675                                                                         \
676         if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
677             end_addr2 = TARGET_PAGE_SIZE - 1;                           \
678         else {                                                          \
679             end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
680             if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
681                 need_subpage = 1;                                       \
682         }                                                               \
683     } while (0)
684 
685 /* register physical memory.
686    For RAM, 'size' must be a multiple of the target page size.
687    If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
688    io memory page.  The address used when calling the IO function is
689    the offset from the start of the region, plus region_offset.  Both
690    start_addr and region_offset are rounded down to a page boundary
691    before calculating this offset.  This should not be a problem unless
692    the low bits of start_addr and region_offset differ.  */
693 void cpu_register_physical_memory_log(hwaddr start_addr,
694                                          ram_addr_t size,
695                                          ram_addr_t phys_offset,
696                                          ram_addr_t region_offset,
697                                          bool log_dirty)
698 {
699     hwaddr addr, end_addr;
700     PhysPageDesc *p;
701     CPUState *cpu;
702     ram_addr_t orig_size = size;
703     subpage_t *subpage;
704 
705     if (kvm_enabled())
706         kvm_set_phys_mem(start_addr, size, phys_offset);
707 #ifdef CONFIG_HAX
708     if (hax_enabled())
709         hax_set_phys_mem(start_addr, size, phys_offset);
710 #endif
711 
712     if (phys_offset == IO_MEM_UNASSIGNED) {
713         region_offset = start_addr;
714     }
715     region_offset &= TARGET_PAGE_MASK;
716     size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
717     end_addr = start_addr + (hwaddr)size;
718 
719     addr = start_addr;
720     do {
721         p = phys_page_find(addr >> TARGET_PAGE_BITS);
722         if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
723             ram_addr_t orig_memory = p->phys_offset;
724             hwaddr start_addr2, end_addr2;
725             int need_subpage = 0;
726 
727             CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
728                           need_subpage);
729             if (need_subpage) {
730                 if (!(orig_memory & IO_MEM_SUBPAGE)) {
731                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
732                                            &p->phys_offset, orig_memory,
733                                            p->region_offset);
734                 } else {
735                     subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
736                                             >> IO_MEM_SHIFT];
737                 }
738                 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
739                                  region_offset);
740                 p->region_offset = 0;
741             } else {
742                 p->phys_offset = phys_offset;
743                 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
744                     (phys_offset & IO_MEM_ROMD))
745                     phys_offset += TARGET_PAGE_SIZE;
746             }
747         } else {
748             p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
749             p->phys_offset = phys_offset;
750             p->region_offset = region_offset;
751             if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
752                 (phys_offset & IO_MEM_ROMD)) {
753                 phys_offset += TARGET_PAGE_SIZE;
754             } else {
755                 hwaddr start_addr2, end_addr2;
756                 int need_subpage = 0;
757 
758                 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
759                               end_addr2, need_subpage);
760 
761                 if (need_subpage) {
762                     subpage = subpage_init((addr & TARGET_PAGE_MASK),
763                                            &p->phys_offset, IO_MEM_UNASSIGNED,
764                                            addr & TARGET_PAGE_MASK);
765                     subpage_register(subpage, start_addr2, end_addr2,
766                                      phys_offset, region_offset);
767                     p->region_offset = 0;
768                 }
769             }
770         }
771         region_offset += TARGET_PAGE_SIZE;
772         addr += TARGET_PAGE_SIZE;
773     } while (addr != end_addr);
774 
775     /* since each CPU stores ram addresses in its TLB cache, we must
776        reset the modified entries */
777     /* XXX: slow ! */
778     CPU_FOREACH(cpu) {
779         tlb_flush(cpu->env_ptr, 1);
780     }
781 }
782 
783 /* XXX: temporary until new memory mapping API */
784 ram_addr_t cpu_get_physical_page_desc(hwaddr addr)
785 {
786     PhysPageDesc *p;
787 
788     p = phys_page_find(addr >> TARGET_PAGE_BITS);
789     if (!p)
790         return IO_MEM_UNASSIGNED;
791     return p->phys_offset;
792 }
793 
794 void qemu_register_coalesced_mmio(hwaddr addr, ram_addr_t size)
795 {
796     if (kvm_enabled())
797         kvm_coalesce_mmio_region(addr, size);
798 }
799 
800 void qemu_unregister_coalesced_mmio(hwaddr addr, ram_addr_t size)
801 {
802     if (kvm_enabled())
803         kvm_uncoalesce_mmio_region(addr, size);
804 }
805 
806 void qemu_mutex_lock_ramlist(void)
807 {
808     qemu_mutex_lock(&ram_list.mutex);
809 }
810 
811 void qemu_mutex_unlock_ramlist(void)
812 {
813     qemu_mutex_unlock(&ram_list.mutex);
814 }
815 
816 #if defined(__linux__) && !defined(CONFIG_ANDROID)
817 
818 #include <sys/vfs.h>
819 
820 #define HUGETLBFS_MAGIC       0x958458f6
821 
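/* Return the block size of the hugetlbfs mount containing 'path', or 0 on
   error; warn if 'path' is not actually on hugetlbfs. */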
822 static long gethugepagesize(const char *path)
823 {
824     struct statfs fs;
825     int ret;
826 
827     do {
828         ret = statfs(path, &fs);
829     } while (ret != 0 && errno == EINTR);
830 
831     if (ret != 0) {
832         perror(path);
833         return 0;
834     }
835 
836     if (fs.f_type != HUGETLBFS_MAGIC)
837         fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
838 
839     return fs.f_bsize;
840 }
841 
842 static sigjmp_buf sigjump;
843 
844 static void sigbus_handler(int signal)
845 {
846     siglongjmp(sigjump, 1);
847 }
848 
849 static void *file_ram_alloc(RAMBlock *block,
850                             ram_addr_t memory,
851                             const char *path)
852 {
853     char *filename;
854     char *sanitized_name;
855     char *c;
856     void *area;
857     int fd;
858     unsigned long hpagesize;
859 
860     hpagesize = gethugepagesize(path);
861     if (!hpagesize) {
862         return NULL;
863     }
864 
865     if (memory < hpagesize) {
866         return NULL;
867     }
868 
869     if (kvm_enabled() && !kvm_has_sync_mmu()) {
870         fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
871         return NULL;
872     }
873 
874     /* Make name safe to use with mkstemp by replacing '/' with '_'. */
875     sanitized_name = g_strdup(block->mr->name);
876     for (c = sanitized_name; *c != '\0'; c++) {
877         if (*c == '/')
878             *c = '_';
879     }
880 
881     filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
882                                sanitized_name);
883     g_free(sanitized_name);
884 
885     fd = mkstemp(filename);
886     if (fd < 0) {
887         perror("unable to create backing store for hugepages");
888         g_free(filename);
889         return NULL;
890     }
891     unlink(filename);
892     g_free(filename);
893 
894     memory = (memory+hpagesize-1) & ~(hpagesize-1);
895 
896     /*
897      * ftruncate is not supported by hugetlbfs in older
898      * hosts, so don't bother bailing out on errors.
899      * If anything goes wrong with it under other filesystems,
900      * mmap will fail.
901      */
902     if (ftruncate(fd, memory))
903         perror("ftruncate");
904 
905     area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
906     if (area == MAP_FAILED) {
907         perror("file_ram_alloc: can't mmap RAM pages");
908         close(fd);
909         return (NULL);
910     }
911 
912     if (mem_prealloc) {
913         int ret, i;
914         struct sigaction act, oldact;
915         sigset_t set, oldset;
916 
917         memset(&act, 0, sizeof(act));
918         act.sa_handler = &sigbus_handler;
919         act.sa_flags = 0;
920 
921         ret = sigaction(SIGBUS, &act, &oldact);
922         if (ret) {
923             perror("file_ram_alloc: failed to install signal handler");
924             exit(1);
925         }
926 
927         /* unblock SIGBUS */
928         sigemptyset(&set);
929         sigaddset(&set, SIGBUS);
930         pthread_sigmask(SIG_UNBLOCK, &set, &oldset);
931 
932         if (sigsetjmp(sigjump, 1)) {
933             fprintf(stderr, "file_ram_alloc: failed to preallocate pages\n");
934             exit(1);
935         }
936 
937         /* MAP_POPULATE silently ignores failures */
938         for (i = 0; i < (memory/hpagesize)-1; i++) {
939             memset(area + (hpagesize*i), 0, 1);
940         }
941 
942         ret = sigaction(SIGBUS, &oldact, NULL);
943         if (ret) {
944             perror("file_ram_alloc: failed to reinstall signal handler");
945             exit(1);
946         }
947 
948         pthread_sigmask(SIG_SETMASK, &oldset, NULL);
949     }
950 
951     block->fd = fd;
952     return area;
953 }
954 #else
955 static void *file_ram_alloc(RAMBlock *block,
956                             ram_addr_t memory,
957                             const char *path)
958 {
959     fprintf(stderr, "-mem-path not supported on this host\n");
960     exit(1);
961 }
962 #endif
963 
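/* Choose an offset for a new RAM block: scan the existing blocks and return
   the start of the smallest gap large enough to hold 'size' bytes. */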
964 static ram_addr_t find_ram_offset(ram_addr_t size)
965 {
966     RAMBlock *block, *next_block;
967     ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
968 
969     assert(size != 0); /* it would hand out the same offset multiple times */
970 
971     if (QTAILQ_EMPTY(&ram_list.blocks))
972         return 0;
973 
974     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
975         ram_addr_t end, next = RAM_ADDR_MAX;
976 
977         end = block->offset + block->length;
978 
979         QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
980             if (next_block->offset >= end) {
981                 next = MIN(next, next_block->offset);
982             }
983         }
984         if (next - end >= size && next - end < mingap) {
985             offset = end;
986             mingap = next - end;
987         }
988     }
989 
990     if (offset == RAM_ADDR_MAX) {
991         fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
992                 (uint64_t)size);
993         abort();
994     }
995 
996     return offset;
997 }
998 
999 ram_addr_t last_ram_offset(void)
1000 {
1001     RAMBlock *block;
1002     ram_addr_t last = 0;
1003 
1004     QTAILQ_FOREACH(block, &ram_list.blocks, next)
1005         last = MAX(last, block->offset + block->length);
1006 
1007     return last;
1008 }
1009 
1010 static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1011 {
1012 #ifndef CONFIG_ANDROID
1013     int ret;
1014 
1015     /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
1016     if (!qemu_opt_get_bool(qemu_get_machine_opts(),
1017                            "dump-guest-core", true)) {
1018         ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1019         if (ret) {
1020             perror("qemu_madvise");
1021             fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1022                             "but dump_guest_core=off specified\n");
1023         }
1024     }
1025 #endif  // !CONFIG_ANDROID
1026 }
1027 
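/* Build the block's idstr from the optional device path plus 'name', and
   abort if another block has already registered the same identifier. */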
1028 void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1029 {
1030     RAMBlock *new_block, *block;
1031 
1032     new_block = NULL;
1033     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1034         if (block->offset == addr) {
1035             new_block = block;
1036             break;
1037         }
1038     }
1039     assert(new_block);
1040     assert(!new_block->idstr[0]);
1041 
1042     if (dev) {
1043         char *id = qdev_get_dev_path(dev);
1044         if (id) {
1045             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1046             g_free(id);
1047         }
1048     }
1049     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1050 
1051     /* This assumes the iothread lock is taken here too.  */
1052     qemu_mutex_lock_ramlist();
1053     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1054         if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
1055             fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1056                     new_block->idstr);
1057             abort();
1058         }
1059     }
1060     qemu_mutex_unlock_ramlist();
1061 }
1062 
1063 static int memory_try_enable_merging(void *addr, size_t len)
1064 {
1065 #ifndef CONFIG_ANDROID
1066     if (!qemu_opt_get_bool(qemu_get_machine_opts(), "mem-merge", true)) {
1067         /* disabled by the user */
1068         return 0;
1069     }
1070 
1071     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1072 #else  // CONFIG_ANDROID
1073     return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1074 #endif  // CONFIG_ANDROID
1075 }
1076 
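/* Allocate a new RAM block of 'size' bytes (or adopt the caller-provided
   'host' buffer), insert it into the RAM list and return its ram_addr_t
   offset. */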
1077 ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
1078                                    ram_addr_t size, void *host)
1079 {
1080     RAMBlock *block, *new_block;
1081 
1082     size = TARGET_PAGE_ALIGN(size);
1083     new_block = g_malloc0(sizeof(*new_block));
1084     new_block->fd = -1;
1085 
1086     /* This assumes the iothread lock is taken here too.  */
1087     qemu_mutex_lock_ramlist();
1088     //new_block->mr = mr;
1089     new_block->offset = find_ram_offset(size);
1090     if (host) {
1091         new_block->host = host;
1092         new_block->flags |= RAM_PREALLOC_MASK;
1093     } else if (xen_enabled()) {
1094         if (mem_path) {
1095             fprintf(stderr, "-mem-path not supported with Xen\n");
1096             exit(1);
1097         }
1098         //xen_ram_alloc(new_block->offset, size, mr);
1099     } else {
1100         if (mem_path) {
1101             if (phys_mem_alloc != qemu_anon_ram_alloc) {
1102                 /*
1103                  * file_ram_alloc() needs to allocate just like
1104                  * phys_mem_alloc, but we haven't bothered to provide
1105                  * a hook there.
1106                  */
1107                 fprintf(stderr,
1108                         "-mem-path not supported with this accelerator\n");
1109                 exit(1);
1110             }
1111             new_block->host = file_ram_alloc(new_block, size, mem_path);
1112         }
1113         if (!new_block->host) {
1114             new_block->host = phys_mem_alloc(size);
1115             if (!new_block->host) {
1116                 fprintf(stderr, "Cannot set up guest memory '%s': %s\n",
1117                         name, strerror(errno));
1118                 exit(1);
1119             }
1120 #ifdef CONFIG_HAX
1121             if (hax_enabled()) {
1122                 /*
1123                  * In HAX, qemu allocates the virtual address, and HAX kernel
1124                  * module populates the region with physical memory. Currently
1125                  * we don't populate guest memory on demand, thus we should
1126                  * make sure that sufficient amount of memory is available in
1127                  * advance.
1128                  */
1129                 int ret = hax_populate_ram(
1130                         (uint64_t)(uintptr_t)new_block->host,
1131                         (uint32_t)size);
1132                 if (ret < 0) {
1133                     fprintf(stderr, "Hax failed to populate ram\n");
1134                     exit(-1);
1135                 }
1136             }
1137 #endif  // CONFIG_HAX
1138             memory_try_enable_merging(new_block->host, size);
1139         }
1140     }
1141     new_block->length = size;
1142 
1143     if (dev) {
1144         char *id = qdev_get_dev_path(dev);
1145         if (id) {
1146             snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
1147             g_free(id);
1148         }
1149     }
1150     pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1151 
1152     /* Keep the list sorted from biggest to smallest block.  */
1153     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1154         if (block->length < new_block->length) {
1155             break;
1156         }
1157     }
1158     if (block) {
1159         QTAILQ_INSERT_BEFORE(block, new_block, next);
1160     } else {
1161         QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next);
1162     }
1163     ram_list.mru_block = NULL;
1164 
1165     ram_list.version++;
1166     qemu_mutex_unlock_ramlist();
1167 
1168     ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
1169                                        last_ram_offset() >> TARGET_PAGE_BITS);
1170     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
1171            0xff, size >> TARGET_PAGE_BITS);
1172     //cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
1173 
1174     qemu_ram_setup_dump(new_block->host, size);
1175     //qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
1176     //qemu_madvise(new_block->host, size, QEMU_MADV_DONTFORK);
1177 
1178     if (kvm_enabled())
1179         kvm_setup_guest_memory(new_block->host, size);
1180 
1181     return new_block->offset;
1182 }
1183 
1184 ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
1185 {
1186     return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
1187 }
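/*
 * Typical (hypothetical) device-side use of the helpers above: allocate
 * backing RAM and map it at guest physical address 'base' (sketch only):
 *
 *     ram_addr_t off = qemu_ram_alloc(dev, "mydev.ram", 0x10000);
 *     cpu_register_physical_memory_log(base, 0x10000, off | IO_MEM_RAM, 0, false);
 */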
1188 
1189 void qemu_ram_free_from_ptr(ram_addr_t addr)
1190 {
1191     RAMBlock *block;
1192 
1193     /* This assumes the iothread lock is taken here too.  */
1194     qemu_mutex_lock_ramlist();
1195     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1196         if (addr == block->offset) {
1197             QTAILQ_REMOVE(&ram_list.blocks, block, next);
1198             ram_list.mru_block = NULL;
1199             ram_list.version++;
1200             g_free(block);
1201             break;
1202         }
1203     }
1204     qemu_mutex_unlock_ramlist();
1205 }
1206 
1207 void qemu_ram_free(ram_addr_t addr)
1208 {
1209     RAMBlock *block;
1210 
1211     /* This assumes the iothread lock is taken here too.  */
1212     qemu_mutex_lock_ramlist();
1213     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1214         if (addr == block->offset) {
1215             QTAILQ_REMOVE(&ram_list.blocks, block, next);
1216             ram_list.mru_block = NULL;
1217             ram_list.version++;
1218             if (block->flags & RAM_PREALLOC_MASK) {
1219                 ;
1220             } else if (xen_enabled()) {
1221                 //xen_invalidate_map_cache_entry(block->host);
1222 #ifndef _WIN32
1223             } else if (block->fd >= 0) {
1224                 munmap(block->host, block->length);
1225                 close(block->fd);
1226 #endif
1227             } else {
1228                 qemu_anon_ram_free(block->host, block->length);
1229             }
1230             g_free(block);
1231             break;
1232         }
1233     }
1234     qemu_mutex_unlock_ramlist();
1235 
1236 }
1237 
1238 #ifndef _WIN32
1239 void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1240 {
1241     RAMBlock *block;
1242     ram_addr_t offset;
1243     int flags;
1244     void *area, *vaddr;
1245 
1246     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1247         offset = addr - block->offset;
1248         if (offset < block->length) {
1249             vaddr = block->host + offset;
1250             if (block->flags & RAM_PREALLOC_MASK) {
1251                 ;
1252             } else if (xen_enabled()) {
1253                 abort();
1254             } else {
1255                 flags = MAP_FIXED;
1256                 munmap(vaddr, length);
1257                 if (block->fd >= 0) {
1258 #ifdef MAP_POPULATE
1259                     flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
1260                         MAP_PRIVATE;
1261 #else
1262                     flags |= MAP_PRIVATE;
1263 #endif
1264                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1265                                 flags, block->fd, offset);
1266                 } else {
1267                     /*
1268                      * Remap needs to match alloc.  Accelerators that
1269                      * set phys_mem_alloc never remap.  If they did,
1270                      * we'd need a remap hook here.
1271                      */
1272                     assert(phys_mem_alloc == qemu_anon_ram_alloc);
1273 
1274                     flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1275                     area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1276                                 flags, -1, 0);
1277                 }
1278                 if (area != vaddr) {
1279                     fprintf(stderr, "Could not remap addr: "
1280                             RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
1281                             length, addr);
1282                     exit(1);
1283                 }
1284                 memory_try_enable_merging(vaddr, length);
1285                 qemu_ram_setup_dump(vaddr, length);
1286             }
1287             return;
1288         }
1289     }
1290 }
1291 #endif /* !_WIN32 */
1292 
1293 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1294    With the exception of the softmmu code in this file, this should
1295    only be used for local memory (e.g. video ram) that the device owns,
1296    and knows it isn't going to access beyond the end of the block.
1297 
1298    It should not be used for general purpose DMA.
1299    Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
1300  */
1301 void *qemu_get_ram_ptr(ram_addr_t addr)
1302 {
1303     RAMBlock *block = qemu_get_ram_block(addr);
1304 #if 0
1305     if (xen_enabled()) {
1306         /* We need to check if the requested address is in the RAM
1307          * because we don't want to map the entire memory in QEMU.
1308          * In that case just map until the end of the page.
1309          */
1310         if (block->offset == 0) {
1311             return xen_map_cache(addr, 0, 0);
1312         } else if (block->host == NULL) {
1313             block->host =
1314                 xen_map_cache(block->offset, block->length, 1);
1315         }
1316     }
1317 #endif
1318     return block->host + (addr - block->offset);
1319 }
1320 
1321 /* Return a host pointer to ram allocated with qemu_ram_alloc.
1322  * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
1323  */
1324 void *qemu_safe_ram_ptr(ram_addr_t addr)
1325 {
1326     RAMBlock *block;
1327 
1328     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1329         if (addr - block->offset < block->length) {
1330             return block->host + (addr - block->offset);
1331         }
1332     }
1333 
1334     fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1335     abort();
1336 
1337     return NULL;
1338 }
1339 
1340 /* Some of the softmmu routines need to translate from a host pointer
1341    (typically a TLB entry) back to a ram offset.  */
1342 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
1343 {
1344     RAMBlock *block;
1345     uint8_t *host = ptr;
1346 #if 0
1347     if (xen_enabled()) {
1348         *ram_addr = xen_ram_addr_from_mapcache(ptr);
1349         return qemu_get_ram_block(*ram_addr)->mr;
1350     }
1351 #endif
1352     block = ram_list.mru_block;
1353     if (block && block->host && host - block->host < block->length) {
1354         goto found;
1355     }
1356 
1357     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
1358         /* This case happens when the block is not mapped. */
1359         if (block->host == NULL) {
1360             continue;
1361         }
1362         if (host - block->host < block->length) {
1363             goto found;
1364         }
1365     }
1366 
1367     return -1;
1368 
1369 found:
1370     *ram_addr = block->offset + (host - block->host);
1371     return 0;
1372 }
1373 
1374 /* Some of the softmmu routines need to translate from a host pointer
1375    (typically a TLB entry) back to a ram offset.  */
1376 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1377 {
1378     ram_addr_t ram_addr;
1379 
1380     if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
1381         fprintf(stderr, "Bad ram pointer %p\n", ptr);
1382         abort();
1383     }
1384     return ram_addr;
1385 }
1386 
1387 static uint32_t unassigned_mem_readb(void *opaque, hwaddr addr)
1388 {
1389 #ifdef DEBUG_UNASSIGNED
1390     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1391 #endif
1392 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1393     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
1394 #endif
1395     return 0;
1396 }
1397 
1398 static uint32_t unassigned_mem_readw(void *opaque, hwaddr addr)
1399 {
1400 #ifdef DEBUG_UNASSIGNED
1401     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1402 #endif
1403 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1404     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
1405 #endif
1406     return 0;
1407 }
1408 
1409 static uint32_t unassigned_mem_readl(void *opaque, hwaddr addr)
1410 {
1411 #ifdef DEBUG_UNASSIGNED
1412     printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
1413 #endif
1414 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1415     cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
1416 #endif
1417     return 0;
1418 }
1419 
1420 static void unassigned_mem_writeb(void *opaque, hwaddr addr, uint32_t val)
1421 {
1422 #ifdef DEBUG_UNASSIGNED
1423     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
1424 #endif
1425 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1426     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
1427 #endif
1428 }
1429 
1430 static void unassigned_mem_writew(void *opaque, hwaddr addr, uint32_t val)
1431 {
1432 #ifdef DEBUG_UNASSIGNED
1433     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
1434 #endif
1435 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1436     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
1437 #endif
1438 }
1439 
1440 static void unassigned_mem_writel(void *opaque, hwaddr addr, uint32_t val)
1441 {
1442 #ifdef DEBUG_UNASSIGNED
1443     printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
1444 #endif
1445 #if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
1446     cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
1447 #endif
1448 }
1449 
1450 static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
1451     unassigned_mem_readb,
1452     unassigned_mem_readw,
1453     unassigned_mem_readl,
1454 };
1455 
1456 static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
1457     unassigned_mem_writeb,
1458     unassigned_mem_writew,
1459     unassigned_mem_writel,
1460 };
1461 
1462 static void notdirty_mem_writeb(void *opaque, hwaddr ram_addr,
1463                                 uint32_t val)
1464 {
1465     int dirty_flags;
1466     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1467     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1468 #if !defined(CONFIG_USER_ONLY)
1469         tb_invalidate_phys_page_fast0(ram_addr, 1);
1470         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1471 #endif
1472     }
1473     stb_p(qemu_get_ram_ptr(ram_addr), val);
1474     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1475     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1476     /* we remove the notdirty callback only if the code has been
1477        flushed */
1478     if (dirty_flags == 0xff)
1479         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1480 }
1481 
1482 static void notdirty_mem_writew(void *opaque, hwaddr ram_addr,
1483                                 uint32_t val)
1484 {
1485     int dirty_flags;
1486     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1487     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1488 #if !defined(CONFIG_USER_ONLY)
1489         tb_invalidate_phys_page_fast0(ram_addr, 2);
1490         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1491 #endif
1492     }
1493     stw_p(qemu_get_ram_ptr(ram_addr), val);
1494     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1495     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1496     /* we remove the notdirty callback only if the code has been
1497        flushed */
1498     if (dirty_flags == 0xff)
1499         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1500 }
1501 
1502 static void notdirty_mem_writel(void *opaque, hwaddr ram_addr,
1503                                 uint32_t val)
1504 {
1505     int dirty_flags;
1506     dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1507     if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1508 #if !defined(CONFIG_USER_ONLY)
1509         tb_invalidate_phys_page_fast0(ram_addr, 4);
1510         dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
1511 #endif
1512     }
1513     stl_p(qemu_get_ram_ptr(ram_addr), val);
1514     dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1515     cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
1516     /* we remove the notdirty callback only if the code has been
1517        flushed */
1518     if (dirty_flags == 0xff)
1519         tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
1520 }
1521 
1522 static CPUReadMemoryFunc * const error_mem_read[3] = {
1523     NULL, /* never used */
1524     NULL, /* never used */
1525     NULL, /* never used */
1526 };
1527 
1528 static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1529     notdirty_mem_writeb,
1530     notdirty_mem_writew,
1531     notdirty_mem_writel,
1532 };
1533 
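/* Called when a watchpoint fires from within generated code: restore the CPU
   state to the faulting access and invalidate the TB that contains it so it
   gets retranslated. */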
1534 static void tb_check_watchpoint(CPUArchState* env)
1535 {
1536     TranslationBlock *tb = tb_find_pc(env->mem_io_pc);
1537     if (!tb) {
1538         cpu_abort(env, "check_watchpoint: could not find TB for "
1539                   "pc=%p", (void *)env->mem_io_pc);
1540     }
1541     cpu_restore_state(env, env->mem_io_pc);
1542     tb_phys_invalidate(tb, -1);
1543 }
1544 
1545 /* Generate a debug exception if a watchpoint has been hit.  */
1546 static void check_watchpoint(int offset, int len_mask, int flags)
1547 {
1548     CPUState *cpu = current_cpu;
1549     CPUArchState *env = cpu->env_ptr;
1550     target_ulong pc, cs_base;
1551     target_ulong vaddr;
1552     CPUWatchpoint *wp;
1553     int cpu_flags;
1554 
1555     if (env->watchpoint_hit) {
1556         /* We re-entered the check after replacing the TB. Now raise
1557          * the debug interrupt so that it will trigger after the
1558          * current instruction. */
1559         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
1560         return;
1561     }
1562     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
1563     QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1564         if ((vaddr == (wp->vaddr & len_mask) ||
1565              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
1566             wp->flags |= BP_WATCHPOINT_HIT;
1567             if (!env->watchpoint_hit) {
1568                 env->watchpoint_hit = wp;
1569                 tb_check_watchpoint(env);
1570                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
1571                     env->exception_index = EXCP_DEBUG;
1572                     cpu_loop_exit(env);
1573                 } else {
1574                     cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
1575                     tb_gen_code(env, pc, cs_base, cpu_flags, 1);
1576                     cpu_resume_from_signal(env, NULL);
1577                 }
1578             }
1579         } else {
1580             wp->flags &= ~BP_WATCHPOINT_HIT;
1581         }
1582     }
1583 }
1584 
1585 /* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
1586    so these check for a hit then pass through to the normal out-of-line
1587    phys routines.  */
1588 static uint32_t watch_mem_readb(void *opaque, hwaddr addr)
1589 {
1590     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
1591     return ldub_phys(addr);
1592 }
1593 
1594 static uint32_t watch_mem_readw(void *opaque, hwaddr addr)
1595 {
1596     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
1597     return lduw_phys(addr);
1598 }
1599 
1600 static uint32_t watch_mem_readl(void *opaque, hwaddr addr)
1601 {
1602     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
1603     return ldl_phys(addr);
1604 }
1605 
1606 static void watch_mem_writeb(void *opaque, hwaddr addr,
1607                              uint32_t val)
1608 {
1609     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
1610     stb_phys(addr, val);
1611 }
1612 
1613 static void watch_mem_writew(void *opaque, hwaddr addr,
1614                              uint32_t val)
1615 {
1616     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
1617     stw_phys(addr, val);
1618 }
1619 
1620 static void watch_mem_writel(void *opaque, hwaddr addr,
1621                              uint32_t val)
1622 {
1623     check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
1624     stl_phys(addr, val);
1625 }
1626 
1627 static CPUReadMemoryFunc * const watch_mem_read[3] = {
1628     watch_mem_readb,
1629     watch_mem_readw,
1630     watch_mem_readl,
1631 };
1632 
1633 static CPUWriteMemoryFunc * const watch_mem_write[3] = {
1634     watch_mem_writeb,
1635     watch_mem_writew,
1636     watch_mem_writel,
1637 };
1638 
1639 static inline uint32_t subpage_readlen (subpage_t *mmio, hwaddr addr,
1640                                  unsigned int len)
1641 {
1642     uint32_t ret;
1643     unsigned int idx;
1644 
1645     idx = SUBPAGE_IDX(addr);
1646 #if defined(DEBUG_SUBPAGE)
1647     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
1648            mmio, len, addr, idx);
1649 #endif
1650     ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
1651                                        addr + mmio->region_offset[idx][0][len]);
1652 
1653     return ret;
1654 }
1655 
1656 static inline void subpage_writelen (subpage_t *mmio, hwaddr addr,
1657                               uint32_t value, unsigned int len)
1658 {
1659     unsigned int idx;
1660 
1661     idx = SUBPAGE_IDX(addr);
1662 #if defined(DEBUG_SUBPAGE)
1663     printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
1664            mmio, len, addr, idx, value);
1665 #endif
1666     (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
1667                                   addr + mmio->region_offset[idx][1][len],
1668                                   value);
1669 }
1670 
1671 static uint32_t subpage_readb (void *opaque, hwaddr addr)
1672 {
1673 #if defined(DEBUG_SUBPAGE)
1674     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
1675 #endif
1676 
1677     return subpage_readlen(opaque, addr, 0);
1678 }
1679 
1680 static void subpage_writeb (void *opaque, hwaddr addr,
1681                             uint32_t value)
1682 {
1683 #if defined(DEBUG_SUBPAGE)
1684     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
1685 #endif
1686     subpage_writelen(opaque, addr, value, 0);
1687 }
1688 
1689 static uint32_t subpage_readw (void *opaque, hwaddr addr)
1690 {
1691 #if defined(DEBUG_SUBPAGE)
1692     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
1693 #endif
1694 
1695     return subpage_readlen(opaque, addr, 1);
1696 }
1697 
1698 static void subpage_writew (void *opaque, hwaddr addr,
1699                             uint32_t value)
1700 {
1701 #if defined(DEBUG_SUBPAGE)
1702     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
1703 #endif
1704     subpage_writelen(opaque, addr, value, 1);
1705 }
1706 
1707 static uint32_t subpage_readl (void *opaque, hwaddr addr)
1708 {
1709 #if defined(DEBUG_SUBPAGE)
1710     printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
1711 #endif
1712 
1713     return subpage_readlen(opaque, addr, 2);
1714 }
1715 
1716 static void subpage_writel (void *opaque,
1717                          hwaddr addr, uint32_t value)
1718 {
1719 #if defined(DEBUG_SUBPAGE)
1720     printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
1721 #endif
1722     subpage_writelen(opaque, addr, value, 2);
1723 }
1724 
1725 static CPUReadMemoryFunc * const subpage_read[] = {
1726     &subpage_readb,
1727     &subpage_readw,
1728     &subpage_readl,
1729 };
1730 
1731 static CPUWriteMemoryFunc * const subpage_write[] = {
1732     &subpage_writeb,
1733     &subpage_writew,
1734     &subpage_writel,
1735 };
1736 
1737 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1738                              ram_addr_t memory, ram_addr_t region_offset)
1739 {
1740     int idx, eidx;
1741     unsigned int i;
1742 
1743     if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
1744         return -1;
1745     idx = SUBPAGE_IDX(start);
1746     eidx = SUBPAGE_IDX(end);
1747 #if defined(DEBUG_SUBPAGE)
1748     printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
1749            mmio, start, end, idx, eidx, memory);
1750 #endif
1751     memory >>= IO_MEM_SHIFT;
1752     for (; idx <= eidx; idx++) {
1753         for (i = 0; i < 4; i++) {
1754             if (_io_mem_read[memory][i]) {
1755                 mmio->mem_read[idx][i] = &_io_mem_read[memory][i];
1756                 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
1757                 mmio->region_offset[idx][0][i] = region_offset;
1758             }
1759             if (_io_mem_write[memory][i]) {
1760                 mmio->mem_write[idx][i] = &_io_mem_write[memory][i];
1761                 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
1762                 mmio->region_offset[idx][1][i] = region_offset;
1763             }
1764         }
1765     }
1766 
1767     return 0;
1768 }
1769 
1770 static void *subpage_init (hwaddr base, ram_addr_t *phys,
1771                            ram_addr_t orig_memory, ram_addr_t region_offset)
1772 {
1773     subpage_t *mmio;
1774     int subpage_memory;
1775 
1776     mmio = g_malloc0(sizeof(subpage_t));
1777 
1778     mmio->base = base;
1779     subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
1780 #if defined(DEBUG_SUBPAGE)
1781     printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
1782            mmio, base, TARGET_PAGE_SIZE, subpage_memory);
1783 #endif
1784     *phys = subpage_memory | IO_MEM_SUBPAGE;
1785     subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
1786                          region_offset);
1787 
1788     return mmio;
1789 }
1790 
1791 static int get_free_io_mem_idx(void)
1792 {
1793     int i;
1794 
1795     for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
1796         if (!io_mem_used[i]) {
1797             io_mem_used[i] = 1;
1798             return i;
1799         }
1800     fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
1801     return -1;
1802 }
1803 
1804 /* mem_read and mem_write are arrays of functions containing the
1805    function to access byte (index 0), word (index 1) and dword (index
1806    2). Functions can be omitted with a NULL function pointer.
1807    If io_index is non-zero, the corresponding io zone is
1808    modified. If it is zero, a new io zone is allocated. The return
1809    value can be used with cpu_register_physical_memory(). (-1) is
1810    returned on error. */
1811 static int cpu_register_io_memory_fixed(int io_index,
1812                                         CPUReadMemoryFunc * const *mem_read,
1813                                         CPUWriteMemoryFunc * const *mem_write,
1814                                         void *opaque)
1815 {
1816     int i, subwidth = 0;
1817 
1818     if (io_index <= 0) {
1819         io_index = get_free_io_mem_idx();
1820         if (io_index == -1)
1821             return io_index;
1822     } else {
1823         io_index >>= IO_MEM_SHIFT;
1824         if (io_index >= IO_MEM_NB_ENTRIES)
1825             return -1;
1826     }
1827 
1828     for(i = 0;i < 3; i++) {
1829         if (!mem_read[i] || !mem_write[i])
1830             subwidth = IO_MEM_SUBWIDTH;
1831         _io_mem_read[io_index][i] = mem_read[i];
1832         _io_mem_write[io_index][i] = mem_write[i];
1833     }
1834     io_mem_opaque[io_index] = opaque;
1835     return (io_index << IO_MEM_SHIFT) | subwidth;
1836 }
1837 
1838 int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
1839                            CPUWriteMemoryFunc * const *mem_write,
1840                            void *opaque)
1841 {
1842     return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1843 }
1844 
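/* Hedged usage sketch (not part of the original file): how a device model
 * might register MMIO handlers with cpu_register_io_memory().  The device
 * name, register layout and opaque pointer below are hypothetical; NULL
 * entries mark access sizes the device does not implement, which makes the
 * region IO_MEM_SUBWIDTH as described above. */
#if 0
static uint32_t mydev_readl(void *opaque, hwaddr addr)
{
    /* decode 'addr' as a register offset and return its value */
    return 0;
}

static void mydev_writel(void *opaque, hwaddr addr, uint32_t val)
{
    /* decode 'addr' as a register offset and update device state */
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL,          /* no 8-bit access */
    NULL,          /* no 16-bit access */
    mydev_readl,   /* 32-bit access */
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL,
    NULL,
    mydev_writel,
};

static void mydev_init_io(void *opaque)
{
    int iotype = cpu_register_io_memory(mydev_read, mydev_write, opaque);
    /* the returned value would then be handed to
     * cpu_register_physical_memory() to map the device's address range */
    (void)iotype;
}
#endif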
1845 void cpu_unregister_io_memory(int io_table_address)
1846 {
1847     int i;
1848     int io_index = io_table_address >> IO_MEM_SHIFT;
1849 
1850     for (i=0;i < 3; i++) {
1851         _io_mem_read[io_index][i] = unassigned_mem_read[i];
1852         _io_mem_write[io_index][i] = unassigned_mem_write[i];
1853     }
1854     io_mem_opaque[io_index] = NULL;
1855     io_mem_used[io_index] = 0;
1856 }
1857 
1858 static void io_mem_init(void)
1859 {
1860     int i;
1861 
1862     cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
1863     cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
1864     cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
1865     for (i=0; i<5; i++)
1866         io_mem_used[i] = 1;
1867 
1868     io_mem_watch = cpu_register_io_memory(watch_mem_read,
1869                                           watch_mem_write, NULL);
1870 }
1871 
1872 #endif /* !defined(CONFIG_USER_ONLY) */
1873 
1874 /* physical memory access (slow version, mainly for debug) */
1875 #if defined(CONFIG_USER_ONLY)
1876 void cpu_physical_memory_rw(hwaddr addr, void *buf,
1877                             int len, int is_write)
1878 {
1879     int l, flags;
1880     target_ulong page;
1881     void * p;
1882 
1883     while (len > 0) {
1884         page = addr & TARGET_PAGE_MASK;
1885         l = (page + TARGET_PAGE_SIZE) - addr;
1886         if (l > len)
1887             l = len;
1888         flags = page_get_flags(page);
1889         if (!(flags & PAGE_VALID))
1890             return;
1891         if (is_write) {
1892             if (!(flags & PAGE_WRITE))
1893                 return;
1894             /* XXX: this code should not depend on lock_user */
1895             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
1896                 /* FIXME - should this return an error rather than just fail? */
1897                 return;
1898             memcpy(p, buf, l);
1899             unlock_user(p, addr, l);
1900         } else {
1901             if (!(flags & PAGE_READ))
1902                 return;
1903             /* XXX: this code should not depend on lock_user */
1904             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
1905                 /* FIXME - should this return an error rather than just fail? */
1906                 return;
1907             memcpy(buf, p, l);
1908             unlock_user(p, addr, 0);
1909         }
1910         len -= l;
1911         buf += l;
1912         addr += l;
1913     }
1914 }
1915 
1916 #else
1917 
1918 static void invalidate_and_set_dirty(hwaddr addr,
1919                                      hwaddr length)
1920 {
1921     if (!cpu_physical_memory_is_dirty(addr)) {
1922         /* invalidate code */
1923         tb_invalidate_phys_page_range(addr, addr + length, 0);
1924         /* set dirty bit */
1925         cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
1926     }
1927 }
1928 
1929 void cpu_physical_memory_rw(hwaddr addr, void *buf,
1930                             int len, int is_write)
1931 {
1932     int l, io_index;
1933     uint8_t *ptr;
1934     uint32_t val;
1935     hwaddr page;
1936     ram_addr_t pd;
1937     uint8_t* buf8 = (uint8_t*)buf;
1938     PhysPageDesc *p;
1939 
1940     while (len > 0) {
1941         page = addr & TARGET_PAGE_MASK;
1942         l = (page + TARGET_PAGE_SIZE) - addr;
1943         if (l > len)
1944             l = len;
1945         p = phys_page_find(page >> TARGET_PAGE_BITS);
1946         if (!p) {
1947             pd = IO_MEM_UNASSIGNED;
1948         } else {
1949             pd = p->phys_offset;
1950         }
1951 
1952         if (is_write) {
1953             if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
1954                 hwaddr addr1 = addr;
1955                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1956                 if (p)
1957                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1958                 /* XXX: could force cpu_single_env to NULL to avoid
1959                    potential bugs */
1960                 if (l >= 4 && ((addr1 & 3) == 0)) {
1961                     /* 32 bit write access */
1962                     val = ldl_p(buf8);
1963                     io_mem_write(io_index, addr1, val, 4);
1964                     l = 4;
1965                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1966                     /* 16 bit write access */
1967                     val = lduw_p(buf8);
1968                     io_mem_write(io_index, addr1, val, 2);
1969                     l = 2;
1970                 } else {
1971                     /* 8 bit write access */
1972                     val = ldub_p(buf8);
1973                     io_mem_write(io_index, addr1, val, 1);
1974                     l = 1;
1975                 }
1976             } else {
1977                 ram_addr_t addr1;
1978                 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
1979                 /* RAM case */
1980                 ptr = qemu_get_ram_ptr(addr1);
1981                 memcpy(ptr, buf8, l);
1982                 invalidate_and_set_dirty(addr1, l);
1983             }
1984         } else {
1985             if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
1986                 !(pd & IO_MEM_ROMD)) {
1987                 hwaddr addr1 = addr;
1988                 /* I/O case */
1989                 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1990                 if (p)
1991                     addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1992                 if (l >= 4 && ((addr1 & 3) == 0)) {
1993                     /* 32 bit read access */
1994                     val = io_mem_read(io_index, addr1, 4);
1995                     stl_p(buf8, val);
1996                     l = 4;
1997                 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1998                     /* 16 bit read access */
1999                     val = io_mem_read(io_index, addr1, 2);
2000                     stw_p(buf8, val);
2001                     l = 2;
2002                 } else {
2003                     /* 8 bit read access */
2004                     val = io_mem_read(io_index, addr1, 1);
2005                     stb_p(buf8, val);
2006                     l = 1;
2007                 }
2008             } else {
2009                 /* RAM case */
2010                 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
2011                     (addr & ~TARGET_PAGE_MASK);
2012                 memcpy(buf8, ptr, l);
2013             }
2014         }
2015         len -= l;
2016         buf8 += l;
2017         addr += l;
2018     }
2019 }
2020 
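/* Hedged usage sketch (not part of the original file): a device model
 * posting a small completion record into guest memory through the slow-path
 * helpers built on cpu_physical_memory_rw().  The record layout and guest
 * address are hypothetical. */
#if 0
struct mydev_completion {
    uint32_t status;
    uint32_t length;
};

static void mydev_post_completion(hwaddr guest_addr, uint32_t status,
                                  uint32_t length)
{
    struct mydev_completion c;

    c.status = status;
    c.length = length;
    /* handles both RAM and MMIO destinations, and performs the dirty-bit
     * and code-invalidation bookkeeping seen above */
    cpu_physical_memory_write(guest_addr, &c, sizeof(c));
}
#endif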
2021 /* used for ROM loading: can write in RAM and ROM */
2022 void cpu_physical_memory_write_rom(hwaddr addr,
2023                                    const void *buf, int len)
2024 {
2025     int l;
2026     uint8_t *ptr;
2027     hwaddr page;
2028     unsigned long pd;
2029     const uint8_t* buf8 = (const uint8_t*)buf;
2030     PhysPageDesc *p;
2031 
2032     while (len > 0) {
2033         page = addr & TARGET_PAGE_MASK;
2034         l = (page + TARGET_PAGE_SIZE) - addr;
2035         if (l > len)
2036             l = len;
2037         p = phys_page_find(page >> TARGET_PAGE_BITS);
2038         if (!p) {
2039             pd = IO_MEM_UNASSIGNED;
2040         } else {
2041             pd = p->phys_offset;
2042         }
2043 
2044         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2045             (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2046             !(pd & IO_MEM_ROMD)) {
2047             /* do nothing */
2048         } else {
2049             unsigned long addr1;
2050             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2051             /* ROM/RAM case */
2052             ptr = qemu_get_ram_ptr(addr1);
2053             memcpy(ptr, buf8, l);
2054             invalidate_and_set_dirty(addr1, l);
2055         }
2056         len -= l;
2057         buf8 += l;
2058         addr += l;
2059     }
2060 }
2061 
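/* Hedged usage sketch (not part of the original file): copying a firmware
 * image into a region that was registered as ROM.  The blob and base
 * address are hypothetical. */
#if 0
static void mydev_load_firmware(const void *blob, int size, hwaddr rom_base)
{
    /* unlike cpu_physical_memory_write(), this also stores into pages
     * registered as ROM, which is what board setup code needs */
    cpu_physical_memory_write_rom(rom_base, blob, size);
}
#endif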
2062 typedef struct {
2063     void *buffer;
2064     hwaddr addr;
2065     hwaddr len;
2066 } BounceBuffer;
2067 
2068 static BounceBuffer bounce;
2069 
2070 typedef struct MapClient {
2071     void *opaque;
2072     void (*callback)(void *opaque);
2073     QLIST_ENTRY(MapClient) link;
2074 } MapClient;
2075 
2076 static QLIST_HEAD(map_client_list, MapClient) map_client_list
2077     = QLIST_HEAD_INITIALIZER(map_client_list);
2078 
2079 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
2080 {
2081     MapClient *client = g_malloc(sizeof(*client));
2082 
2083     client->opaque = opaque;
2084     client->callback = callback;
2085     QLIST_INSERT_HEAD(&map_client_list, client, link);
2086     return client;
2087 }
2088 
2089 static void cpu_unregister_map_client(void *_client)
2090 {
2091     MapClient *client = (MapClient *)_client;
2092 
2093     QLIST_REMOVE(client, link);
2094     g_free(client);
2095 }
2096 
2097 static void cpu_notify_map_clients(void)
2098 {
2099     MapClient *client;
2100 
2101     while (!QLIST_EMPTY(&map_client_list)) {
2102         client = QLIST_FIRST(&map_client_list);
2103         client->callback(client->opaque);
2104         cpu_unregister_map_client(client);
2105     }
2106 }
2107 
2108 /* Map a physical memory region into a host virtual address.
2109  * May map a subset of the requested range, given by and returned in *plen.
2110  * May return NULL if resources needed to perform the mapping are exhausted.
2111  * Use only for reads OR writes - not for read-modify-write operations.
2112  * Use cpu_register_map_client() to know when retrying the map operation is
2113  * likely to succeed.
2114  */
2115 void *cpu_physical_memory_map(hwaddr addr,
2116                               hwaddr *plen,
2117                               int is_write)
2118 {
2119     hwaddr len = *plen;
2120     hwaddr done = 0;
2121     int l;
2122     uint8_t *ret = NULL;
2123     uint8_t *ptr;
2124     hwaddr page;
2125     unsigned long pd;
2126     PhysPageDesc *p;
2127     unsigned long addr1;
2128 
2129     while (len > 0) {
2130         page = addr & TARGET_PAGE_MASK;
2131         l = (page + TARGET_PAGE_SIZE) - addr;
2132         if (l > len)
2133             l = len;
2134         p = phys_page_find(page >> TARGET_PAGE_BITS);
2135         if (!p) {
2136             pd = IO_MEM_UNASSIGNED;
2137         } else {
2138             pd = p->phys_offset;
2139         }
2140 
2141         if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2142             if (done || bounce.buffer) {
2143                 break;
2144             }
2145             bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
2146             bounce.addr = addr;
2147             bounce.len = l;
2148             if (!is_write) {
2149                 cpu_physical_memory_read(addr, bounce.buffer, l);
2150             }
2151             ptr = bounce.buffer;
2152         } else {
2153             addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2154             ptr = qemu_get_ram_ptr(addr1);
2155         }
2156         if (!done) {
2157             ret = ptr;
2158         } else if (ret + done != ptr) {
2159             break;
2160         }
2161 
2162         len -= l;
2163         addr += l;
2164         done += l;
2165     }
2166     *plen = done;
2167     return ret;
2168 }
2169 
2170 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
2171  * Will also mark the memory as dirty if is_write == 1.  access_len gives
2172  * the amount of memory that was actually read or written by the caller.
2173  */
2174 void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2175                                int is_write, hwaddr access_len)
2176 {
2177     if (buffer != bounce.buffer) {
2178         if (is_write) {
2179             ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
2180             while (access_len) {
2181                 unsigned l;
2182                 l = TARGET_PAGE_SIZE;
2183                 if (l > access_len)
2184                     l = access_len;
2185                 invalidate_and_set_dirty(addr1, l);
2186                 addr1 += l;
2187                 access_len -= l;
2188             }
2189         }
2190         return;
2191     }
2192     if (is_write) {
2193         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
2194     }
2195     qemu_vfree(bounce.buffer);
2196     bounce.buffer = NULL;
2197     cpu_notify_map_clients();
2198 }
2199 
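/* Hedged usage sketch (not part of the original file): zero-copy access to
 * a guest buffer via the map/unmap pair above.  The function name and the
 * fill pattern are hypothetical.  Note that the mapping may come back
 * shorter than requested (or NULL) when the bounce buffer is busy; a real
 * caller would register a callback with cpu_register_map_client() and
 * retry from there. */
#if 0
static void mydev_zero_guest_buffer(hwaddr guest_addr, hwaddr size)
{
    hwaddr plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted: retry later via cpu_register_map_client() */
        return;
    }
    memset(host, 0, plen);                    /* touch at most 'plen' bytes */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif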
2200 /* warning: addr must be aligned */
2201 static inline uint32_t ldl_phys_internal(hwaddr addr,
2202                                          enum device_endian endian)
2203 {
2204     int io_index;
2205     uint8_t *ptr;
2206     uint32_t val;
2207     unsigned long pd;
2208     PhysPageDesc *p;
2209 
2210     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2211     if (!p) {
2212         pd = IO_MEM_UNASSIGNED;
2213     } else {
2214         pd = p->phys_offset;
2215     }
2216 
2217     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2218         !(pd & IO_MEM_ROMD)) {
2219         /* I/O case */
2220         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2221         if (p)
2222             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2223         val = io_mem_read(io_index, addr, 4);
2224 #if defined(TARGET_WORDS_BIGENDIAN)
2225         if (endian == DEVICE_LITTLE_ENDIAN) {
2226             val = bswap32(val);
2227         }
2228 #else
2229         if (endian == DEVICE_BIG_ENDIAN) {
2230             val = bswap32(val);
2231         }
2232 #endif
2233     } else {
2234         /* RAM case */
2235         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
2236             (addr & ~TARGET_PAGE_MASK);
2237         switch (endian) {
2238             case DEVICE_LITTLE_ENDIAN:
2239                 val = ldl_le_p(ptr);
2240                 break;
2241             case DEVICE_BIG_ENDIAN:
2242                 val = ldl_be_p(ptr);
2243                 break;
2244             default:
2245                 val = ldl_p(ptr);
2246                 break;
2247         }
2248     }
2249     return val;
2250 }
2251 
2252 uint32_t ldl_phys(hwaddr addr)
2253 {
2254     return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2255 }
2256 
2257 uint32_t ldl_le_phys(hwaddr addr)
2258 {
2259     return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2260 }
2261 
2262 uint32_t ldl_be_phys(hwaddr addr)
2263 {
2264     return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
2265 }
2266 
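/* Hedged usage sketch (not part of the original file): reading a 32-bit
 * little-endian field from a hypothetical in-memory descriptor.  The
 * explicit-endian accessor returns the same value whether or not the
 * target was built with TARGET_WORDS_BIGENDIAN. */
#if 0
static uint32_t mydev_read_desc_len(hwaddr desc_addr)
{
    /* offset 4 of the hypothetical descriptor holds its length, LE */
    return ldl_le_phys(desc_addr + 4);
}
#endif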
2267 /* warning: addr must be aligned */
2268 static inline uint64_t ldq_phys_internal(hwaddr addr,
2269                                          enum device_endian endian)
2270 {
2271     int io_index;
2272     uint8_t *ptr;
2273     uint64_t val;
2274     unsigned long pd;
2275     PhysPageDesc *p;
2276 
2277     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2278     if (!p) {
2279         pd = IO_MEM_UNASSIGNED;
2280     } else {
2281         pd = p->phys_offset;
2282     }
2283 
2284     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2285         !(pd & IO_MEM_ROMD)) {
2286         /* I/O case */
2287         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2288         if (p)
2289             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2290 
2291         /* XXX This is broken when device endian != cpu endian.
2292                Fix and add "endian" variable check */
2293 #ifdef TARGET_WORDS_BIGENDIAN
2294         val = (uint64_t)io_mem_read(io_index, addr, 4) << 32;
2295         val |= io_mem_read(io_index, addr + 4, 4);
2296 #else
2297         val = io_mem_read(io_index, addr, 4);
2298         val |= (uint64_t)io_mem_read(io_index, addr + 4, 4) << 32;
2299 #endif
2300     } else {
2301         /* RAM case */
2302         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
2303             (addr & ~TARGET_PAGE_MASK);
2304         switch (endian) {
2305         case DEVICE_LITTLE_ENDIAN:
2306             val = ldq_le_p(ptr);
2307             break;
2308         case DEVICE_BIG_ENDIAN:
2309             val = ldq_be_p(ptr);
2310             break;
2311         default:
2312             val = ldq_p(ptr);
2313             break;
2314         }
2315     }
2316     return val;
2317 }
2318 
2319 uint64_t ldq_phys(hwaddr addr)
2320 {
2321     return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2322 }
2323 
2324 uint64_t ldq_le_phys(hwaddr addr)
2325 {
2326     return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2327 }
2328 
2329 uint64_t ldq_be_phys(hwaddr addr)
2330 {
2331     return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
2332 }
2333 
2334 /* XXX: optimize */
2335 uint32_t ldub_phys(hwaddr addr)
2336 {
2337     uint8_t val;
2338     cpu_physical_memory_read(addr, &val, 1);
2339     return val;
2340 }
2341 
2342 /* XXX: optimize */
2343 static inline uint32_t lduw_phys_internal(hwaddr addr,
2344                                           enum device_endian endian)
2345 {
2346     int io_index;
2347     uint8_t *ptr;
2348     uint64_t val;
2349     unsigned long pd;
2350     PhysPageDesc *p;
2351 
2352     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2353     if (!p) {
2354         pd = IO_MEM_UNASSIGNED;
2355     } else {
2356         pd = p->phys_offset;
2357     }
2358 
2359     if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2360         !(pd & IO_MEM_ROMD)) {
2361         /* I/O case */
2362         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2363         if (p)
2364             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2365         val = io_mem_read(io_index, addr, 2);
2366 #if defined(TARGET_WORDS_BIGENDIAN)
2367         if (endian == DEVICE_LITTLE_ENDIAN) {
2368             val = bswap16(val);
2369         }
2370 #else
2371         if (endian == DEVICE_BIG_ENDIAN) {
2372             val = bswap16(val);
2373         }
2374 #endif
2375     } else {
2376         /* RAM case */
2377         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
2378             (addr & ~TARGET_PAGE_MASK);
2379         switch (endian) {
2380         case DEVICE_LITTLE_ENDIAN:
2381             val = lduw_le_p(ptr);
2382             break;
2383         case DEVICE_BIG_ENDIAN:
2384             val = lduw_be_p(ptr);
2385             break;
2386         default:
2387             val = lduw_p(ptr);
2388             break;
2389         }
2390     }
2391     return val;
2392 }
2393 
2394 uint32_t lduw_phys(hwaddr addr)
2395 {
2396     return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
2397 }
2398 
2399 uint32_t lduw_le_phys(hwaddr addr)
2400 {
2401     return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
2402 }
2403 
2404 uint32_t lduw_be_phys(hwaddr addr)
2405 {
2406     return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
2407 }
2408 
2409 /* warning: addr must be aligned. The ram page is not marked as dirty
2410    and the code inside is not invalidated. It is useful if the dirty
2411    bits are used to track modified PTEs */
2412 void stl_phys_notdirty(hwaddr addr, uint32_t val)
2413 {
2414     int io_index;
2415     uint8_t *ptr;
2416     unsigned long pd;
2417     PhysPageDesc *p;
2418 
2419     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2420     if (!p) {
2421         pd = IO_MEM_UNASSIGNED;
2422     } else {
2423         pd = p->phys_offset;
2424     }
2425 
2426     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2427         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2428         if (p)
2429             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2430         io_mem_write(io_index, addr, val, 4);
2431     } else {
2432         unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2433         ptr = qemu_get_ram_ptr(addr1);
2434         stl_p(ptr, val);
2435 
2436         if (unlikely(in_migration)) {
2437             if (!cpu_physical_memory_is_dirty(addr1)) {
2438                 /* invalidate code */
2439                 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2440                 /* set dirty bit */
2441                 cpu_physical_memory_set_dirty_flags(
2442                     addr1, (0xff & ~CODE_DIRTY_FLAG));
2443             }
2444         }
2445     }
2446 }
2447 
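/* Hedged usage sketch (not part of the original file): setting an
 * "accessed" bit in a hypothetical 32-bit page table entry.  Using
 * stl_phys_notdirty() avoids marking the PTE page dirty, which matches the
 * comment above about using the dirty bits to track modified PTEs. */
#if 0
#define MYPTE_ACCESSED (1u << 5)   /* hypothetical bit layout */

static void my_mmu_mark_pte_accessed(hwaddr pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & MYPTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | MYPTE_ACCESSED);
    }
}
#endif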
2448 void stq_phys_notdirty(hwaddr addr, uint64_t val)
2449 {
2450     int io_index;
2451     uint8_t *ptr;
2452     unsigned long pd;
2453     PhysPageDesc *p;
2454 
2455     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2456     if (!p) {
2457         pd = IO_MEM_UNASSIGNED;
2458     } else {
2459         pd = p->phys_offset;
2460     }
2461 
2462     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2463         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2464         if (p)
2465             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2466 #ifdef TARGET_WORDS_BIGENDIAN
2467         io_mem_write(io_index, addr, val >> 32, 4);
2468         io_mem_write(io_index, addr + 4, val, 4);
2469 #else
2470         io_mem_write(io_index, addr, val, 4);
2471         io_mem_write(io_index, addr + 4, val >> 32, 4);
2472 #endif
2473     } else {
2474         ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
2475             (addr & ~TARGET_PAGE_MASK);
2476         stq_p(ptr, val);
2477     }
2478 }
2479 
2480 /* warning: addr must be aligned */
2481 static inline void stl_phys_internal(hwaddr addr, uint32_t val,
2482                                      enum device_endian endian)
2483 {
2484     int io_index;
2485     uint8_t *ptr;
2486     unsigned long pd;
2487     PhysPageDesc *p;
2488 
2489     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2490     if (!p) {
2491         pd = IO_MEM_UNASSIGNED;
2492     } else {
2493         pd = p->phys_offset;
2494     }
2495 
2496     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2497         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2498         if (p)
2499             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2500 #if defined(TARGET_WORDS_BIGENDIAN)
2501         if (endian == DEVICE_LITTLE_ENDIAN) {
2502             val = bswap32(val);
2503         }
2504 #else
2505         if (endian == DEVICE_BIG_ENDIAN) {
2506             val = bswap32(val);
2507         }
2508 #endif
2509         io_mem_write(io_index, addr, val, 4);
2510     } else {
2511         unsigned long addr1;
2512         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2513         /* RAM case */
2514         ptr = qemu_get_ram_ptr(addr1);
2515         switch (endian) {
2516         case DEVICE_LITTLE_ENDIAN:
2517             stl_le_p(ptr, val);
2518             break;
2519         case DEVICE_BIG_ENDIAN:
2520             stl_be_p(ptr, val);
2521             break;
2522         default:
2523             stl_p(ptr, val);
2524             break;
2525         }
2526         invalidate_and_set_dirty(addr1, 4);
2527     }
2528 }
2529 
2530 void stl_phys(hwaddr addr, uint32_t val)
2531 {
2532     stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2533 }
2534 
2535 void stl_le_phys(hwaddr addr, uint32_t val)
2536 {
2537     stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2538 }
2539 
2540 void stl_be_phys(hwaddr addr, uint32_t val)
2541 {
2542     stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2543 }
2544 
2545 /* XXX: optimize */
2546 void stb_phys(hwaddr addr, uint32_t val)
2547 {
2548     uint8_t v = val;
2549     cpu_physical_memory_write(addr, &v, 1);
2550 }
2551 
2552 /* XXX: optimize */
2553 static inline void stw_phys_internal(hwaddr addr, uint32_t val,
2554                                      enum device_endian endian)
2555 {
2556     int io_index;
2557     uint8_t *ptr;
2558     unsigned long pd;
2559     PhysPageDesc *p;
2560 
2561     p = phys_page_find(addr >> TARGET_PAGE_BITS);
2562     if (!p) {
2563         pd = IO_MEM_UNASSIGNED;
2564     } else {
2565         pd = p->phys_offset;
2566     }
2567 
2568     if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2569         io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2570         if (p)
2571             addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2572 #if defined(TARGET_WORDS_BIGENDIAN)
2573         if (endian == DEVICE_LITTLE_ENDIAN) {
2574             val = bswap16(val);
2575         }
2576 #else
2577         if (endian == DEVICE_BIG_ENDIAN) {
2578             val = bswap16(val);
2579         }
2580 #endif
2581         io_mem_write(io_index, addr, val, 2);
2582     } else {
2583         unsigned long addr1;
2584         addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2585         /* RAM case */
2586         ptr = qemu_get_ram_ptr(addr1);
2587         switch (endian) {
2588         case DEVICE_LITTLE_ENDIAN:
2589             stw_le_p(ptr, val);
2590             break;
2591         case DEVICE_BIG_ENDIAN:
2592             stw_be_p(ptr, val);
2593             break;
2594         default:
2595             stw_p(ptr, val);
2596             break;
2597         }
2598         if (!cpu_physical_memory_is_dirty(addr1)) {
2599             /* invalidate code */
2600             tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
2601             /* set dirty bit */
2602             cpu_physical_memory_set_dirty_flags(addr1,
2603                 (0xff & ~CODE_DIRTY_FLAG));
2604         }
2605     }
2606 }
2607 
2608 void stw_phys(hwaddr addr, uint32_t val)
2609 {
2610     stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
2611 }
2612 
2613 void stw_le_phys(hwaddr addr, uint32_t val)
2614 {
2615     stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
2616 }
2617 
2618 void stw_be_phys(hwaddr addr, uint32_t val)
2619 {
2620     stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
2621 }
2622 
2623 /* XXX: optimize */
2624 void stq_phys(hwaddr addr, uint64_t val)
2625 {
2626     val = tswap64(val);
2627     cpu_physical_memory_write(addr, &val, 8);
2628 }
2629 
2630 
2631 void stq_le_phys(hwaddr addr, uint64_t val)
2632 {
2633     val = cpu_to_le64(val);
2634     cpu_physical_memory_write(addr, &val, 8);
2635 }
2636 
2637 void stq_be_phys(hwaddr addr, uint64_t val)
2638 {
2639     val = cpu_to_be64(val);
2640     cpu_physical_memory_write(addr, &val, 8);
2641 }
2642 
2643 #endif
2644 
2645 /* virtual memory access for debug (includes writing to ROM) */
2646 int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
2647                         void *buf, int len, int is_write)
2648 {
2649     int l;
2650     hwaddr phys_addr;
2651     target_ulong page;
2652     uint8_t* buf8 = (uint8_t*)buf;
2653     CPUArchState *env = cpu->env_ptr;
2654 
2655     while (len > 0) {
2656         page = addr & TARGET_PAGE_MASK;
2657         phys_addr = cpu_get_phys_page_debug(env, page);
2658         /* if no physical page mapped, return an error */
2659         if (phys_addr == -1)
2660             return -1;
2661         l = (page + TARGET_PAGE_SIZE) - addr;
2662         if (l > len)
2663             l = len;
2664         phys_addr += (addr & ~TARGET_PAGE_MASK);
2665 #if !defined(CONFIG_USER_ONLY)
2666         if (is_write)
2667             cpu_physical_memory_write_rom(phys_addr, buf8, l);
2668         else
2669 #endif
2670             cpu_physical_memory_rw(phys_addr, buf8, l, is_write);
2671         len -= l;
2672         buf8 += l;
2673         addr += l;
2674     }
2675     return 0;
2676 }
2677
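/* Hedged usage sketch (not part of the original file): reading a word of
 * guest virtual memory on behalf of a debugger stub.  The helper name is
 * hypothetical. */
#if 0
static int my_debug_read_word(CPUState *cpu, target_ulong vaddr,
                              uint32_t *out)
{
    if (cpu_memory_rw_debug(cpu, vaddr, out, sizeof(*out), 0) < 0) {
        return -1;   /* no physical page mapped at 'vaddr' */
    }
    return 0;        /* *out holds the bytes as stored in guest memory */
}
#endif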