#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cpu.h"
#include "exec-all.h"
#include "gdbstub.h"
#include "helpers.h"
#include "qemu-common.h"
#ifdef CONFIG_TRACE
#include "trace.h"
#endif
#ifdef CONFIG_MEMCHECK
#include "memcheck/memcheck_api.h"
#endif  // CONFIG_MEMCHECK

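/* Reset values for the CP15 c0 ID register blocks: the c0_c1[] arrays hold
   the ID_PFR/ID_DFR/ID_AFR/ID_MMFR words and the c0_c2[] arrays the ID_ISAR
   words returned by the cp15 read accessor further down.  */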
static uint32_t cortexa8_cp15_c0_c1[8] =
{ 0x1031, 0x11, 0x400, 0, 0x31100003, 0x20000000, 0x01202000, 0x11 };

static uint32_t cortexa8_cp15_c0_c2[8] =
{ 0x00101111, 0x12112111, 0x21232031, 0x11112131, 0x00111142, 0, 0, 0 };

static uint32_t mpcore_cp15_c0_c1[8] =
{ 0x111, 0x1, 0, 0x2, 0x01100103, 0x10020302, 0x01222000, 0 };

static uint32_t mpcore_cp15_c0_c2[8] =
{ 0x00100011, 0x12002111, 0x11221011, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t arm1136_cp15_c0_c1[8] =
{ 0x111, 0x1, 0x2, 0x3, 0x01130003, 0x10030302, 0x01222110, 0 };

static uint32_t arm1136_cp15_c0_c2[8] =
{ 0x00140011, 0x12002111, 0x11231111, 0x01102131, 0x141, 0, 0, 0 };

static uint32_t cpu_arm_find_by_name(const char *name);

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1u << feature;
}

static void cpu_reset_model_id(CPUARMState *env, uint32_t id)
{
    env->cp15.c0_cpuid = id;
    switch (id) {
    case ARM_CPUID_ARM926:
        set_feature(env, ARM_FEATURE_VFP);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x41011090;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM946:
        set_feature(env, ARM_FEATURE_MPU);
        env->cp15.c0_cachetype = 0x0f004006;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_ARM1026:
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410110a0;
        env->cp15.c0_cachetype = 0x1dd20d2;
        env->cp15.c1_sys = 0x00090078;
        break;
    case ARM_CPUID_ARM1136_R2:
    case ARM_CPUID_ARM1136:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, arm1136_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, arm1136_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_ARM11MPCORE:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_AUXCR);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410120b4;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11111111;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00000000;
        memcpy(env->cp15.c0_c1, mpcore_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, mpcore_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x1dd20d2;
        break;
    case ARM_CPUID_CORTEXA8:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_AUXCR);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        env->vfp.xregs[ARM_VFP_FPSID] = 0x410330c0;
        env->vfp.xregs[ARM_VFP_MVFR0] = 0x11110222;
        env->vfp.xregs[ARM_VFP_MVFR1] = 0x00011100;
        memcpy(env->cp15.c0_c1, cortexa8_cp15_c0_c1, 8 * sizeof(uint32_t));
        memcpy(env->cp15.c0_c2, cortexa8_cp15_c0_c2, 8 * sizeof(uint32_t));
        env->cp15.c0_cachetype = 0x82048004;
        env->cp15.c0_clid = (1 << 27) | (2 << 24) | 3;
        env->cp15.c0_ccsid[0] = 0xe007e01a; /* 16k L1 dcache. */
        env->cp15.c0_ccsid[1] = 0x2007e01a; /* 16k L1 icache. */
        env->cp15.c0_ccsid[2] = 0xf0000000; /* No L2 icache. */
        break;
    case ARM_CPUID_CORTEXM3:
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_M);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_ANY: /* For userspace emulation.  */
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_V6K);
        set_feature(env, ARM_FEATURE_V7);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_VFP);
        set_feature(env, ARM_FEATURE_VFP3);
        set_feature(env, ARM_FEATURE_NEON);
        set_feature(env, ARM_FEATURE_THUMB2EE);
        set_feature(env, ARM_FEATURE_DIV);
        break;
    case ARM_CPUID_TI915T:
    case ARM_CPUID_TI925T:
        set_feature(env, ARM_FEATURE_OMAPCP);
        env->cp15.c0_cpuid = ARM_CPUID_TI925T; /* Depends on wiring.  */
        env->cp15.c0_cachetype = 0x5109149;
        env->cp15.c1_sys = 0x00000070;
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        break;
    case ARM_CPUID_PXA250:
    case ARM_CPUID_PXA255:
    case ARM_CPUID_PXA260:
    case ARM_CPUID_PXA261:
    case ARM_CPUID_PXA262:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    case ARM_CPUID_PXA270_A0:
    case ARM_CPUID_PXA270_A1:
    case ARM_CPUID_PXA270_B0:
    case ARM_CPUID_PXA270_B1:
    case ARM_CPUID_PXA270_C0:
    case ARM_CPUID_PXA270_C5:
        set_feature(env, ARM_FEATURE_XSCALE);
        /* JTAG_ID is ((id << 28) | 0x09265013) */
        set_feature(env, ARM_FEATURE_IWMMXT);
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
        env->cp15.c0_cachetype = 0xd172172;
        env->cp15.c1_sys = 0x00000078;
        break;
    default:
        cpu_abort(env, "Bad CPU ID: %x\n", id);
        break;
    }
}

void cpu_reset(CPUARMState *env)
{
    uint32_t id;

    if (qemu_loglevel_mask(CPU_LOG_RESET)) {
        qemu_log("CPU Reset (CPU %d)\n", env->cpu_index);
        log_cpu_state(env, 0);
    }

    id = env->cp15.c0_cpuid;
    memset(env, 0, offsetof(CPUARMState, breakpoints));
    if (id)
        cpu_reset_model_id(env, id);
#if defined (CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
#else
    /* SVC mode with interrupts disabled.  */
    env->uncached_cpsr = ARM_CPU_MODE_SVC | CPSR_A | CPSR_F | CPSR_I;
    /* On ARMv7-M the CPSR_I is the value of the PRIMASK register, and is
       clear at reset.  */
    if (IS_M(env))
        env->uncached_cpsr &= ~CPSR_I;
    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
    env->cp15.c2_base_mask = 0xffffc000u;
#endif
    env->regs[15] = 0;
    tlb_flush(env, 1);
}

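/* gdbstub accessors for the VFP/Neon register packet: D0-D15 (or D0-D31
   with VFP3), the Neon Q-register aliases, then FPSID, FPSCR and FPEXC.  */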
static int vfp_gdb_get_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    /* VFP data registers are always little-endian.  */
    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        stfq_le_p(buf, env->vfp.regs[reg]);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        /* Aliases for Q regs.  */
        nregs += 16;
        if (reg < nregs) {
            stfq_le_p(buf, env->vfp.regs[(reg - 32) * 2]);
            stfq_le_p(buf + 8, env->vfp.regs[(reg - 32) * 2 + 1]);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSID]); return 4;
    case 1: stl_p(buf, env->vfp.xregs[ARM_VFP_FPSCR]); return 4;
    case 2: stl_p(buf, env->vfp.xregs[ARM_VFP_FPEXC]); return 4;
    }
    return 0;
}

static int vfp_gdb_set_reg(CPUState *env, uint8_t *buf, int reg)
{
    int nregs;

    nregs = arm_feature(env, ARM_FEATURE_VFP3) ? 32 : 16;
    if (reg < nregs) {
        env->vfp.regs[reg] = ldfq_le_p(buf);
        return 8;
    }
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        nregs += 16;
        if (reg < nregs) {
            env->vfp.regs[(reg - 32) * 2] = ldfq_le_p(buf);
            env->vfp.regs[(reg - 32) * 2 + 1] = ldfq_le_p(buf + 8);
            return 16;
        }
    }
    switch (reg - nregs) {
    case 0: env->vfp.xregs[ARM_VFP_FPSID] = ldl_p(buf); return 4;
    case 1: env->vfp.xregs[ARM_VFP_FPSCR] = ldl_p(buf); return 4;
    case 2: env->vfp.xregs[ARM_VFP_FPEXC] = ldl_p(buf); return 4;
    }
    return 0;
}

CPUARMState *cpu_arm_init(const char *cpu_model)
{
    CPUARMState *env;
    uint32_t id;
    static int inited = 0;

    id = cpu_arm_find_by_name(cpu_model);
    if (id == 0)
        return NULL;
    env = qemu_mallocz(sizeof(CPUARMState));
    cpu_exec_init(env);
    if (!inited) {
        inited = 1;
        arm_translate_init();
    }

    env->cpu_model_str = cpu_model;
    env->cp15.c0_cpuid = id;
    cpu_reset(env);
    if (arm_feature(env, ARM_FEATURE_NEON)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 51, "arm-neon.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP3)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 35, "arm-vfp3.xml", 0);
    } else if (arm_feature(env, ARM_FEATURE_VFP)) {
        gdb_register_coprocessor(env, vfp_gdb_get_reg, vfp_gdb_set_reg,
                                 19, "arm-vfp.xml", 0);
    }
    qemu_init_vcpu(env);
    return env;
}

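/* Table mapping the model names accepted by -cpu to CPUID values.  */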
struct arm_cpu_t {
    uint32_t id;
    const char *name;
};

static const struct arm_cpu_t arm_cpu_names[] = {
    { ARM_CPUID_ARM926, "arm926"},
    { ARM_CPUID_ARM946, "arm946"},
    { ARM_CPUID_ARM1026, "arm1026"},
    { ARM_CPUID_ARM1136, "arm1136"},
    { ARM_CPUID_ARM1136_R2, "arm1136-r2"},
    { ARM_CPUID_ARM11MPCORE, "arm11mpcore"},
    { ARM_CPUID_CORTEXM3, "cortex-m3"},
    { ARM_CPUID_CORTEXA8, "cortex-a8"},
    { ARM_CPUID_TI925T, "ti925t" },
    { ARM_CPUID_PXA250, "pxa250" },
    { ARM_CPUID_PXA255, "pxa255" },
    { ARM_CPUID_PXA260, "pxa260" },
    { ARM_CPUID_PXA261, "pxa261" },
    { ARM_CPUID_PXA262, "pxa262" },
    { ARM_CPUID_PXA270, "pxa270" },
    { ARM_CPUID_PXA270_A0, "pxa270-a0" },
    { ARM_CPUID_PXA270_A1, "pxa270-a1" },
    { ARM_CPUID_PXA270_B0, "pxa270-b0" },
    { ARM_CPUID_PXA270_B1, "pxa270-b1" },
    { ARM_CPUID_PXA270_C0, "pxa270-c0" },
    { ARM_CPUID_PXA270_C5, "pxa270-c5" },
    { ARM_CPUID_ANY, "any"},
    { 0, NULL}
};

void arm_cpu_list(FILE *f, int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i;

    (*cpu_fprintf)(f, "Available CPUs:\n");
    for (i = 0; arm_cpu_names[i].name; i++) {
        (*cpu_fprintf)(f, "  %s\n", arm_cpu_names[i].name);
    }
}

/* return 0 if not found */
static uint32_t cpu_arm_find_by_name(const char *name)
{
    int i;
    uint32_t id;

    id = 0;
    for (i = 0; arm_cpu_names[i].name; i++) {
        if (strcmp(name, arm_cpu_names[i].name) == 0) {
            id = arm_cpu_names[i].id;
            break;
        }
    }
    return id;
}

void cpu_arm_close(CPUARMState *env)
{
    free(env);
}

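/* The emulated CPSR is kept scattered across dedicated fields (NF, ZF, CF,
   VF, QF, GE, thumb, condexec_bits) plus uncached_cpsr.  cpsr_read()
   reassembles the architectural value and cpsr_write() scatters it back,
   touching only the fields selected by the mask.  */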
uint32_t cpsr_read(CPUARMState *env)
{
    int ZF;
    ZF = (env->ZF == 0);
    return env->uncached_cpsr | (env->NF & 0x80000000) | (ZF << 30) |
        (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | (env->GE << 16);
}

void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & CPSR_NZCV) {
        env->ZF = (~val) & CPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & CPSR_Q)
        env->QF = ((val & CPSR_Q) != 0);
    if (mask & CPSR_T)
        env->thumb = ((val & CPSR_T) != 0);
    if (mask & CPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & CPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & CPSR_GE) {
        env->GE = (val >> 16) & 0xf;
    }

    if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
        switch_mode(env, val & CPSR_M);
    }
    mask &= ~CACHED_CPSR_BITS;
    env->uncached_cpsr = (env->uncached_cpsr & ~mask) | (val & mask);
}

/* Sign/zero extend */
uint32_t HELPER(sxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

uint32_t HELPER(uxtb16)(uint32_t x)
{
    uint32_t res;
    res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

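/* Count leading zeros; like the ARM CLZ instruction this returns 32 for an
   input of zero.  */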
uint32_t HELPER(clz)(uint32_t x)
{
    int count;
    for (count = 32; x; count--)
        x >>= 1;
    return count;
}

int32_t HELPER(sdiv)(int32_t num, int32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}

uint32_t HELPER(udiv)(uint32_t num, uint32_t den)
{
    if (den == 0)
      return 0;
    return num / den;
}

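/* Reverse the bit order of a 32-bit word: swap the bytes, then the nibbles
   within each byte, then the bits within each nibble.  */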
uint32_t HELPER(rbit)(uint32_t x)
{
    x =  ((x & 0xff000000) >> 24)
       | ((x & 0x00ff0000) >> 8)
       | ((x & 0x0000ff00) << 8)
       | ((x & 0x000000ff) << 24);
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}

uint32_t HELPER(abs)(uint32_t x)
{
    return ((int32_t)x < 0) ? -x : x;
}

#if defined(CONFIG_USER_ONLY)

void do_interrupt (CPUState *env)
{
    env->exception_index = -1;
}

/* Structure used to record exclusive memory locations.  */
typedef struct mmon_state {
    struct mmon_state *next;
    CPUARMState *cpu_env;
    uint32_t addr;
} mmon_state;

/* Chain of current locks.  */
static mmon_state* mmon_head = NULL;

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
                              int mmu_idx, int is_softmmu)
{
    if (rw == 2) {
        env->exception_index = EXCP_PREFETCH_ABORT;
        env->cp15.c6_insn = address;
    } else {
        env->exception_index = EXCP_DATA_ABORT;
        env->cp15.c6_data = address;
    }
    return 1;
}

static void allocate_mmon_state(CPUState *env)
{
    env->mmon_entry = malloc(sizeof (mmon_state));
    memset (env->mmon_entry, 0, sizeof (mmon_state));
    env->mmon_entry->cpu_env = env;
    mmon_head = env->mmon_entry;
}

/* Flush any monitor locks for the specified address.  */
static void flush_mmon(uint32_t addr)
{
    mmon_state *mon;

    for (mon = mmon_head; mon; mon = mon->next)
      {
        if (mon->addr != addr)
          continue;

        mon->addr = 0;
        break;
      }
}

/* Mark an address for exclusive access.  */
void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
{
    if (!env->mmon_entry)
        allocate_mmon_state(env);
    /* Clear any previous locks.  */
    flush_mmon(addr);
    env->mmon_entry->addr = addr;
}

/* Test if an exclusive address is still exclusive.  Returns zero
   if the address is still exclusive.  */
uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
{
    int res;

    if (!env->mmon_entry)
        return 1;
    if (env->mmon_entry->addr == addr)
        res = 0;
    else
        res = 1;
    flush_mmon(addr);
    return res;
}

void HELPER(clrex)(CPUState *env)
{
    if (!(env->mmon_entry && env->mmon_entry->addr))
        return;
    flush_mmon(env->mmon_entry->addr);
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    return addr;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return;
}

uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int op1 = (insn >> 8) & 0xf;
    cpu_abort(env, "cp%i insn %08x\n", op1, insn);
    return 0;
}

void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
}

uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
{
    cpu_abort(env, "cp15 insn %08x\n", insn);
    return 0;
}

/* These should probably raise undefined insn exceptions.  */
void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
{
    cpu_abort(env, "v7m_msr %d\n", reg);
}

uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
{
    cpu_abort(env, "v7m_mrs %d\n", reg);
    return 0;
}

void switch_mode(CPUState *env, int mode)
{
    if (mode != ARM_CPU_MODE_USR)
        cpu_abort(env, "Tried to switch out of user mode\n");
}

void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
{
    cpu_abort(env, "banked r13 write\n");
}

uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
{
    cpu_abort(env, "banked r13 read\n");
    return 0;
}

#else

extern int semihosting_enabled;

/* Map CPU modes onto saved register banks.  */
static inline int bank_number (int mode)
{
    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_SYS:
        return 0;
    case ARM_CPU_MODE_SVC:
        return 1;
    case ARM_CPU_MODE_ABT:
        return 2;
    case ARM_CPU_MODE_UND:
        return 3;
    case ARM_CPU_MODE_IRQ:
        return 4;
    case ARM_CPU_MODE_FIQ:
        return 5;
    }
    cpu_abort(cpu_single_env, "Bad mode %x\n", mode);
    return -1;
}

void switch_mode(CPUState *env, int mode)
{
    int old_mode;
    int i;

    old_mode = env->uncached_cpsr & CPSR_M;
    if (mode == old_mode)
        return;

    if (old_mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->fiq_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->usr_regs, 5 * sizeof(uint32_t));
    } else if (mode == ARM_CPU_MODE_FIQ) {
        memcpy (env->usr_regs, env->regs + 8, 5 * sizeof(uint32_t));
        memcpy (env->regs + 8, env->fiq_regs, 5 * sizeof(uint32_t));
    }

    i = bank_number(old_mode);
    env->banked_r13[i] = env->regs[13];
    env->banked_r14[i] = env->regs[14];
    env->banked_spsr[i] = env->spsr;

    i = bank_number(mode);
    env->regs[13] = env->banked_r13[i];
    env->regs[14] = env->banked_r14[i];
    env->spsr = env->banked_spsr[i];
}

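/* Push/pop a word on the stack addressed by the active v7-M stack pointer
   (regs[13]), using physical memory accesses.  */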
static void v7m_push(CPUARMState *env, uint32_t val)
{
    env->regs[13] -= 4;
    stl_phys(env->regs[13], val);
}

static uint32_t v7m_pop(CPUARMState *env)
{
    uint32_t val;
    val = ldl_phys(env->regs[13]);
    env->regs[13] += 4;
    return val;
}

/* Switch to V7M main or process stack pointer.  */
static void switch_v7m_sp(CPUARMState *env, int process)
{
    uint32_t tmp;
    if (env->v7m.current_sp != process) {
        tmp = env->v7m.other_sp;
        env->v7m.other_sp = env->regs[13];
        env->regs[13] = tmp;
        env->v7m.current_sp = process;
    }
}

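/* Return from a v7-M exception: complete the interrupt at the NVIC, switch
   to the stack selected by the EXC_RETURN value and unstack r0-r3, r12, lr,
   pc and xPSR.  */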
static void do_v7m_exception_exit(CPUARMState *env)
{
    uint32_t type;
    uint32_t xpsr;

    type = env->regs[15];
    if (env->v7m.exception != 0)
        armv7m_nvic_complete_irq(env->v7m.nvic, env->v7m.exception);

    /* Switch to the target stack.  */
    switch_v7m_sp(env, (type & 4) != 0);
    /* Pop registers.  */
    env->regs[0] = v7m_pop(env);
    env->regs[1] = v7m_pop(env);
    env->regs[2] = v7m_pop(env);
    env->regs[3] = v7m_pop(env);
    env->regs[12] = v7m_pop(env);
    env->regs[14] = v7m_pop(env);
    env->regs[15] = v7m_pop(env);
    xpsr = v7m_pop(env);
    xpsr_write(env, xpsr, 0xfffffdff);
    /* Undo stack alignment.  */
    if (xpsr & 0x200)
        env->regs[13] |= 4;
    /* ??? The exception return type specifies Thread/Handler mode.  However
       this is also implied by the xPSR value. Not sure what to do
       if there is a mismatch.  */
    /* ??? Likewise for mismatches between the CONTROL register and the stack
       pointer.  */
}

static void do_interrupt_v7m(CPUARMState *env)
{
    uint32_t xpsr = xpsr_read(env);
    uint32_t lr;
    uint32_t addr;

    lr = 0xfffffff1;
    if (env->v7m.current_sp)
        lr |= 4;
    if (env->v7m.exception == 0)
        lr |= 8;

    /* For exceptions we just mark as pending on the NVIC, and let that
       handle it.  */
    /* TODO: Need to escalate if the current priority is higher than the
       one we're raising.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_USAGE);
        return;
    case EXCP_SWI:
        env->regs[15] += 2;
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_SVC);
        return;
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_MEM);
        return;
    case EXCP_BKPT:
        if (semihosting_enabled) {
            int nr;
            nr = lduw_code(env->regs[15]) & 0xff;
            if (nr == 0xab) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        armv7m_nvic_set_pending(env->v7m.nvic, ARMV7M_EXCP_DEBUG);
        return;
    case EXCP_IRQ:
        env->v7m.exception = armv7m_nvic_acknowledge_irq(env->v7m.nvic);
        break;
    case EXCP_EXCEPTION_EXIT:
        do_v7m_exception_exit(env);
        return;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }

    /* Align stack pointer.  */
    /* ??? Should only do this if Configuration Control Register
       STACKALIGN bit is set.  */
    if (env->regs[13] & 4) {
        env->regs[13] -= 4;
        xpsr |= 0x200;
    }
    /* Switch to the handler mode.  */
    v7m_push(env, xpsr);
    v7m_push(env, env->regs[15]);
    v7m_push(env, env->regs[14]);
    v7m_push(env, env->regs[12]);
    v7m_push(env, env->regs[3]);
    v7m_push(env, env->regs[2]);
    v7m_push(env, env->regs[1]);
    v7m_push(env, env->regs[0]);
    switch_v7m_sp(env, 0);
    env->uncached_cpsr &= ~CPSR_IT;
    env->regs[14] = lr;
    addr = ldl_phys(env->v7m.vecbase + env->v7m.exception * 4);
    env->regs[15] = addr & 0xfffffffe;
    env->thumb = addr & 1;
}

/* Handle a CPU exception.  */
void do_interrupt(CPUARMState *env)
{
    uint32_t addr;
    uint32_t mask;
    int new_mode;
    uint32_t offset;

#ifdef CONFIG_TRACE
    if (tracing) {
        trace_exception(env->regs[15]);
    }
#endif

    if (IS_M(env)) {
        do_interrupt_v7m(env);
        return;
    }
    /* TODO: Vectored interrupt controller.  */
    switch (env->exception_index) {
    case EXCP_UDEF:
        new_mode = ARM_CPU_MODE_UND;
        addr = 0x04;
        mask = CPSR_I;
        if (env->thumb)
            offset = 2;
        else
            offset = 4;
        break;
    case EXCP_SWI:
        if (semihosting_enabled) {
            /* Check for semihosting interrupt.  */
            if (env->thumb) {
                mask = lduw_code(env->regs[15] - 2) & 0xff;
            } else {
                mask = ldl_code(env->regs[15] - 4) & 0xffffff;
            }
            /* Only intercept calls from privileged modes, to provide some
               semblance of security.  */
            if (((mask == 0x123456 && !env->thumb)
                    || (mask == 0xab && env->thumb))
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        new_mode = ARM_CPU_MODE_SVC;
        addr = 0x08;
        mask = CPSR_I;
        /* The PC already points to the next instruction.  */
        offset = 0;
        break;
    case EXCP_BKPT:
        /* See if this is a semihosting syscall.  */
        if (env->thumb && semihosting_enabled) {
            mask = lduw_code(env->regs[15]) & 0xff;
            if (mask == 0xab
                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
                env->regs[15] += 2;
                env->regs[0] = do_arm_semihosting(env);
                return;
            }
        }
        /* Fall through to prefetch abort.  */
    case EXCP_PREFETCH_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x0c;
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_DATA_ABORT:
        new_mode = ARM_CPU_MODE_ABT;
        addr = 0x10;
        mask = CPSR_A | CPSR_I;
        offset = 8;
        break;
    case EXCP_IRQ:
        new_mode = ARM_CPU_MODE_IRQ;
        addr = 0x18;
        /* Disable IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I;
        offset = 4;
        break;
    case EXCP_FIQ:
        new_mode = ARM_CPU_MODE_FIQ;
        addr = 0x1c;
        /* Disable FIQ, IRQ and imprecise data aborts.  */
        mask = CPSR_A | CPSR_I | CPSR_F;
        offset = 4;
        break;
    default:
        cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
        return; /* Never happens.  Keep compiler happy.  */
    }
    /* High vectors.  */
    if (env->cp15.c1_sys & (1 << 13)) {
        addr += 0xffff0000;
    }
    switch_mode (env, new_mode);
    env->spsr = cpsr_read(env);
    /* Clear IT bits.  */
    env->condexec_bits = 0;
    /* Switch to the new mode, and switch to Arm mode.  */
    /* ??? Thumb interrupt handlers not implemented.  */
    env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
    env->uncached_cpsr |= mask;
    env->thumb = 0;
    env->regs[14] = env->regs[15] + offset;
    env->regs[15] = addr;
    env->interrupt_request |= CPU_INTERRUPT_EXITTB;
}

/* Check section/page access permissions.
   Returns the page protection flags, or zero if the access is not
   permitted.  */
static inline int check_ap(CPUState *env, int ap, int domain, int access_type,
                           int is_user)
{
  int prot_ro;

  if (domain == 3)
    return PAGE_READ | PAGE_WRITE;

  if (access_type == 1)
      prot_ro = 0;
  else
      prot_ro = PAGE_READ;

  switch (ap) {
  case 0:
      if (access_type == 1)
          return 0;
      switch ((env->cp15.c1_sys >> 8) & 3) {
      case 1:
          return is_user ? 0 : PAGE_READ;
      case 2:
          return PAGE_READ;
      default:
          return 0;
      }
  case 1:
      return is_user ? 0 : PAGE_READ | PAGE_WRITE;
  case 2:
      if (is_user)
          return prot_ro;
      else
          return PAGE_READ | PAGE_WRITE;
  case 3:
      return PAGE_READ | PAGE_WRITE;
  case 4: /* Reserved.  */
      return 0;
  case 5:
      return is_user ? 0 : prot_ro;
  case 6:
      return prot_ro;
  case 7:
      if (!arm_feature (env, ARM_FEATURE_V7))
          return 0;
      return prot_ro;
  default:
      abort();
  }
}

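/* Return the physical address of the level 1 descriptor for a virtual
   address, choosing between the two translation table base registers
   according to the TTBCR-derived c2_mask.  */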
static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
{
    uint32_t table;

    if (address & env->cp15.c2_mask)
        table = env->cp15.c2_base1 & 0xffffc000;
    else
        table = env->cp15.c2_base0 & env->cp15.c2_base_mask;

    table |= (address >> 18) & 0x3ffc;
    return table;
}

static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    domain = (env->cp15.c3 >> ((desc >> 4) & 0x1e)) & 3;
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        goto do_fault;
    }
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        /* 1Mb section.  */
        phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        ap = (desc >> 10) & 3;
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        if (type == 1) {
            /* Coarse pagetable.  */
            table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        } else {
            /* Fine pagetable.  */
            table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
        }
        desc = ldl_phys(table);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 2: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
            break;
        case 3: /* 1k page.  */
            if (type == 1) {
                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                    phys_addr = (desc & 0xfffff000) | (address & 0xfff);
                } else {
                    /* Page translation fault.  */
                    code = 7;
                    goto do_fault;
                }
            } else {
                phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
            }
            ap = (desc >> 4) & 3;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
                            int is_user, uint32_t *phys_ptr, int *prot)
{
    int code;
    uint32_t table;
    uint32_t desc;
    uint32_t xn;
    int type;
    int ap;
    int domain;
    uint32_t phys_addr;

    /* Pagetable walk.  */
    /* Lookup l1 descriptor.  */
    table = get_level1_table_address(env, address);
    desc = ldl_phys(table);
    type = (desc & 3);
    if (type == 0) {
        /* Section translation fault.  */
        code = 5;
        domain = 0;
        goto do_fault;
    } else if (type == 2 && (desc & (1 << 18))) {
        /* Supersection.  */
        domain = 0;
    } else {
        /* Section or page.  */
        domain = (desc >> 4) & 0x1e;
    }
    domain = (env->cp15.c3 >> domain) & 3;
    if (domain == 0 || domain == 2) {
        if (type == 2)
            code = 9; /* Section domain fault.  */
        else
            code = 11; /* Page domain fault.  */
        goto do_fault;
    }
    if (type == 2) {
        if (desc & (1 << 18)) {
            /* Supersection.  */
            phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
        } else {
            /* Section.  */
            phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
        }
        ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
        xn = desc & (1 << 4);
        code = 13;
    } else {
        /* Lookup l2 entry.  */
        table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
        desc = ldl_phys(table);
        ap = ((desc >> 4) & 3) | ((desc >> 7) & 4);
        switch (desc & 3) {
        case 0: /* Page translation fault.  */
            code = 7;
            goto do_fault;
        case 1: /* 64k page.  */
            phys_addr = (desc & 0xffff0000) | (address & 0xffff);
            xn = desc & (1 << 15);
            break;
        case 2: case 3: /* 4k page.  */
            phys_addr = (desc & 0xfffff000) | (address & 0xfff);
            xn = desc & 1;
            break;
        default:
            /* Never happens, but compiler isn't smart enough to tell.  */
            abort();
        }
        code = 15;
    }
    if (xn && access_type == 2)
        goto do_fault;

    /* The simplified model uses AP[0] as an access control bit.  */
    if ((env->cp15.c1_sys & (1 << 29)) && (ap & 1) == 0) {
        /* Access flag fault.  */
        code = (code == 15) ? 6 : 3;
        goto do_fault;
    }
    *prot = check_ap(env, ap, domain, access_type, is_user);
    if (!*prot) {
        /* Access permission fault.  */
        goto do_fault;
    }
    *phys_ptr = phys_addr;
    return 0;
do_fault:
    return code | (domain << 4);
}

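/* MPU translation: scan the region registers from 7 down to 0 so the
   highest-numbered enabled region containing the address wins, then derive
   the protection bits from the c5 data/instruction permission fields.  */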
static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
                             int is_user, uint32_t *phys_ptr, int *prot)
{
    int n;
    uint32_t mask;
    uint32_t base;

    *phys_ptr = address;
    for (n = 7; n >= 0; n--) {
        base = env->cp15.c6_region[n];
        if ((base & 1) == 0)
            continue;
        mask = 1 << ((base >> 1) & 0x1f);
        /* Keep this shift separate from the above to avoid an
           (undefined) << 32.  */
        mask = (mask << 1) - 1;
        if (((base ^ address) & ~mask) == 0)
            break;
    }
    if (n < 0)
        return 2;

    if (access_type == 2) {
        mask = env->cp15.c5_insn;
    } else {
        mask = env->cp15.c5_data;
    }
    mask = (mask >> (n * 4)) & 0xf;
    switch (mask) {
    case 0:
        return 1;
    case 1:
        if (is_user)
            return 1;
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 2:
        *prot = PAGE_READ;
        if (!is_user)
            *prot |= PAGE_WRITE;
        break;
    case 3:
        *prot = PAGE_READ | PAGE_WRITE;
        break;
    case 5:
        if (is_user)
            return 1;
        *prot = PAGE_READ;
        break;
    case 6:
        *prot = PAGE_READ;
        break;
    default:
        /* Bad permission.  */
        return 1;
    }
    return 0;
}

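/* Translate a virtual address to a physical address and protection bits,
   dispatching to the MPU, v6 or v5 table walker as appropriate.  Returns 0
   on success, or a fault status code.  */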
static inline int get_phys_addr(CPUState *env, uint32_t address,
                                int access_type, int is_user,
                                uint32_t *phys_ptr, int *prot)
{
    /* Fast Context Switch Extension.  */
    if (address < 0x02000000)
        address += env->cp15.c13_fcse;

    if ((env->cp15.c1_sys & 1) == 0) {
        /* MMU/MPU disabled.  */
        *phys_ptr = address;
        *prot = PAGE_READ | PAGE_WRITE;
        return 0;
    } else if (arm_feature(env, ARM_FEATURE_MPU)) {
        return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
                                 prot);
    } else if (env->cp15.c1_sys & (1 << 23)) {
        return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
                                prot);
    } else {
        return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
                                prot);
    }
}

int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
                              int access_type, int mmu_idx, int is_softmmu)
{
    uint32_t phys_addr;
    int prot;
    int ret, is_user;

    is_user = mmu_idx == MMU_USER_IDX;
    ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
    if (ret == 0) {
        /* Map a single [sub]page.  */
        phys_addr &= ~(uint32_t)0x3ff;
        address &= ~(uint32_t)0x3ff;
        return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
                             is_softmmu);
    }

    if (access_type == 2) {
        env->cp15.c5_insn = ret;
        env->cp15.c6_insn = address;
        env->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        env->cp15.c5_data = ret;
        if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
            env->cp15.c5_data |= (1 << 11);
        env->cp15.c6_data = address;
        env->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}

target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
    uint32_t phys_addr;
    int prot;
    int ret;

    ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);

    if (ret != 0)
        return -1;

    return phys_addr;
}

/* Not really implemented.  Need to figure out a sane way of doing this.
   Maybe add generic watchpoint support and use that.  */

void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
{
    env->mmon_addr = addr;
}

uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
{
    return (env->mmon_addr != addr);
}

void HELPER(clrex)(CPUState *env)
{
    env->mmon_addr = -1;
}

void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int src = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_write)
        env->cp[cp_num].cp_write(env->cp[cp_num].opaque,
                                 cp_info, src, operand, val);
}

uint32_t HELPER(get_cp)(CPUState *env, uint32_t insn)
{
    int cp_num = (insn >> 8) & 0xf;
    int cp_info = (insn >> 5) & 7;
    int dest = (insn >> 16) & 0xf;
    int operand = insn & 0xf;

    if (env->cp[cp_num].cp_read)
        return env->cp[cp_num].cp_read(env->cp[cp_num].opaque,
                                       cp_info, dest, operand);
    return 0;
}

/* Return basic MPU access permission bits.  */
static uint32_t simple_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val >> i) & mask;
        mask <<= 2;
    }
    return ret;
}

/* Pad basic MPU access permission bits to extended format.  */
static uint32_t extended_mpu_ap_bits(uint32_t val)
{
    uint32_t ret;
    uint32_t mask;
    int i;
    ret = 0;
    mask = 3;
    for (i = 0; i < 16; i += 2) {
        ret |= (val & mask) << i;
        mask <<= 2;
    }
    return ret;
}

void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
{
    int op1;
    int op2;
    int crm;

    op1 = (insn >> 21) & 7;
    op2 = (insn >> 5) & 7;
    crm = insn & 0xf;
    switch ((insn >> 16) & 0xf) {
    case 0:
        /* ID codes.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE))
            break;
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        if (arm_feature(env, ARM_FEATURE_V7)
                && op1 == 2 && crm == 0 && op2 == 0) {
            env->cp15.c0_cssel = val & 0xf;
            break;
        }
        goto bad_reg;
    case 1: /* System configuration.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (!arm_feature(env, ARM_FEATURE_XSCALE) || crm == 0)
                env->cp15.c1_sys = val;
            /* ??? Lots of these bits are not implemented.  */
            /* This may enable/disable the MMU, so do a TLB flush.  */
            tlb_flush(env, 1);
            break;
        case 1: /* Auxiliary control register.  */
            if (arm_feature(env, ARM_FEATURE_XSCALE)) {
                env->cp15.c1_xscaleauxcr = val;
                break;
            }
            /* Not implemented.  */
            break;
        case 2:
            if (arm_feature(env, ARM_FEATURE_XSCALE))
                goto bad_reg;
            if (env->cp15.c1_coproc != val) {
                env->cp15.c1_coproc = val;
                /* ??? Is this safe when called from within a TB?  */
                tb_flush(env);
            }
            break;
        default:
            goto bad_reg;
        }
        break;
    case 2: /* MMU Page table control / MPU cache control.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            switch (op2) {
            case 0:
                env->cp15.c2_data = val;
                break;
            case 1:
                env->cp15.c2_insn = val;
                break;
            default:
                goto bad_reg;
            }
        } else {
            switch (op2) {
            case 0:
                env->cp15.c2_base0 = val;
                break;
            case 1:
                env->cp15.c2_base1 = val;
                break;
            case 2:
                val &= 7;
                env->cp15.c2_control = val;
                env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> val);
                env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> val);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 3: /* MMU Domain access control / MPU write buffer control.  */
        env->cp15.c3 = val;
        tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
        break;
    case 4: /* Reserved.  */
        goto bad_reg;
    case 5: /* MMU Fault status / MPU access permission.  */
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            op2 = 0;
        switch (op2) {
        case 0:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_data = val;
            break;
        case 1:
            if (arm_feature(env, ARM_FEATURE_MPU))
                val = extended_mpu_ap_bits(val);
            env->cp15.c5_insn = val;
            break;
        case 2:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_data = val;
            break;
        case 3:
            if (!arm_feature(env, ARM_FEATURE_MPU))
                goto bad_reg;
            env->cp15.c5_insn = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 6: /* MMU Fault address / MPU base/size.  */
        if (arm_feature(env, ARM_FEATURE_MPU)) {
            if (crm >= 8)
                goto bad_reg;
            env->cp15.c6_region[crm] = val;
        } else {
            if (arm_feature(env, ARM_FEATURE_OMAPCP))
                op2 = 0;
            switch (op2) {
            case 0:
                env->cp15.c6_data = val;
                break;
            case 1: /* ??? This is WFAR on armv6 */
            case 2:
                env->cp15.c6_insn = val;
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    case 7: /* Cache control.  */
        env->cp15.c15_i_max = 0x000;
        env->cp15.c15_i_min = 0xff0;
        /* No cache, so nothing to do.  */
        /* ??? MPCore has VA to PA translation functions.  */
        break;
    case 8: /* MMU TLB control.  */
        switch (op2) {
        case 0: /* Invalidate all.  */
            tlb_flush(env, 0);
            break;
        case 1: /* Invalidate single TLB entry.  */
#if 0
            /* ??? This is wrong for large pages and sections.  */
            /* As an ugly hack to make linux work we always flush a whole
               4K page.  */
            val &= 0xfffff000;
            tlb_flush_page(env, val);
            tlb_flush_page(env, val + 0x400);
            tlb_flush_page(env, val + 0x800);
            tlb_flush_page(env, val + 0xc00);
#else
            tlb_flush(env, 1);
#endif
            break;
        case 2: /* Invalidate on ASID.  */
            tlb_flush(env, val == 0);
            break;
        case 3: /* Invalidate single entry on MVA.  */
            /* ??? This is like case 1, but ignores ASID.  */
            tlb_flush(env, 1);
            break;
        default:
            goto bad_reg;
        }
        break;
    case 9:
        if (arm_feature(env, ARM_FEATURE_OMAPCP))
            break;
        switch (crm) {
        case 0: /* Cache lockdown.  */
            switch (op1) {
            case 0: /* L1 cache.  */
                switch (op2) {
                case 0:
                    env->cp15.c9_data = val;
                    break;
                case 1:
                    env->cp15.c9_insn = val;
                    break;
                default:
                    goto bad_reg;
                }
                break;
            case 1: /* L2 cache.  */
                /* Ignore writes to L2 lockdown/auxiliary registers.  */
                break;
            default:
                goto bad_reg;
            }
            break;
        case 1: /* TCM memory region registers.  */
            /* Not implemented.  */
            goto bad_reg;
        default:
            goto bad_reg;
        }
        break;
    case 10: /* MMU TLB lockdown.  */
        /* ??? TLB lockdown not implemented.  */
        break;
    case 12: /* Reserved.  */
        goto bad_reg;
    case 13: /* Process ID.  */
        switch (op2) {
        case 0:
            /* Unlike real hardware the qemu TLB uses virtual addresses,
               not modified virtual addresses, so this causes a TLB flush.
             */
            if (env->cp15.c13_fcse != val)
              tlb_flush(env, 1);
            env->cp15.c13_fcse = val;
            break;
        case 1:
            /* This changes the ASID, so do a TLB flush.  */
            if (env->cp15.c13_context != val
                && !arm_feature(env, ARM_FEATURE_MPU))
              tlb_flush(env, 0);
            env->cp15.c13_context = val;
            break;
        case 2:
            env->cp15.c13_tls1 = val;
            break;
        case 3:
            env->cp15.c13_tls2 = val;
            break;
        case 4:
            env->cp15.c13_tls3 = val;
            break;
        default:
            goto bad_reg;
        }
        break;
    case 14: /* Reserved.  */
        goto bad_reg;
    case 15: /* Implementation specific.  */
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            if (op2 == 0 && crm == 1) {
                if (env->cp15.c15_cpar != (val & 0x3fff)) {
                    /* Changes cp0 to cp13 behavior, so needs a TB flush.  */
                    tb_flush(env);
                    env->cp15.c15_cpar = val & 0x3fff;
                }
                break;
            }
            goto bad_reg;
        }
        if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
            switch (crm) {
            case 0:
                break;
            case 1: /* Set TI925T configuration.  */
                env->cp15.c15_ticonfig = val & 0xe7;
                env->cp15.c0_cpuid = (val & (1 << 5)) ? /* OS_TYPE bit */
                        ARM_CPUID_TI915T : ARM_CPUID_TI925T;
                break;
            case 2: /* Set I_max.  */
                env->cp15.c15_i_max = val;
                break;
            case 3: /* Set I_min.  */
                env->cp15.c15_i_min = val;
                break;
            case 4: /* Set thread-ID.  */
                env->cp15.c15_threadid = val & 0xffff;
                break;
            case 8: /* Wait-for-interrupt (deprecated).  */
                cpu_interrupt(env, CPU_INTERRUPT_HALT);
                break;
            default:
                goto bad_reg;
            }
        }
        break;
    }
    return;
bad_reg:
    /* ??? For debugging only.  Should raise illegal instruction exception.  */
    cpu_abort(env, "Unimplemented cp15 register write (c%d, c%d, {%d, %d})\n",
              (insn >> 16) & 0xf, crm, op1, op2);
}

HELPER(get_cp15)1622 uint32_t HELPER(get_cp15)(CPUState *env, uint32_t insn)
1623 {
1624     int op1;
1625     int op2;
1626     int crm;
1627 
1628     op1 = (insn >> 21) & 7;
1629     op2 = (insn >> 5) & 7;
1630     crm = insn & 0xf;
1631     switch ((insn >> 16) & 0xf) {
1632     case 0: /* ID codes.  */
1633         switch (op1) {
1634         case 0:
1635             switch (crm) {
1636             case 0:
1637                 switch (op2) {
1638                 case 0: /* Device ID.  */
1639                     return env->cp15.c0_cpuid;
1640                 case 1: /* Cache Type.  */
1641 		    return env->cp15.c0_cachetype;
1642                 case 2: /* TCM status.  */
1643                     return 0;
1644                 case 3: /* TLB type register.  */
1645                     return 0; /* No lockable TLB entries.  */
1646                 case 5: /* CPU ID */
1647                     return env->cpu_index;
1648                 default:
1649                     goto bad_reg;
1650                 }
1651             case 1:
1652                 if (!arm_feature(env, ARM_FEATURE_V6))
1653                     goto bad_reg;
1654                 return env->cp15.c0_c1[op2];
1655             case 2:
1656                 if (!arm_feature(env, ARM_FEATURE_V6))
1657                     goto bad_reg;
1658                 return env->cp15.c0_c2[op2];
1659             case 3: case 4: case 5: case 6: case 7:
1660                 return 0;
1661             default:
1662                 goto bad_reg;
1663             }
1664         case 1:
1665             /* These registers aren't documented on arm11 cores.  However
1666                Linux looks at them anyway.  */
1667             if (!arm_feature(env, ARM_FEATURE_V6))
1668                 goto bad_reg;
1669             if (crm != 0)
1670                 goto bad_reg;
1671             if (!arm_feature(env, ARM_FEATURE_V7))
1672                 return 0;
1673 
1674             switch (op2) {
1675             case 0:
1676                 return env->cp15.c0_ccsid[env->cp15.c0_cssel];
1677             case 1:
1678                 return env->cp15.c0_clid;
1679             case 7:
1680                 return 0;
1681             }
1682             goto bad_reg;
1683         case 2:
1684             if (op2 != 0 || crm != 0)
1685                 goto bad_reg;
1686             return env->cp15.c0_cssel;
1687         default:
1688             goto bad_reg;
1689         }
1690     case 1: /* System configuration.  */
1691         if (arm_feature(env, ARM_FEATURE_OMAPCP))
1692             op2 = 0;
1693         switch (op2) {
1694         case 0: /* Control register.  */
1695             return env->cp15.c1_sys;
1696         case 1: /* Auxiliary control register.  */
1697             if (arm_feature(env, ARM_FEATURE_XSCALE))
1698                 return env->cp15.c1_xscaleauxcr;
1699             if (!arm_feature(env, ARM_FEATURE_AUXCR))
1700                 goto bad_reg;
1701             switch (ARM_CPUID(env)) {
1702             case ARM_CPUID_ARM1026:
1703                 return 1;
1704             case ARM_CPUID_ARM1136:
1705             case ARM_CPUID_ARM1136_R2:
1706                 return 7;
1707             case ARM_CPUID_ARM11MPCORE:
1708                 return 1;
1709             case ARM_CPUID_CORTEXA8:
1710                 return 2;
1711             default:
1712                 goto bad_reg;
1713             }
1714         case 2: /* Coprocessor access register.  */
1715             if (arm_feature(env, ARM_FEATURE_XSCALE))
1716                 goto bad_reg;
1717             return env->cp15.c1_coproc;
1718         default:
1719             goto bad_reg;
1720         }
1721     case 2: /* MMU Page table control / MPU cache control.  */
1722         if (arm_feature(env, ARM_FEATURE_MPU)) {
1723             switch (op2) {
1724             case 0:
1725                 return env->cp15.c2_data;
1726                 break;
1727             case 1:
1728                 return env->cp15.c2_insn;
1729                 break;
1730             default:
1731                 goto bad_reg;
1732             }
1733         } else {
1734             switch (op2) {
1735             case 0:
1736                 return env->cp15.c2_base0;
1737             case 1:
1738                 return env->cp15.c2_base1;
1739             case 2:
1740                 return env->cp15.c2_control;
1741             default:
1742                 goto bad_reg;
1743             }
1744         }
1745     case 3: /* MMU Domain access control / MPU write buffer control.  */
1746         return env->cp15.c3;
1747     case 4: /* Reserved.  */
1748         goto bad_reg;
1749     case 5: /* MMU Fault status / MPU access permission.  */
1750         if (arm_feature(env, ARM_FEATURE_OMAPCP))
1751             op2 = 0;
1752         switch (op2) {
1753         case 0:
1754             if (arm_feature(env, ARM_FEATURE_MPU))
1755                 return simple_mpu_ap_bits(env->cp15.c5_data);
1756             return env->cp15.c5_data;
1757         case 1:
1758             if (arm_feature(env, ARM_FEATURE_MPU))
1759                 return simple_mpu_ap_bits(env->cp15.c5_insn);
1760             return env->cp15.c5_insn;
1761         case 2:
1762             if (!arm_feature(env, ARM_FEATURE_MPU))
1763                 goto bad_reg;
1764             return env->cp15.c5_data;
1765         case 3:
1766             if (!arm_feature(env, ARM_FEATURE_MPU))
1767                 goto bad_reg;
1768             return env->cp15.c5_insn;
1769         default:
1770             goto bad_reg;
1771         }
1772     case 6: /* MMU Fault address.  */
1773         if (arm_feature(env, ARM_FEATURE_MPU)) {
1774             if (crm >= 8)
1775                 goto bad_reg;
1776             return env->cp15.c6_region[crm];
1777         } else {
1778             if (arm_feature(env, ARM_FEATURE_OMAPCP))
1779                 op2 = 0;
1780             switch (op2) {
1781             case 0:
1782                 return env->cp15.c6_data;
1783             case 1:
1784                 if (arm_feature(env, ARM_FEATURE_V6)) {
1785                     /* Watchpoint Fault Address.  */
1786                     return 0; /* Not implemented.  */
1787                 } else {
1788                     /* Instruction Fault Address.  */
1789                     /* Arm9 doesn't have an IFAR, but implementing it anyway
1790                        shouldn't do any harm.  */
1791                     return env->cp15.c6_insn;
1792                 }
1793             case 2:
1794                 if (arm_feature(env, ARM_FEATURE_V6)) {
1795                     /* Instruction Fault Address.  */
1796                     return env->cp15.c6_insn;
1797                 } else {
1798                     goto bad_reg;
1799                 }
1800             default:
1801                 goto bad_reg;
1802             }
1803         }
1804     case 7: /* Cache control.  */
1805         /* FIXME: Should only clear Z flag if destination is r15.  */
1806         env->ZF = 0;
1807         return 0;
1808     case 8: /* MMU TLB control.  */
1809         goto bad_reg;
1810     case 9: /* Cache lockdown.  */
1811         switch (op1) {
1812         case 0: /* L1 cache.  */
1813             if (arm_feature(env, ARM_FEATURE_OMAPCP))
1814                 return 0;
1815             switch (op2) {
1816             case 0:
1817                 return env->cp15.c9_data;
1818             case 1:
1819                 return env->cp15.c9_insn;
1820             default:
1821                 goto bad_reg;
1822             }
1823         case 1: /* L2 cache */
1824             if (crm != 0)
1825                 goto bad_reg;
1826             /* L2 Lockdown and Auxiliary control.  */
1827             return 0;
1828         default:
1829             goto bad_reg;
1830         }
1831     case 10: /* MMU TLB lockdown.  */
1832         /* ??? TLB lockdown not implemented.  */
1833         return 0;
1834     case 11: /* TCM DMA control.  */
1835     case 12: /* Reserved.  */
1836         goto bad_reg;
1837     case 13: /* Process ID.  */
1838         switch (op2) {
1839         case 0:
1840             return env->cp15.c13_fcse;
1841         case 1:
1842             return env->cp15.c13_context;
1843         case 2:
1844             return env->cp15.c13_tls1;
1845         case 3:
1846             return env->cp15.c13_tls2;
1847         case 4:
1848             return env->cp15.c13_tls3;
1849         default:
1850             goto bad_reg;
1851         }
1852     case 14: /* Reserved.  */
1853         goto bad_reg;
1854     case 15: /* Implementation specific.  */
1855         if (arm_feature(env, ARM_FEATURE_XSCALE)) {
1856             if (op2 == 0 && crm == 1)
1857                 return env->cp15.c15_cpar;
1858 
1859             goto bad_reg;
1860         }
1861         if (arm_feature(env, ARM_FEATURE_OMAPCP)) {
1862             switch (crm) {
1863             case 0:
1864                 return 0;
1865             case 1: /* Read TI925T configuration.  */
1866                 return env->cp15.c15_ticonfig;
1867             case 2: /* Read I_max.  */
1868                 return env->cp15.c15_i_max;
1869             case 3: /* Read I_min.  */
1870                 return env->cp15.c15_i_min;
1871             case 4: /* Read thread-ID.  */
1872                 return env->cp15.c15_threadid;
1873             case 8: /* TI925T_status */
1874                 return 0;
1875             }
1876             /* TODO: Peripheral port remap register:
1877              * On OMAP2 mcr p15, 0, rn, c15, c2, 4 sets up the interrupt
1878              * controller base address at $rn & ~0xfff and map size of
1879              * 0x200 << ($rn & 0xfff), when MMU is off.  */
1880             goto bad_reg;
1881         }
1882         return 0;
1883     }
1884 bad_reg:
1885     /* ??? For debugging only.  Should raise illegal instruction exception.  */
1886     cpu_abort(env, "Unimplemented cp15 register read (c%d, c%d, {%d, %d})\n",
1887               (insn >> 16) & 0xf, crm, op1, op2);
1888     return 0;
1889 }
1890 
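/* Access the banked SP (r13) for an arbitrary processor mode; bank_number()
   maps the CPSR mode value to an index into banked_r13[].  */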
1891 void HELPER(set_r13_banked)(CPUState *env, uint32_t mode, uint32_t val)
1892 {
1893     env->banked_r13[bank_number(mode)] = val;
1894 }
1895 
1896 uint32_t HELPER(get_r13_banked)(CPUState *env, uint32_t mode)
1897 {
1898     return env->banked_r13[bank_number(mode)];
1899 }
1900 
1901 uint32_t HELPER(v7m_mrs)(CPUState *env, uint32_t reg)
1902 {
1903     switch (reg) {
1904     case 0: /* APSR */
1905         return xpsr_read(env) & 0xf8000000;
1906     case 1: /* IAPSR */
1907         return xpsr_read(env) & 0xf80001ff;
1908     case 2: /* EAPSR */
1909         return xpsr_read(env) & 0xff00fc00;
1910     case 3: /* xPSR */
1911         return xpsr_read(env) & 0xff00fdff;
1912     case 5: /* IPSR */
1913         return xpsr_read(env) & 0x000001ff;
1914     case 6: /* EPSR */
1915         return xpsr_read(env) & 0x0700fc00;
1916     case 7: /* IEPSR */
1917         return xpsr_read(env) & 0x0700edff;
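    /* Note: v7m.current_sp being non-zero means the process stack (PSP) is
       currently banked into regs[13]; the inactive pointer is kept in
       v7m.other_sp.  */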
1918     case 8: /* MSP */
1919         return env->v7m.current_sp ? env->v7m.other_sp : env->regs[13];
1920     case 9: /* PSP */
1921         return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
1922     case 16: /* PRIMASK */
1923         return (env->uncached_cpsr & CPSR_I) != 0;
1924     case 17: /* FAULTMASK */
1925         return (env->uncached_cpsr & CPSR_F) != 0;
1926     case 18: /* BASEPRI */
1927     case 19: /* BASEPRI_MAX */
1928         return env->v7m.basepri;
1929     case 20: /* CONTROL */
1930         return env->v7m.control;
1931     default:
1932         /* ??? For debugging only.  */
1933         cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
1934         return 0;
1935     }
1936 }
1937 
1938 void HELPER(v7m_msr)(CPUState *env, uint32_t reg, uint32_t val)
1939 {
1940     switch (reg) {
1941     case 0: /* APSR */
1942         xpsr_write(env, val, 0xf8000000);
1943         break;
1944     case 1: /* IAPSR */
1945         xpsr_write(env, val, 0xf8000000);
1946         break;
1947     case 2: /* EAPSR */
1948         xpsr_write(env, val, 0xfe00fc00);
1949         break;
1950     case 3: /* xPSR */
1951         xpsr_write(env, val, 0xfe00fc00);
1952         break;
1953     case 5: /* IPSR */
1954         /* IPSR bits are readonly.  */
1955         break;
1956     case 6: /* EPSR */
1957         xpsr_write(env, val, 0x0600fc00);
1958         break;
1959     case 7: /* IEPSR */
1960         xpsr_write(env, val, 0x0600fc00);
1961         break;
1962     case 8: /* MSP */
1963         if (env->v7m.current_sp)
1964             env->v7m.other_sp = val;
1965         else
1966             env->regs[13] = val;
1967         break;
1968     case 9: /* PSP */
1969         if (env->v7m.current_sp)
1970             env->regs[13] = val;
1971         else
1972             env->v7m.other_sp = val;
1973         break;
1974     case 16: /* PRIMASK */
1975         if (val & 1)
1976             env->uncached_cpsr |= CPSR_I;
1977         else
1978             env->uncached_cpsr &= ~CPSR_I;
1979         break;
1980     case 17: /* FAULTMASK */
1981         if (val & 1)
1982             env->uncached_cpsr |= CPSR_F;
1983         else
1984             env->uncached_cpsr &= ~CPSR_F;
1985         break;
1986     case 18: /* BASEPRI */
1987         env->v7m.basepri = val & 0xff;
1988         break;
1989     case 19: /* BASEPRI_MAX */
1990         val &= 0xff;
1991         if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
1992             env->v7m.basepri = val;
1993         break;
1994     case 20: /* CONTROL */
1995         env->v7m.control = val & 3;
1996         switch_v7m_sp(env, (val & 2) != 0);
1997         break;
1998     default:
1999         /* ??? For debugging only.  */
2000         cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
2001         return;
2002     }
2003 }
2004 
2005 void cpu_arm_set_cp_io(CPUARMState *env, int cpnum,
2006                 ARMReadCPFunc *cp_read, ARMWriteCPFunc *cp_write,
2007                 void *opaque)
2008 {
2009     if (cpnum < 0 || cpnum > 14) {
2010         cpu_abort(env, "Bad coprocessor number: %i\n", cpnum);
2011         return;
2012     }
2013 
2014     env->cp[cpnum].cp_read = cp_read;
2015     env->cp[cpnum].cp_write = cp_write;
2016     env->cp[cpnum].opaque = opaque;
2017 }
2018 
2019 #endif
2020 
2021 /* Note that signed overflow is undefined in C.  The following routines are
2022    careful to use unsigned types where modulo arithmetic is required.
2023    Failure to do so _will_ break on newer gcc.  */
2024 
2025 /* Signed saturating arithmetic.  */
2026 
2027 /* Perform 16-bit signed saturating addition.  */
2028 static inline uint16_t add16_sat(uint16_t a, uint16_t b)
2029 {
2030     uint16_t res;
2031 
2032     res = a + b;
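    /* Signed overflow occurred iff both operands have the same sign and the
       result's sign differs; saturate towards a's sign.  */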
2033     if (((res ^ a) & 0x8000) && !((a ^ b) & 0x8000)) {
2034         if (a & 0x8000)
2035             res = 0x8000;
2036         else
2037             res = 0x7fff;
2038     }
2039     return res;
2040 }
2041 
2042 /* Perform 8-bit signed saturating addition.  */
2043 static inline uint8_t add8_sat(uint8_t a, uint8_t b)
2044 {
2045     uint8_t res;
2046 
2047     res = a + b;
2048     if (((res ^ a) & 0x80) && !((a ^ b) & 0x80)) {
2049         if (a & 0x80)
2050             res = 0x80;
2051         else
2052             res = 0x7f;
2053     }
2054     return res;
2055 }
2056 
2057 /* Perform 16-bit signed saturating subtraction.  */
2058 static inline uint16_t sub16_sat(uint16_t a, uint16_t b)
2059 {
2060     uint16_t res;
2061 
2062     res = a - b;
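    /* For subtraction, overflow can only occur when the operands have
       different signs and the result's sign differs from a's.  */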
2063     if (((res ^ a) & 0x8000) && ((a ^ b) & 0x8000)) {
2064         if (a & 0x8000)
2065             res = 0x8000;
2066         else
2067             res = 0x7fff;
2068     }
2069     return res;
2070 }
2071 
2072 /* Perform 8-bit signed saturating subtraction.  */
2073 static inline uint8_t sub8_sat(uint8_t a, uint8_t b)
2074 {
2075     uint8_t res;
2076 
2077     res = a - b;
2078     if (((res ^ a) & 0x80) && ((a ^ b) & 0x80)) {
2079         if (a & 0x80)
2080             res = 0x80;
2081         else
2082             res = 0x7f;
2083     }
2084     return res;
2085 }
2086 
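/* The ADD16/SUB16/ADD8/SUB8 and PFX macros parameterise op_addsub.h, which is
   included once per variant below to generate the parallel add/subtract
   helpers (saturating, modulo and halving, signed and unsigned).  */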
2087 #define ADD16(a, b, n) RESULT(add16_sat(a, b), n, 16);
2088 #define SUB16(a, b, n) RESULT(sub16_sat(a, b), n, 16);
2089 #define ADD8(a, b, n)  RESULT(add8_sat(a, b), n, 8);
2090 #define SUB8(a, b, n)  RESULT(sub8_sat(a, b), n, 8);
2091 #define PFX q
2092 
2093 #include "op_addsub.h"
2094 
2095 /* Unsigned saturating arithmetic.  */
2096 static inline uint16_t add16_usat(uint16_t a, uint16_t b)
2097 {
2098     uint16_t res;
2099     res = a + b;
2100     if (res < a)
2101         res = 0xffff;
2102     return res;
2103 }
2104 
2105 static inline uint16_t sub16_usat(uint16_t a, uint16_t b)
2106 {
2107     if (a < b)
2108         return a - b;
2109     else
2110         return 0;
2111 }
2112 
2113 static inline uint8_t add8_usat(uint8_t a, uint8_t b)
2114 {
2115     uint8_t res;
2116     res = a + b;
2117     if (res < a)
2118         res = 0xff;
2119     return res;
2120 }
2121 
2122 static inline uint8_t sub8_usat(uint8_t a, uint8_t b)
2123 {
2124     if (a < b)
2125         return a - b;
2126     else
2127         return 0;
2128 }
2129 
2130 #define ADD16(a, b, n) RESULT(add16_usat(a, b), n, 16);
2131 #define SUB16(a, b, n) RESULT(sub16_usat(a, b), n, 16);
2132 #define ADD8(a, b, n)  RESULT(add8_usat(a, b), n, 8);
2133 #define SUB8(a, b, n)  RESULT(sub8_usat(a, b), n, 8);
2134 #define PFX uq
2135 
2136 #include "op_addsub.h"
2137 
2138 /* Signed modulo arithmetic.  */
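/* The ARITH_GE variants also accumulate the CPSR GE bits later consumed by
   SEL: signed operations set GE when the result is non-negative, unsigned
   additions on carry-out, and unsigned subtractions when no borrow occurs.  */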
2139 #define SARITH16(a, b, n, op) do { \
2140     int32_t sum; \
2141     sum = (int16_t)((uint16_t)(a) op (uint16_t)(b)); \
2142     RESULT(sum, n, 16); \
2143     if (sum >= 0) \
2144         ge |= 3 << (n * 2); \
2145     } while(0)
2146 
2147 #define SARITH8(a, b, n, op) do { \
2148     int32_t sum; \
2149     sum = (int8_t)((uint8_t)(a) op (uint8_t)(b)); \
2150     RESULT(sum, n, 8); \
2151     if (sum >= 0) \
2152         ge |= 1 << n; \
2153     } while(0)
2154 
2155 
2156 #define ADD16(a, b, n) SARITH16(a, b, n, +)
2157 #define SUB16(a, b, n) SARITH16(a, b, n, -)
2158 #define ADD8(a, b, n)  SARITH8(a, b, n, +)
2159 #define SUB8(a, b, n)  SARITH8(a, b, n, -)
2160 #define PFX s
2161 #define ARITH_GE
2162 
2163 #include "op_addsub.h"
2164 
2165 /* Unsigned modulo arithmetic.  */
2166 #define ADD16(a, b, n) do { \
2167     uint32_t sum; \
2168     sum = (uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b); \
2169     RESULT(sum, n, 16); \
2170     if ((sum >> 16) == 1) \
2171         ge |= 3 << (n * 2); \
2172     } while(0)
2173 
2174 #define ADD8(a, b, n) do { \
2175     uint32_t sum; \
2176     sum = (uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b); \
2177     RESULT(sum, n, 8); \
2178     if ((sum >> 8) == 1) \
2179         ge |= 1 << n; \
2180     } while(0)
2181 
2182 #define SUB16(a, b, n) do { \
2183     uint32_t sum; \
2184     sum = (uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b); \
2185     RESULT(sum, n, 16); \
2186     if ((sum >> 16) == 0) \
2187         ge |= 3 << (n * 2); \
2188     } while(0)
2189 
2190 #define SUB8(a, b, n) do { \
2191     uint32_t sum; \
2192     sum = (uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b); \
2193     RESULT(sum, n, 8); \
2194     if ((sum >> 8) == 0) \
2195         ge |= 1 << n; \
2196     } while(0)
2197 
2198 #define PFX u
2199 #define ARITH_GE
2200 
2201 #include "op_addsub.h"
2202 
2203 /* Halved signed arithmetic.  */
2204 #define ADD16(a, b, n) \
2205   RESULT(((int32_t)(int16_t)(a) + (int32_t)(int16_t)(b)) >> 1, n, 16)
2206 #define SUB16(a, b, n) \
2207   RESULT(((int32_t)(int16_t)(a) - (int32_t)(int16_t)(b)) >> 1, n, 16)
2208 #define ADD8(a, b, n) \
2209   RESULT(((int32_t)(int8_t)(a) + (int32_t)(int8_t)(b)) >> 1, n, 8)
2210 #define SUB8(a, b, n) \
2211   RESULT(((int32_t)(int8_t)(a) - (int32_t)(int8_t)(b)) >> 1, n, 8)
2212 #define PFX sh
2213 
2214 #include "op_addsub.h"
2215 
2216 /* Halved unsigned arithmetic.  */
2217 #define ADD16(a, b, n) \
2218   RESULT(((uint32_t)(uint16_t)(a) + (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2219 #define SUB16(a, b, n) \
2220   RESULT(((uint32_t)(uint16_t)(a) - (uint32_t)(uint16_t)(b)) >> 1, n, 16)
2221 #define ADD8(a, b, n) \
2222   RESULT(((uint32_t)(uint8_t)(a) + (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2223 #define SUB8(a, b, n) \
2224   RESULT(((uint32_t)(uint8_t)(a) - (uint32_t)(uint8_t)(b)) >> 1, n, 8)
2225 #define PFX uh
2226 
2227 #include "op_addsub.h"
2228 
2229 static inline uint8_t do_usad(uint8_t a, uint8_t b)
2230 {
2231     if (a > b)
2232         return a - b;
2233     else
2234         return b - a;
2235 }
2236 
2237 /* Unsigned sum of absolute byte differences.  */
2238 uint32_t HELPER(usad8)(uint32_t a, uint32_t b)
2239 {
2240     uint32_t sum;
2241     sum = do_usad(a, b);
2242     sum += do_usad(a >> 8, b >> 8);
2243     sum += do_usad(a >> 16, b >> 16);
2244     sum += do_usad(a >> 24, b >> 24);
2245     return sum;
2246 }
2247 
2248 /* For ARMv6 SEL instruction.  */
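/* 'flags' holds the four GE bits: each set bit selects the corresponding
   byte of 'a', otherwise the byte is taken from 'b'.  */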
2249 uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
2250 {
2251     uint32_t mask;
2252 
2253     mask = 0;
2254     if (flags & 1)
2255         mask |= 0xff;
2256     if (flags & 2)
2257         mask |= 0xff00;
2258     if (flags & 4)
2259         mask |= 0xff0000;
2260     if (flags & 8)
2261         mask |= 0xff000000;
2262     return (a & mask) | (b & ~mask);
2263 }
2264 
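/* Fold a 64-bit logical result into a 32-bit value suitable for the usual
   NZ flag update: the sign bit mirrors bit 63, and the value is zero only
   when the full 64-bit result is zero.  */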
2265 uint32_t HELPER(logicq_cc)(uint64_t val)
2266 {
2267     return (val >> 32) | (val != 0);
2268 }
2269 
2270 /* VFP support.  We follow the convention used for VFP instructions:
2271    Single precision routines have an "s" suffix, double precision a
2272    "d" suffix.  */
2273 
2274 /* Convert host exception flags to vfp form.  */
2275 static inline int vfp_exceptbits_from_host(int host_bits)
2276 {
2277     int target_bits = 0;
2278 
2279     if (host_bits & float_flag_invalid)
2280         target_bits |= 1;
2281     if (host_bits & float_flag_divbyzero)
2282         target_bits |= 2;
2283     if (host_bits & float_flag_overflow)
2284         target_bits |= 4;
2285     if (host_bits & float_flag_underflow)
2286         target_bits |= 8;
2287     if (host_bits & float_flag_inexact)
2288         target_bits |= 0x10;
2289     return target_bits;
2290 }
2291 
2292 uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
2293 {
2294     int i;
2295     uint32_t fpscr;
2296 
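    /* Reassemble the FPSCR from the stored copy, the unpacked vector
       length/stride fields (bits [18:16] and [21:20]) and the exception
       flags accumulated in the softfloat status.  */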
2297     fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
2298             | (env->vfp.vec_len << 16)
2299             | (env->vfp.vec_stride << 20);
2300     i = get_float_exception_flags(&env->vfp.fp_status);
2301     fpscr |= vfp_exceptbits_from_host(i);
2302     return fpscr;
2303 }
2304 
2305 /* Convert vfp exception flags to host form.  */
2306 static inline int vfp_exceptbits_to_host(int target_bits)
2307 {
2308     int host_bits = 0;
2309 
2310     if (target_bits & 1)
2311         host_bits |= float_flag_invalid;
2312     if (target_bits & 2)
2313         host_bits |= float_flag_divbyzero;
2314     if (target_bits & 4)
2315         host_bits |= float_flag_overflow;
2316     if (target_bits & 8)
2317         host_bits |= float_flag_underflow;
2318     if (target_bits & 0x10)
2319         host_bits |= float_flag_inexact;
2320     return host_bits;
2321 }
2322 
2323 void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
2324 {
2325     int i;
2326     uint32_t changed;
2327 
2328     changed = env->vfp.xregs[ARM_VFP_FPSCR];
2329     env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
2330     env->vfp.vec_len = (val >> 16) & 7;
2331     env->vfp.vec_stride = (val >> 20) & 3;
2332 
2333     changed ^= val;
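    /* FPSCR[23:22] is the rounding-mode field; remap it onto the softfloat
       rounding modes whenever it changes.  */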
2334     if (changed & (3 << 22)) {
2335         i = (val >> 22) & 3;
2336         switch (i) {
2337         case 0:
2338             i = float_round_nearest_even;
2339             break;
2340         case 1:
2341             i = float_round_up;
2342             break;
2343         case 2:
2344             i = float_round_down;
2345             break;
2346         case 3:
2347             i = float_round_to_zero;
2348             break;
2349         }
2350         set_float_rounding_mode(i, &env->vfp.fp_status);
2351     }
2352     if (changed & (1 << 24))
2353         set_flush_to_zero((val & (1 << 24)) != 0, &env->vfp.fp_status);
2354     if (changed & (1 << 25))
2355         set_default_nan_mode((val & (1 << 25)) != 0, &env->vfp.fp_status);
2356 
2357     i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
2358     set_float_exception_flags(i, &env->vfp.fp_status);
2359 }
2360 
2361 #define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))
2362 
2363 #define VFP_BINOP(name) \
2364 float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
2365 { \
2366     return float32_ ## name (a, b, &env->vfp.fp_status); \
2367 } \
2368 float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
2369 { \
2370     return float64_ ## name (a, b, &env->vfp.fp_status); \
2371 }
2372 VFP_BINOP(add)
2373 VFP_BINOP(sub)
2374 VFP_BINOP(mul)
2375 VFP_BINOP(div)
2376 #undef VFP_BINOP
2377 
2378 float32 VFP_HELPER(neg, s)(float32 a)
2379 {
2380     return float32_chs(a);
2381 }
2382 
2383 float64 VFP_HELPER(neg, d)(float64 a)
2384 {
2385     return float64_chs(a);
2386 }
2387 
2388 float32 VFP_HELPER(abs, s)(float32 a)
2389 {
2390     return float32_abs(a);
2391 }
2392 
2393 float64 VFP_HELPER(abs, d)(float64 a)
2394 {
2395     return float64_abs(a);
2396 }
2397 
2398 float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
2399 {
2400     return float32_sqrt(a, &env->vfp.fp_status);
2401 }
2402 
2403 float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
2404 {
2405     return float64_sqrt(a, &env->vfp.fp_status);
2406 }
2407 
2408 /* XXX: check quiet/signaling case */
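/* The flag nibble written to FPSCR[31:28] encodes NZCV: equal -> 0110,
   less than -> 1000, greater than -> 0010, unordered -> 0011.  */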
2409 #define DO_VFP_cmp(p, type) \
2410 void VFP_HELPER(cmp, p)(type a, type b, CPUState *env)  \
2411 { \
2412     uint32_t flags; \
2413     switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
2414     case 0: flags = 0x6; break; \
2415     case -1: flags = 0x8; break; \
2416     case 1: flags = 0x2; break; \
2417     default: case 2: flags = 0x3; break; \
2418     } \
2419     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2420         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2421 } \
2422 void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
2423 { \
2424     uint32_t flags; \
2425     switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
2426     case 0: flags = 0x6; break; \
2427     case -1: flags = 0x8; break; \
2428     case 1: flags = 0x2; break; \
2429     default: case 2: flags = 0x3; break; \
2430     } \
2431     env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
2432         | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
2433 }
2434 DO_VFP_cmp(s, float32)
2435 DO_VFP_cmp(d, float64)
2436 #undef DO_VFP_cmp
2437 
2438 /* Helper routines to perform bitwise copies between float and int.  */
2439 static inline float32 vfp_itos(uint32_t i)
2440 {
2441     union {
2442         uint32_t i;
2443         float32 s;
2444     } v;
2445 
2446     v.i = i;
2447     return v.s;
2448 }
2449 
2450 static inline uint32_t vfp_stoi(float32 s)
2451 {
2452     union {
2453         uint32_t i;
2454         float32 s;
2455     } v;
2456 
2457     v.s = s;
2458     return v.i;
2459 }
2460 
2461 static inline float64 vfp_itod(uint64_t i)
2462 {
2463     union {
2464         uint64_t i;
2465         float64 d;
2466     } v;
2467 
2468     v.i = i;
2469     return v.d;
2470 }
2471 
2472 static inline uint64_t vfp_dtoi(float64 d)
2473 {
2474     union {
2475         uint64_t i;
2476         float64 d;
2477     } v;
2478 
2479     v.d = d;
2480     return v.i;
2481 }
2482 
2483 /* Integer to float conversion.  */
2484 float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
2485 {
2486     return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
2487 }
2488 
2489 float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
2490 {
2491     return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
2492 }
2493 
2494 float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
2495 {
2496     return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
2497 }
2498 
2499 float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
2500 {
2501     return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
2502 }
2503 
2504 /* Float to integer conversion.  */
2505 float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
2506 {
2507     return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
2508 }
2509 
2510 float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
2511 {
2512     return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
2513 }
2514 
2515 float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
2516 {
2517     return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
2518 }
2519 
2520 float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
2521 {
2522     return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
2523 }
2524 
2525 float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
2526 {
2527     return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
2528 }
2529 
2530 float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
2531 {
2532     return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
2533 }
2534 
2535 float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
2536 {
2537     return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
2538 }
2539 
2540 float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
2541 {
2542     return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
2543 }
2544 
2545 /* floating point conversion */
2546 float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
2547 {
2548     return float32_to_float64(x, &env->vfp.fp_status);
2549 }
2550 
2551 float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
2552 {
2553     return float64_to_float32(x, &env->vfp.fp_status);
2554 }
2555 
2556 /* VFP3 fixed point conversion.  */
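/* Each to-float conversion reinterprets the register as an integer, converts,
   then scales by 2^-shift; each to-fixed conversion scales by 2^shift and
   converts with round-to-zero.  */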
2557 #define VFP_CONV_FIX(name, p, ftype, itype, sign) \
2558 ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
2559 { \
2560     ftype tmp; \
2561     tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
2562                                   &env->vfp.fp_status); \
2563     return ftype##_scalbn(tmp, -(int)shift, &env->vfp.fp_status); \
2564 } \
2565 ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
2566 { \
2567     ftype tmp; \
2568     tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
2569     return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
2570         &env->vfp.fp_status)); \
2571 }
2572 
2573 VFP_CONV_FIX(sh, d, float64, int16, )
2574 VFP_CONV_FIX(sl, d, float64, int32, )
2575 VFP_CONV_FIX(uh, d, float64, uint16, u)
2576 VFP_CONV_FIX(ul, d, float64, uint32, u)
2577 VFP_CONV_FIX(sh, s, float32, int16, )
2578 VFP_CONV_FIX(sl, s, float32, int32, )
2579 VFP_CONV_FIX(uh, s, float32, uint16, u)
2580 VFP_CONV_FIX(ul, s, float32, uint32, u)
2581 #undef VFP_CONV_FIX
2582 
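/* VRECPS/VRSQRTS: Newton-Raphson step factors, computed here as 2 - a*b and
   3 - a*b respectively, used to refine reciprocal and reciprocal square root
   estimates.  */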
2583 float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
2584 {
2585     float_status *s = &env->vfp.fp_status;
2586     float32 two = int32_to_float32(2, s);
2587     return float32_sub(two, float32_mul(a, b, s), s);
2588 }
2589 
2590 float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
2591 {
2592     float_status *s = &env->vfp.fp_status;
2593     float32 three = int32_to_float32(3, s);
2594     return float32_sub(three, float32_mul(a, b, s), s);
2595 }
2596 
2597 /* NEON helpers.  */
2598 
2599 /* TODO: The architecture specifies the value that the estimate functions
2600    should return.  We return the exact reciprocal/root instead.  */
2601 float32 HELPER(recpe_f32)(float32 a, CPUState *env)
2602 {
2603     float_status *s = &env->vfp.fp_status;
2604     float32 one = int32_to_float32(1, s);
2605     return float32_div(one, a, s);
2606 }
2607 
2608 float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
2609 {
2610     float_status *s = &env->vfp.fp_status;
2611     float32 one = int32_to_float32(1, s);
2612     return float32_div(one, float32_sqrt(a, s), s);
2613 }
2614 
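/* The unsigned estimate helpers treat the 32-bit input as a fixed-point
   fraction: scale it down by 2^32, reuse the floating-point estimate, then
   scale the result back up by 2^31 before converting to an integer.  */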
2615 uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
2616 {
2617     float_status *s = &env->vfp.fp_status;
2618     float32 tmp;
2619     tmp = int32_to_float32(a, s);
2620     tmp = float32_scalbn(tmp, -32, s);
2621     tmp = helper_recpe_f32(tmp, env);
2622     tmp = float32_scalbn(tmp, 31, s);
2623     return float32_to_int32(tmp, s);
2624 }
2625 
2626 uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
2627 {
2628     float_status *s = &env->vfp.fp_status;
2629     float32 tmp;
2630     tmp = int32_to_float32(a, s);
2631     tmp = float32_scalbn(tmp, -32, s);
2632     tmp = helper_rsqrte_f32(tmp, env);
2633     tmp = float32_scalbn(tmp, 31, s);
2634     return float32_to_int32(tmp, s);
2635 }
2636 
2637 #ifdef CONFIG_TRACE
2638 #include "trace.h"
2639 void  HELPER(traceTicks)(uint32_t  ticks)
2640 {
2641     sim_time += ticks;
2642 }
2643 
2644 void  HELPER(traceInsn)(void)
2645 {
2646     trace_insn_helper();
2647 }
2648 
2649 #if HOST_LONG_BITS == 32
2650 void HELPER(traceBB32)(uint64_t  bb_num, uint32_t  tb)
2651 {
2652     trace_bb_helper(bb_num, (void*)tb);
2653 }
2654 #endif
2655 
2656 #if HOST_LONG_BITS == 64
2657 void HELPER(traceBB64)(uint64_t  bb_num, uint64_t  tb)
2658 {
2659     trace_bb_helper(bb_num, (void*)tb);
2660 }
2661 #endif
2662 
2663 #endif /* CONFIG_TRACE */
2664 
2665 void HELPER(set_teecr)(CPUState *env, uint32_t val)
2666 {
2667     val &= 1;
2668     if (env->teecr != val) {
2669         env->teecr = val;
2670         tb_flush(env);
2671     }
2672 }
2673 
2674 #ifdef CONFIG_MEMCHECK
2675 void HELPER(on_call)(void* pc, void* ret) {
2676     memcheck_on_call((target_ulong)pc, (target_ulong)ret);
2677 }
2678 
2679 void HELPER(on_ret)(void* ret) {
2680     memcheck_on_ret((target_ulong)ret);
2681 }
2682 #endif  // CONFIG_MEMCHECK
2683