/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS.  */
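/* For example, with BITS = 5 the representable range is -16..15:
   shifting -16 up by 27 bits and arithmetically back down again
   reproduces -16, so check_fit_tl(-16, 5) is true, while -17 comes
   back as 15 and fails the comparison.  */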
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources.  */
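/* For example, mask = 0x00ff0000 is a single run of ones: mask & -mask
   isolates the lowest set bit (0x00010000), adding it carries out the
   top of the run (giving 0x01000000), and the power-of-two test below
   succeeds.  A split mask such as 0x00ff00ff fails the final test.  */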
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources.  */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

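/* PA-RISC "low sign extended" immediates keep the sign in the least
   significant bit of the field, with the remaining bits shifted up by
   one.  For example, low_sign_ext(-3, 11) gives 0x7fb: the low ten
   bits of -3 (0x3fd) shifted left, with the sign bit in bit 0.  */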
static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}

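/* PA-RISC scatters the bits of branch displacements and of the 21-bit
   immediate field non-contiguously across the instruction word.  The
   reassemble_* helpers below permute a linear value into instruction
   order; e.g. reassemble_12 puts the sign bit of a 12-bit displacement
   in instruction bit 0 and the low ten bits at bits 3..12.  */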
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}

/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler.  */
#define R_PARISC_PCREL12F  R_PARISC_NONE

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
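    /* Branch displacements are relative to the instruction following
       the delay slot (code_ptr + 8) and are counted in words, hence
       the bias of 8 and the shift by 2.  */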
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop.  */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

/* maximum number of registers used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}

#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

static void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         int ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
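        /* Build the constant in two steps: LDIL sets the high 21 bits
           (31..11), then LDO adds the low 11.  E.g. arg = 0x12345678
           splits into hi = 0x2468a (LDIL deposits 0x12345000) and
           lo = 0x678.  */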
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}

static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}

/* This function is required by tcg.c.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c.  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0.  */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian.  */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p.  */
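    /* For example, m = 0x00ff0000 gives bs0 = 16 (first set bit) and
       bs1 = 24 (first clear bit above it), so the DEPI below deposits
       bs1 - bs0 = 8 one-bits into RET starting at bit 16.  */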
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
              | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
}

static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p.  */
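    /* Two shapes are handled: m = 0x000000ff gives ls0 = 8, ls1 = 32,
       so a single EXTRU keeps the low 8 bits; a mask with a hole such
       as m = 0xffff00ff gives ls0 = 8, ls1 = 16, ms0 = 32, and the
       DEPI below clears the ls1 - ls0 = 8 bits of the hole.  */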
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
                  | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
    }
}

static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);          /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);   /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                          /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);      /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);    /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);    /* ret =  DCBA */
}

static void tcg_out_call(TCGContext *s, void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}

static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU.  */
    tcg_out_ldst(s, arg1, TCG_REG_SP, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded.  */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack.  */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_SP)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested.  */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_SP, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_SP, STACK_TEMP_OFS + 4, INSN_LDW);
    }
}

static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
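    /* If DESTL aliases one of the high-part inputs, writing it first
       would clobber an operand of the ADDC below; in that case compute
       the low part into R20 and copy it into place afterward.  */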
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}

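/* Map TCG comparison conditions onto PA-RISC compare conditions.  The
   COND_FALSE bit requests the negated form, which the emitters below
   obtain via the "false" instruction variants (e.g. COMBF) or the
   negation bit of COMCLR.  */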
static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};

static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru.  */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation.  */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want.  */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well.  */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        break;

    default:
        tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
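    /* COMCLR writes 0 to RET and nullifies the next instruction when
       its condition holds.  With the inverted condition, the MOVI of 1
       below is skipped exactly when COND is false, leaving the boolean
       result in RET.  */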
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    if (ret != al && ret != ah
        && (blconst || ret != bl)
        && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    default:
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}

#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
          r1 = addr_reg >> TARGET_PAGE_BITS;
          r1 &= CPU_TLB_SIZE - 1;
          r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
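        /* Round OFFSET to a multiple of 0x800 so that the residual has
           magnitude at most 0x400; e.g. offset = 0x1234 yields
           ret = 0x1000 with residual -0x234.  */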
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address
       to catch unaligned accesses and force them onto the slow path.  Do
       this computation after having issued the load from the TLB slot to
       give the load time to complete.  */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
#endif

static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg,
                                   int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode.  */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register.  */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc & 3, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }

    switch (opc) {
    case 0:
        tcg_out_andi(s, argreg--, datalo_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R23, datahi_reg);
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R24, datalo_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_SP,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG,
                      INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}

static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
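    /* BV without the nullify bit executes the following instruction in
       its delay slot, so the ADDI/MOVI that materializes the return
       value rides along with the branch through R18.  */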
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}

static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
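        /* NOT is computed as SUBI -1, arg: SUBI is a reversed
           subtract, so the result is -1 - arg == ~arg.  */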
1425         tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
1426         break;
1427     case INDEX_op_ext8s_i32:
1428         tcg_out_ext8s(s, args[0], args[1]);
1429         break;
1430     case INDEX_op_ext16s_i32:
1431         tcg_out_ext16s(s, args[0], args[1]);
1432         break;
1433 
1434     case INDEX_op_brcond_i32:
1435         tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
1436         break;
1437     case INDEX_op_brcond2_i32:
1438         tcg_out_brcond2(s, args[4], args[0], args[1],
1439                         args[2], const_args[2],
1440                         args[3], const_args[3], args[5]);
1441         break;
1442 
1443     case INDEX_op_setcond_i32:
1444         tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1445         break;
1446     case INDEX_op_setcond2_i32:
1447         tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
1448                          args[3], const_args[3], args[4], const_args[4]);
1449         break;
1450 
1451     case INDEX_op_add2_i32:
1452         tcg_out_add2(s, args[0], args[1], args[2], args[3],
1453                      args[4], args[5], const_args[4]);
1454         break;
1455 
1456     case INDEX_op_sub2_i32:
1457         tcg_out_sub2(s, args[0], args[1], args[2], args[3],
1458                      args[4], args[5], const_args[2], const_args[4]);
1459         break;
1460 
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

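/* A rough legend for the constraint letters used below -- the parsing in
   target_parse_constraint earlier in this file is authoritative: "r" is
   any register; "Z" additionally accepts the constant 0 (mapped onto
   %r0); "I" a signed 11-bit immediate; "J" a signed 5-bit immediate;
   "K" a constant whose negation fits in 11 signed bits; "M" and "O"
   constants accepted by and_mask_p and or_mask_p respectively; and "L"
   a register usable for qemu_ld/st arguments.  */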
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    /* R17 is the global env, so no need to save.  */
    TCG_REG_R18
};

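/* A sketch of the frame the prologue below builds.  The HPPA stack
   grows upward, and per the 32-bit runtime convention the return
   address is saved at a fixed slot in the *caller's* frame marker:

       old SP - 20      return address (RP)
       old SP + i*4     callee-saved registers, R4 first
       new SP           old SP + frame_size                          */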
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker.  */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers.  */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

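    /* Standard power-of-two round-up: (x + align - 1) & -align.  For
       example, with TCG_TARGET_STACK_ALIGN == 64, a raw frame size of
       180 bytes becomes (180 + 63) & ~63 == 192.  */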
    /* Align the allocated space.  */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -20);

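    /* stwm stores the register at the current SP and then adds the
       (positive, since the stack grows upward) displacement to SP,
       so one instruction both saves R4 and allocates the frame.  */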
    /* Allocate stack frame, saving the first register at the same time.  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

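    /* When a constant guest base address is in use, materialize it once
       here and reserve its register so that the allocator never hands
       it out during translation.  */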
#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

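    /* ble through %sr4 branches to the TB address passed in %r26 and
       deposits the return address in %r31, the architected ble link
       register; it is then copied into R18, the reserved "return
       pointer" through which exit_tb gets back to this epilogue.  */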
    /* Jump to TB, and adjust R18 to be the return address.  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R26));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_SP, -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, -frame_size + i * 4);
    }

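    /* The ldwm is placed in the delay slot of the bv return branch: it
       restores the first callee-saved register and subtracts frame_size
       from SP in a single instruction, popping the frame just as the
       branch completes.  */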
    /* Deallocate stack frame and return.  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_SP, -frame_size, INSN_LDWM);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}