1
2 /*---------------------------------------------------------------*/
3 /*--- begin host_mips_defs.c ---*/
4 /*---------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2010-2013 RT-RK
11 mips-valgrind@rt-rk.com
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29 */
30
31 #include "libvex_basictypes.h"
32 #include "libvex.h"
33 #include "libvex_trc_values.h"
34
35 #include "main_util.h"
36 #include "host_generic_regs.h"
37 #include "host_mips_defs.h"
38
39 /* guest_COND offset. */
40 #define COND_OFFSET(__mode64) (__mode64 ? 612 : 448)
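/* For reference: COND_OFFSET(True) evaluates to 612 and COND_OFFSET(False)
   to 448, i.e. the byte offset of the guest_COND field in the 64-bit and
   32-bit guest state respectively; the exit code printed by ppMIPSInstr
   below refers to this field as guest_COND. */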
41
42 /* Register number for guest state pointer in host code. */
43 #define GuestSP 23
44
45 #define MkHRegGPR(_n, _mode64) \
46 mkHReg(_n, _mode64 ? HRcInt64 : HRcInt32, False)
47
48 #define MkHRegFPR(_n, _mode64) \
49 mkHReg(_n, _mode64 ? HRcFlt64 : HRcFlt32, False)
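/* Illustrative note: these wrappers simply pair a real register number with
   the register class appropriate for the mode.  For instance,
   MkHRegGPR(GuestSP, True) denotes integer register $23 with class HRcInt64,
   which is the guest state pointer when generating 64-bit code. */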
50
/*---------------- Registers ----------------*/

void ppHRegMIPS(HReg reg, Bool mode64)
{
   Int r;
   static const HChar *ireg32_names[35]
      = { "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
          "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
          "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
          "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
          "%32", "%33", "%34",
   };

   static const HChar *freg32_names[32]
      = { "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
          "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
          "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
          "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31"
   };

   static const HChar *freg64_names[32]
      = { "$d0", "$d1", "$d2", "$d3", "$d4", "$d5", "$d6", "$d7",
          "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
   };

   /* Be generic for all virtual regs. */
   if (hregIsVirtual(reg)) {
      ppHReg(reg);
      return;
   }

   /* But specific for real regs. */
   vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
           hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64);

   switch (hregClass(reg)) {
      case HRcInt32:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", ireg32_names[r]);
         return;
      case HRcInt64:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", ireg32_names[r]);
         return;
      case HRcFlt32:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", freg32_names[r]);
         return;
      case HRcFlt64:
         r = hregNumber(reg);
         vassert(r >= 0 && r < 32);
         vex_printf("%s", freg64_names[r]);
         return;
      default:
         vpanic("ppHRegMIPS");
         break;
   }

   return;
}
115
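/* Usage sketch (illustrative only): ppHRegMIPS(hregMIPS_GPR29(False), False)
   prints "$29", while any virtual register is handed off to the generic
   ppHReg printer above. */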
HReg hregMIPS_GPR0(Bool mode64)
{
   return MkHRegGPR(0, mode64);
}

HReg hregMIPS_GPR1(Bool mode64)
{
   return MkHRegGPR(1, mode64);
}

HReg hregMIPS_GPR2(Bool mode64)
{
   return MkHRegGPR(2, mode64);
}

HReg hregMIPS_GPR3(Bool mode64)
{
   return MkHRegGPR(3, mode64);
}

HReg hregMIPS_GPR4(Bool mode64)
{
   return MkHRegGPR(4, mode64);
}

HReg hregMIPS_GPR5(Bool mode64)
{
   return MkHRegGPR(5, mode64);
}

HReg hregMIPS_GPR6(Bool mode64)
{
   return MkHRegGPR(6, mode64);
}

HReg hregMIPS_GPR7(Bool mode64)
{
   return MkHRegGPR(7, mode64);
}

HReg hregMIPS_GPR8(Bool mode64)
{
   return MkHRegGPR(8, mode64);
}

HReg hregMIPS_GPR9(Bool mode64)
{
   return MkHRegGPR(9, mode64);
}

HReg hregMIPS_GPR10(Bool mode64)
{
   return MkHRegGPR(10, mode64);
}

HReg hregMIPS_GPR11(Bool mode64)
{
   return MkHRegGPR(11, mode64);
}

HReg hregMIPS_GPR12(Bool mode64)
{
   return MkHRegGPR(12, mode64);
}

HReg hregMIPS_GPR13(Bool mode64)
{
   return MkHRegGPR(13, mode64);
}

HReg hregMIPS_GPR14(Bool mode64)
{
   return MkHRegGPR(14, mode64);
}

HReg hregMIPS_GPR15(Bool mode64)
{
   return MkHRegGPR(15, mode64);
}

HReg hregMIPS_GPR16(Bool mode64)
{
   return MkHRegGPR(16, mode64);
}

HReg hregMIPS_GPR17(Bool mode64)
{
   return MkHRegGPR(17, mode64);
}

HReg hregMIPS_GPR18(Bool mode64)
{
   return MkHRegGPR(18, mode64);
}

HReg hregMIPS_GPR19(Bool mode64)
{
   return MkHRegGPR(19, mode64);
}

HReg hregMIPS_GPR20(Bool mode64)
{
   return MkHRegGPR(20, mode64);
}

HReg hregMIPS_GPR21(Bool mode64)
{
   return MkHRegGPR(21, mode64);
}

HReg hregMIPS_GPR22(Bool mode64)
{
   return MkHRegGPR(22, mode64);
}

HReg hregMIPS_GPR23(Bool mode64)
{
   return MkHRegGPR(23, mode64);
}

HReg hregMIPS_GPR24(Bool mode64)
{
   return MkHRegGPR(24, mode64);
}

HReg hregMIPS_GPR25(Bool mode64)
{
   return MkHRegGPR(25, mode64);
}

HReg hregMIPS_GPR26(Bool mode64)
{
   return MkHRegGPR(26, mode64);
}

HReg hregMIPS_GPR27(Bool mode64)
{
   return MkHRegGPR(27, mode64);
}

HReg hregMIPS_GPR28(Bool mode64)
{
   return MkHRegGPR(28, mode64);
}

HReg hregMIPS_GPR29(Bool mode64)
{
   return MkHRegGPR(29, mode64);
}

HReg hregMIPS_GPR30(Bool mode64)
{
   return MkHRegGPR(30, mode64);
}

HReg hregMIPS_GPR31(Bool mode64)
{
   return MkHRegGPR(31, mode64);
}

HReg hregMIPS_F0(Bool mode64)
{
   return MkHRegFPR(0, mode64);
}

HReg hregMIPS_F1(Bool mode64)
{
   return MkHRegFPR(1, mode64);
}

HReg hregMIPS_F2(Bool mode64)
{
   return MkHRegFPR(2, mode64);
}

HReg hregMIPS_F3(Bool mode64)
{
   return MkHRegFPR(3, mode64);
}

HReg hregMIPS_F4(Bool mode64)
{
   return MkHRegFPR(4, mode64);
}

HReg hregMIPS_F5(Bool mode64)
{
   return MkHRegFPR(5, mode64);
}

HReg hregMIPS_F6(Bool mode64)
{
   return MkHRegFPR(6, mode64);
}

HReg hregMIPS_F7(Bool mode64)
{
   return MkHRegFPR(7, mode64);
}

HReg hregMIPS_F8(Bool mode64)
{
   return MkHRegFPR(8, mode64);
}

HReg hregMIPS_F9(Bool mode64)
{
   return MkHRegFPR(9, mode64);
}

HReg hregMIPS_F10(Bool mode64)
{
   return MkHRegFPR(10, mode64);
}

HReg hregMIPS_F11(Bool mode64)
{
   return MkHRegFPR(11, mode64);
}

HReg hregMIPS_F12(Bool mode64)
{
   return MkHRegFPR(12, mode64);
}

HReg hregMIPS_F13(Bool mode64)
{
   return MkHRegFPR(13, mode64);
}

HReg hregMIPS_F14(Bool mode64)
{
   return MkHRegFPR(14, mode64);
}

HReg hregMIPS_F15(Bool mode64)
{
   return MkHRegFPR(15, mode64);
}

HReg hregMIPS_F16(Bool mode64)
{
   return MkHRegFPR(16, mode64);
}

HReg hregMIPS_F17(Bool mode64)
{
   return MkHRegFPR(17, mode64);
}

HReg hregMIPS_F18(Bool mode64)
{
   return MkHRegFPR(18, mode64);
}

HReg hregMIPS_F19(Bool mode64)
{
   return MkHRegFPR(19, mode64);
}

HReg hregMIPS_F20(Bool mode64)
{
   return MkHRegFPR(20, mode64);
}

HReg hregMIPS_F21(Bool mode64)
{
   return MkHRegFPR(21, mode64);
}

HReg hregMIPS_F22(Bool mode64)
{
   return MkHRegFPR(22, mode64);
}

HReg hregMIPS_F23(Bool mode64)
{
   return MkHRegFPR(23, mode64);
}

HReg hregMIPS_F24(Bool mode64)
{
   return MkHRegFPR(24, mode64);
}

HReg hregMIPS_F25(Bool mode64)
{
   return MkHRegFPR(25, mode64);
}

HReg hregMIPS_F26(Bool mode64)
{
   return MkHRegFPR(26, mode64);
}

HReg hregMIPS_F27(Bool mode64)
{
   return MkHRegFPR(27, mode64);
}

HReg hregMIPS_F28(Bool mode64)
{
   return MkHRegFPR(28, mode64);
}

HReg hregMIPS_F29(Bool mode64)
{
   return MkHRegFPR(29, mode64);
}

HReg hregMIPS_F30(Bool mode64)
{
   return MkHRegFPR(30, mode64);
}

HReg hregMIPS_F31(Bool mode64)
{
   return MkHRegFPR(31, mode64);
}

HReg hregMIPS_PC(Bool mode64)
{
   return mkHReg(32, mode64 ? HRcFlt64 : HRcFlt32, False);
}

HReg hregMIPS_HI(Bool mode64)
{
   return mkHReg(33, mode64 ? HRcFlt64 : HRcFlt32, False);
}

HReg hregMIPS_LO(Bool mode64)
{
   return mkHReg(34, mode64 ? HRcFlt64 : HRcFlt32, False);
}

HReg hregMIPS_D0(void)
{
   return mkHReg(0, HRcFlt64, False);
}

HReg hregMIPS_D1(void)
{
   return mkHReg(2, HRcFlt64, False);
}

HReg hregMIPS_D2(void)
{
   return mkHReg(4, HRcFlt64, False);
}

HReg hregMIPS_D3(void)
{
   return mkHReg(6, HRcFlt64, False);
}

HReg hregMIPS_D4(void)
{
   return mkHReg(8, HRcFlt64, False);
}

HReg hregMIPS_D5(void)
{
   return mkHReg(10, HRcFlt64, False);
}

HReg hregMIPS_D6(void)
{
   return mkHReg(12, HRcFlt64, False);
}

HReg hregMIPS_D7(void)
{
   return mkHReg(14, HRcFlt64, False);
}

HReg hregMIPS_D8(void)
{
   return mkHReg(16, HRcFlt64, False);
}

HReg hregMIPS_D9(void)
{
   return mkHReg(18, HRcFlt64, False);
}

HReg hregMIPS_D10(void)
{
   return mkHReg(20, HRcFlt64, False);
}

HReg hregMIPS_D11(void)
{
   return mkHReg(22, HRcFlt64, False);
}

HReg hregMIPS_D12(void)
{
   return mkHReg(24, HRcFlt64, False);
}

HReg hregMIPS_D13(void)
{
   return mkHReg(26, HRcFlt64, False);
}

HReg hregMIPS_D14(void)
{
   return mkHReg(28, HRcFlt64, False);
}

HReg hregMIPS_D15(void)
{
   return mkHReg(30, HRcFlt64, False);
}

HReg hregMIPS_FIR(void)
{
   return mkHReg(35, HRcInt32, False);
}

HReg hregMIPS_FCCR(void)
{
   return mkHReg(36, HRcInt32, False);
}

HReg hregMIPS_FEXR(void)
{
   return mkHReg(37, HRcInt32, False);
}

HReg hregMIPS_FENR(void)
{
   return mkHReg(38, HRcInt32, False);
}

HReg hregMIPS_FCSR(void)
{
   return mkHReg(39, HRcInt32, False);
}

HReg hregMIPS_COND(void)
{
   return mkHReg(47, HRcInt32, False);
}
560
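/* Note on the numbering used above: 0..31 are the integer and FP register
   files, 32..34 stand for the PC, HI and LO pseudo-registers, and 35..39
   plus 47 are the FP control registers FIR, FCCR, FEXR, FENR, FCSR and the
   condition register. */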
void getAllocableRegs_MIPS(Int * nregs, HReg ** arr, Bool mode64)
562 {
   /* The list of allocable registers is shortened to fit MIPS32 mode on
      Loongson.  More precisely, we work around Loongson MIPS32 issues by
      avoiding the use of odd-numbered single-precision FP registers. */
566 if (mode64)
567 *nregs = 20;
568 else
569 *nregs = 28;
570 UInt i = 0;
571 *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
572
573 /* ZERO = constant 0
574 AT = assembler temporary
575 callee saves ones are listed first, since we prefer them
576 if they're available */
577 (*arr)[i++] = hregMIPS_GPR16(mode64);
578 (*arr)[i++] = hregMIPS_GPR17(mode64);
579 (*arr)[i++] = hregMIPS_GPR18(mode64);
580 (*arr)[i++] = hregMIPS_GPR19(mode64);
581 (*arr)[i++] = hregMIPS_GPR20(mode64);
582 (*arr)[i++] = hregMIPS_GPR21(mode64);
583 (*arr)[i++] = hregMIPS_GPR22(mode64);
584
585 (*arr)[i++] = hregMIPS_GPR12(mode64);
586 (*arr)[i++] = hregMIPS_GPR13(mode64);
587 (*arr)[i++] = hregMIPS_GPR14(mode64);
588 (*arr)[i++] = hregMIPS_GPR15(mode64);
589 (*arr)[i++] = hregMIPS_GPR24(mode64);
590 /* s7 (=guest_state) */
591 (*arr)[i++] = hregMIPS_F16(mode64);
592 (*arr)[i++] = hregMIPS_F18(mode64);
593 (*arr)[i++] = hregMIPS_F20(mode64);
594 (*arr)[i++] = hregMIPS_F22(mode64);
595 (*arr)[i++] = hregMIPS_F24(mode64);
596 (*arr)[i++] = hregMIPS_F26(mode64);
597 (*arr)[i++] = hregMIPS_F28(mode64);
598 (*arr)[i++] = hregMIPS_F30(mode64);
599 if (!mode64) {
600 /* Fake double floating point */
601 (*arr)[i++] = hregMIPS_D0();
602 (*arr)[i++] = hregMIPS_D1();
603 (*arr)[i++] = hregMIPS_D2();
604 (*arr)[i++] = hregMIPS_D3();
605 (*arr)[i++] = hregMIPS_D4();
606 (*arr)[i++] = hregMIPS_D5();
607 (*arr)[i++] = hregMIPS_D6();
608 (*arr)[i++] = hregMIPS_D7();
609 }
610 vassert(i == *nregs);
611
612 }
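/* A minimal usage sketch (hypothetical caller; the array is allocated from
   VEX's transient heap, so nothing needs to be freed explicitly):

      Int   nregs;
      HReg* regs;
      getAllocableRegs_MIPS(&nregs, &regs, False);
      // 28 entries in 32-bit mode: 12 GPRs, 8 even-numbered FPRs and the
      // 8 "fake double" registers $d0..$d7 listed above.

   The register allocator walks this array when choosing real registers for
   virtual ones. */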
613
614 /*----------------- Condition Codes ----------------------*/
615
const HChar *showMIPSCondCode(MIPSCondCode cond)
617 {
618 const HChar* ret;
619 switch (cond) {
620 case MIPScc_EQ:
621 ret = "EQ"; /* equal */
622 break;
623 case MIPScc_NE:
624 ret = "NEQ"; /* not equal */
625 break;
626 case MIPScc_HS:
627 ret = "GE"; /* >=u (Greater Than or Equal) */
628 break;
629 case MIPScc_LO:
630 ret = "LT"; /* <u (lower) */
631 break;
632 case MIPScc_MI:
633 ret = "MI"; /* minus (negative) */
634 break;
635 case MIPScc_PL:
636 ret = "PL"; /* plus (zero or +ve) */
637 break;
638 case MIPScc_VS:
639 ret = "VS"; /* overflow */
640 break;
641 case MIPScc_VC:
642 ret = "VC"; /* no overflow */
643 break;
644 case MIPScc_HI:
645 ret = "HI"; /* >u (higher) */
646 break;
647 case MIPScc_LS:
648 ret = "LS"; /* <=u (lower or same) */
649 break;
650 case MIPScc_GE:
651 ret = "GE"; /* >=s (signed greater or equal) */
652 break;
653 case MIPScc_LT:
654 ret = "LT"; /* <s (signed less than) */
655 break;
656 case MIPScc_GT:
657 ret = "GT"; /* >s (signed greater) */
658 break;
659 case MIPScc_LE:
660 ret = "LE"; /* <=s (signed less or equal) */
661 break;
662 case MIPScc_AL:
663 ret = "AL"; /* always (unconditional) */
664 break;
665 case MIPScc_NV:
666 ret = "NV"; /* never (unconditional): */
667 break;
668 default:
669 vpanic("showMIPSCondCode");
670 break;
671 }
672 return ret;
673 }
674
const HChar *showMIPSFpOp(MIPSFpOp op)
676 {
677 const HChar *ret;
678 switch (op) {
679 case Mfp_ADDD:
680 ret = "add.d";
681 break;
682 case Mfp_SUBD:
683 ret = "sub.d";
684 break;
685 case Mfp_MULD:
686 ret = "mul.d";
687 break;
688 case Mfp_DIVD:
689 ret = "div.d";
690 break;
691 case Mfp_MADDD:
692 ret = "madd.d";
693 break;
694 case Mfp_MSUBD:
695 ret = "msub.d";
696 break;
697 case Mfp_MADDS:
698 ret = "madd.s";
699 break;
700 case Mfp_MSUBS:
701 ret = "msub.s";
702 break;
703 case Mfp_ADDS:
704 ret = "add.s";
705 break;
706 case Mfp_SUBS:
707 ret = "sub.s";
708 break;
709 case Mfp_MULS:
710 ret = "mul.s";
711 break;
712 case Mfp_DIVS:
713 ret = "div.s";
714 break;
715 case Mfp_SQRTS:
716 ret = "sqrt.s";
717 break;
718 case Mfp_SQRTD:
719 ret = "sqrt.d";
720 break;
721 case Mfp_ABSS:
722 ret = "abs.s";
723 break;
724 case Mfp_ABSD:
725 ret = "abs.d";
726 break;
727 case Mfp_NEGS:
728 ret = "neg.s";
729 break;
730 case Mfp_NEGD:
731 ret = "neg.d";
732 break;
733 case Mfp_MOVS:
734 ret = "mov.s";
735 break;
736 case Mfp_MOVD:
737 ret = "mov.d";
738 break;
739 case Mfp_ROUNDWS:
740 ret = "round.w.s";
741 break;
742 case Mfp_ROUNDWD:
743 ret = "round.w.d";
744 break;
745 case Mfp_ROUNDLD:
746 ret = "round.l.d";
747 break;
748 case Mfp_FLOORWS:
749 ret = "floor.w.s";
750 break;
751 case Mfp_FLOORWD:
752 ret = "floor.w.d";
753 break;
754 case Mfp_CVTDW:
755 ret = "cvt.d.w";
756 break;
757 case Mfp_CVTDL:
758 ret = "cvt.d.l";
759 break;
760 case Mfp_CVTDS:
761 ret = "cvt.d.s";
762 break;
763 case Mfp_CVTSD:
764 ret = "cvt.s.d";
765 break;
766 case Mfp_CVTSW:
767 ret = "cvt.s.w";
768 break;
769 case Mfp_CVTWS:
770 ret = "cvt.w.s";
771 break;
772 case Mfp_CVTWD:
773 ret = "cvt.w.d";
774 break;
775 case Mfp_CVTLD:
776 ret = "cvt.l.d";
777 break;
778 case Mfp_CVTLS:
779 ret = "cvt.l.s";
780 break;
781 case Mfp_TRUWD:
782 ret = "trunc.w.d";
783 break;
784 case Mfp_TRUWS:
785 ret = "trunc.w.s";
786 break;
787 case Mfp_TRULD:
788 ret = "trunc.l.d";
789 break;
790 case Mfp_TRULS:
791 ret = "trunc.l.s";
792 break;
793 case Mfp_CEILWS:
794 ret = "ceil.w.s";
795 break;
796 case Mfp_CEILWD:
797 ret = "ceil.w.d";
798 break;
799 case Mfp_CEILLS:
800 ret = "ceil.l.s";
801 break;
802 case Mfp_CEILLD:
803 ret = "ceil.l.d";
804 break;
805 case Mfp_CMP_UN:
806 ret = "c.un.d";
807 break;
808 case Mfp_CMP_EQ:
809 ret = "c.eq.d";
810 break;
811 case Mfp_CMP_LT:
812 ret = "c.lt.d";
813 break;
814 case Mfp_CMP_NGT:
815 ret = "c.ngt.d";
816 break;
817 default:
818 vex_printf("Unknown op: %d", op);
819 vpanic("showMIPSFpOp");
820 break;
821 }
822 return ret;
823 }
824
825 /* Show move from/to fpr to/from gpr */
const HChar* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op )
827 {
828 const HChar *ret;
829 switch (op) {
830 case MFpGpMove_mfc1:
831 ret = "mfc1";
832 break;
833 case MFpGpMove_dmfc1:
834 ret = "dmfc1";
835 break;
836 case MFpGpMove_mtc1:
837 ret = "mtc1";
838 break;
839 case MFpGpMove_dmtc1:
840 ret = "dmtc1";
841 break;
842 default:
843 vpanic("showMIPSFpGpMoveOp");
844 break;
845 }
846 return ret;
847 }
848
849 /* Show floating point move conditional */
const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op )
851 {
852 const HChar *ret;
853 switch (op) {
854 case MFpMoveCond_movns:
855 ret = "movn.s";
856 break;
857 case MFpMoveCond_movnd:
858 ret = "movn.d";
859 break;
860 case MMoveCond_movn:
861 ret = "movn";
862 break;
863 default:
864 vpanic("showMIPSFpMoveCondOp");
865 break;
866 }
867 return ret;
868 }
869
870 /* --------- MIPSAMode: memory address expressions. --------- */
871
MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
873 {
874 MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
875 am->tag = Mam_IR;
876 am->Mam.IR.base = base;
877 am->Mam.IR.index = idx;
878
879 return am;
880 }
881
MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
883 {
884 MIPSAMode *am = LibVEX_Alloc(sizeof(MIPSAMode));
885 am->tag = Mam_RR;
886 am->Mam.RR.base = base;
887 am->Mam.RR.index = idx;
888
889 return am;
890 }
891
MIPSAMode *dopyMIPSAMode(MIPSAMode * am)
893 {
894 MIPSAMode* ret;
895 switch (am->tag) {
896 case Mam_IR:
897 ret = MIPSAMode_IR(am->Mam.IR.index, am->Mam.IR.base);
898 break;
899 case Mam_RR:
900 ret = MIPSAMode_RR(am->Mam.RR.index, am->Mam.RR.base);
901 break;
902 default:
903 vpanic("dopyMIPSAMode");
904 break;
905 }
906 return ret;
907 }
908
MIPSAMode *nextMIPSAModeFloat(MIPSAMode * am)
910 {
911 MIPSAMode* ret;
912 switch (am->tag) {
913 case Mam_IR:
914 ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
915 break;
916 case Mam_RR:
917 ret = MIPSAMode_RR(mkHReg(hregNumber(am->Mam.RR.index) + 1,
918 hregClass(am->Mam.RR.index),
919 hregIsVirtual(am->Mam.RR.index)),
920 am->Mam.RR.base);
921 break;
922 default:
         vpanic("nextMIPSAModeFloat");
924 break;
925 }
926 return ret;
927 }
928
MIPSAMode *nextMIPSAModeInt(MIPSAMode * am)
930 {
931 MIPSAMode* ret;
932 switch (am->tag) {
933 case Mam_IR:
934 ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
935 break;
936 case Mam_RR:
937 ret = MIPSAMode_RR(mkHReg(hregNumber(am->Mam.RR.index) + 1,
938 hregClass(am->Mam.RR.index),
939 hregIsVirtual(am->Mam.RR.index)),
940 am->Mam.RR.base);
941 break;
942 default:
         vpanic("nextMIPSAModeInt");
944 break;
945 }
946 return ret;
947 }
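/* Both helpers above return the amode one 32-bit word further on: for an IR
   amode the displacement grows by 4, for an RR amode the index register
   number is bumped by one.  They are presumably used when the second word of
   a doubleword value has to be addressed. */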
948
void ppMIPSAMode(MIPSAMode * am, Bool mode64)
950 {
951 switch (am->tag) {
952 case Mam_IR:
953 if (am->Mam.IR.index == 0)
954 vex_printf("0(");
955 else
956 vex_printf("%d(", (Int) am->Mam.IR.index);
957 ppHRegMIPS(am->Mam.IR.base, mode64);
958 vex_printf(")");
959 return;
960 case Mam_RR:
961 ppHRegMIPS(am->Mam.RR.base, mode64);
962 vex_printf(", ");
963 ppHRegMIPS(am->Mam.RR.index, mode64);
964 return;
965 default:
966 vpanic("ppMIPSAMode");
967 break;
968 }
969 }
970
static void addRegUsage_MIPSAMode(HRegUsage * u, MIPSAMode * am)
972 {
973 switch (am->tag) {
974 case Mam_IR:
975 addHRegUse(u, HRmRead, am->Mam.IR.base);
976 return;
977 case Mam_RR:
978 addHRegUse(u, HRmRead, am->Mam.RR.base);
979 addHRegUse(u, HRmRead, am->Mam.RR.index);
980 return;
981 default:
982 vpanic("addRegUsage_MIPSAMode");
983 break;
984 }
985 }
986
static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am)
988 {
989 switch (am->tag) {
990 case Mam_IR:
991 am->Mam.IR.base = lookupHRegRemap(m, am->Mam.IR.base);
992 return;
993 case Mam_RR:
994 am->Mam.RR.base = lookupHRegRemap(m, am->Mam.RR.base);
995 am->Mam.RR.index = lookupHRegRemap(m, am->Mam.RR.index);
996 return;
997 default:
998 vpanic("mapRegs_MIPSAMode");
999 break;
1000 }
1001 }
1002
1003 /* --------- Operand, which can be a reg or a u16/s16. --------- */
1004
MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
1006 {
1007 MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
1008 op->tag = Mrh_Imm;
1009 op->Mrh.Imm.syned = syned;
1010 op->Mrh.Imm.imm16 = imm16;
1011 /* If this is a signed value, ensure it's not -32768, so that we
1012 are guaranteed always to be able to negate if needed. */
1013 if (syned)
1014 vassert(imm16 != 0x8000);
1015 vassert(syned == True || syned == False);
1016 return op;
1017 }
1018
MIPSRH *MIPSRH_Reg(HReg reg)
1020 {
1021 MIPSRH *op = LibVEX_Alloc(sizeof(MIPSRH));
1022 op->tag = Mrh_Reg;
1023 op->Mrh.Reg.reg = reg;
1024 return op;
1025 }
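/* Illustrative example (r_dst, r_src and r_other are hypothetical HRegs):
   the two forms serve as the right-hand operand of ALU and shift
   instructions, and the pretty-printer picks the immediate or register
   mnemonic accordingly, e.g.

      MIPSInstr_Alu(Malu_ADD, r_dst, r_src, MIPSRH_Imm(True, 8));  // addiu
      MIPSInstr_Alu(Malu_ADD, r_dst, r_src, MIPSRH_Reg(r_other));  // addu

   (MIPSInstr_Alu is defined further down in this file.) */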
1026
void ppMIPSRH(MIPSRH * op, Bool mode64)
1028 {
1029 MIPSRHTag tag = op->tag;
1030 switch (tag) {
1031 case Mrh_Imm:
1032 if (op->Mrh.Imm.syned)
1033 vex_printf("%d", (Int) (Short) op->Mrh.Imm.imm16);
1034 else
1035 vex_printf("%u", (UInt) (UShort) op->Mrh.Imm.imm16);
1036 return;
1037 case Mrh_Reg:
1038 ppHRegMIPS(op->Mrh.Reg.reg, mode64);
1039 return;
1040 default:
1041 vpanic("ppMIPSRH");
1042 break;
1043 }
1044 }
1045
1046 /* An MIPSRH can only be used in a "read" context (what would it mean
1047 to write or modify a literal?) and so we enumerate its registers
1048 accordingly. */
static void addRegUsage_MIPSRH(HRegUsage * u, MIPSRH * op)
1050 {
1051 switch (op->tag) {
1052 case Mrh_Imm:
1053 return;
1054 case Mrh_Reg:
1055 addHRegUse(u, HRmRead, op->Mrh.Reg.reg);
1056 return;
1057 default:
1058 vpanic("addRegUsage_MIPSRH");
1059 break;
1060 }
1061 }
1062
static void mapRegs_MIPSRH(HRegRemap * m, MIPSRH * op)
1064 {
1065 switch (op->tag) {
1066 case Mrh_Imm:
1067 return;
1068 case Mrh_Reg:
1069 op->Mrh.Reg.reg = lookupHRegRemap(m, op->Mrh.Reg.reg);
1070 return;
1071 default:
1072 vpanic("mapRegs_MIPSRH");
1073 break;
1074 }
1075 }
1076
1077 /* --------- Instructions. --------- */
1078
const HChar *showMIPSUnaryOp(MIPSUnaryOp op)
1080 {
1081 const HChar* ret;
1082 switch (op) {
1083 case Mun_CLO:
1084 ret = "clo";
1085 break;
1086 case Mun_CLZ:
1087 ret = "clz";
1088 break;
1089 case Mun_NOP:
1090 ret = "nop";
1091 break;
1092 case Mun_DCLO:
1093 ret = "dclo";
1094 break;
1095 case Mun_DCLZ:
1096 ret = "dclz";
1097 break;
1098 default:
1099 vpanic("showMIPSUnaryOp");
1100 break;
1101 }
1102 return ret;
1103 }
1104
const HChar *showMIPSAluOp(MIPSAluOp op, Bool immR)
1106 {
1107 const HChar* ret;
1108 switch (op) {
1109 case Malu_ADD:
1110 ret = immR ? "addiu" : "addu";
1111 break;
1112 case Malu_SUB:
1113 ret = "subu";
1114 break;
1115 case Malu_AND:
1116 ret = immR ? "andi" : "and";
1117 break;
1118 case Malu_OR:
1119 ret = immR ? "ori" : "or";
1120 break;
1121 case Malu_NOR:
1122 vassert(immR == False); /*there's no nor with an immediate operand!? */
1123 ret = "nor";
1124 break;
1125 case Malu_XOR:
1126 ret = immR ? "xori" : "xor";
1127 break;
1128 case Malu_DADD:
1129 ret = immR ? "daddi" : "dadd";
1130 break;
1131 case Malu_DSUB:
1132 ret = immR ? "dsubi" : "dsub";
1133 break;
1134 case Malu_SLT:
1135 ret = immR ? "slti" : "slt";
1136 break;
1137 default:
1138 vpanic("showMIPSAluOp");
1139 break;
1140 }
1141 return ret;
1142 }
1143
const HChar *showMIPSShftOp(MIPSShftOp op, Bool immR, Bool sz32)
1145 {
1146 const HChar *ret;
1147 switch (op) {
1148 case Mshft_SRA:
1149 ret = immR ? (sz32 ? "sra" : "dsra") : (sz32 ? "srav" : "dsrav");
1150 break;
1151 case Mshft_SLL:
1152 ret = immR ? (sz32 ? "sll" : "dsll") : (sz32 ? "sllv" : "dsllv");
1153 break;
1154 case Mshft_SRL:
1155 ret = immR ? (sz32 ? "srl" : "dsrl") : (sz32 ? "srlv" : "dsrlv");
1156 break;
1157 default:
1158 vpanic("showMIPSShftOp");
1159 break;
1160 }
1161 return ret;
1162 }
1163
const HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable)
1165 {
1166 const HChar *ret;
1167 switch (op) {
1168 case Macc_ADD:
1169 ret = variable ? "madd" : "maddu";
1170 break;
1171 case Macc_SUB:
1172 ret = variable ? "msub" : "msubu";
1173 break;
1174 default:
1175 vpanic("showMIPSAccOp");
1176 break;
1177 }
1178 return ret;
1179 }
1180
MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
1182 {
1183 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1184 i->tag = Min_LI;
1185 i->Min.LI.dst = dst;
1186 i->Min.LI.imm = imm;
1187 return i;
1188 }
1189
MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
1191 {
1192 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1193 i->tag = Min_Alu;
1194 i->Min.Alu.op = op;
1195 i->Min.Alu.dst = dst;
1196 i->Min.Alu.srcL = srcL;
1197 i->Min.Alu.srcR = srcR;
1198 return i;
1199 }
1200
MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
                          MIPSRH * srcR)
1203 {
1204 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1205 i->tag = Min_Shft;
1206 i->Min.Shft.op = op;
1207 i->Min.Shft.sz32 = sz32;
1208 i->Min.Shft.dst = dst;
1209 i->Min.Shft.srcL = srcL;
1210 i->Min.Shft.srcR = srcR;
1211 return i;
1212 }
1213
MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
1215 {
1216 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1217 i->tag = Min_Unary;
1218 i->Min.Unary.op = op;
1219 i->Min.Unary.dst = dst;
1220 i->Min.Unary.src = src;
1221 return i;
1222 }
1223
MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
                         MIPSCondCode cond)
1226 {
1227 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1228 i->tag = Min_Cmp;
1229 i->Min.Cmp.syned = syned;
1230 i->Min.Cmp.sz32 = sz32;
1231 i->Min.Cmp.dst = dst;
1232 i->Min.Cmp.srcL = srcL;
1233 i->Min.Cmp.srcR = srcR;
1234 i->Min.Cmp.cond = cond;
1235 return i;
1236 }
1237
1238 /* multiply */
MIPSInstr *MIPSInstr_Mul(Bool syned, Bool wid, Bool sz32, HReg dst, HReg srcL,
                         HReg srcR)
1241 {
1242 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1243 i->tag = Min_Mul;
1244 i->Min.Mul.syned = syned;
1245 i->Min.Mul.widening = wid; /* widen=True else False */
1246 i->Min.Mul.sz32 = sz32; /* True = 32 bits */
1247 i->Min.Mul.dst = dst;
1248 i->Min.Mul.srcL = srcL;
1249 i->Min.Mul.srcR = srcR;
1250 return i;
1251 }
1252
1253 /* msub */
MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
1255 {
1256 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1257 i->tag = Min_Macc;
1258
1259 i->Min.Macc.op = Macc_SUB;
1260 i->Min.Macc.syned = syned;
1261 i->Min.Macc.srcL = srcL;
1262 i->Min.Macc.srcR = srcR;
1263 return i;
1264 }
1265
1266 /* madd */
MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
1268 {
1269 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1270 i->tag = Min_Macc;
1271
1272 i->Min.Macc.op = Macc_ADD;
1273 i->Min.Macc.syned = syned;
1274 i->Min.Macc.srcL = srcL;
1275 i->Min.Macc.srcR = srcR;
1276 return i;
1277 }
1278
1279 /* div */
MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
1281 {
1282 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1283 i->tag = Min_Div;
1284 i->Min.Div.syned = syned;
1285 i->Min.Div.sz32 = sz32; /* True = 32 bits */
1286 i->Min.Div.srcL = srcL;
1287 i->Min.Div.srcR = srcR;
1288 return i;
1289 }
1290
MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs,
                            HReg src, RetLoc rloc )
1293 {
1294 UInt mask;
1295 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1296 i->tag = Min_Call;
1297 i->Min.Call.cond = cond;
1298 i->Min.Call.target = target;
1299 i->Min.Call.argiregs = argiregs;
1300 i->Min.Call.src = src;
1301 i->Min.Call.rloc = rloc;
   /* Only $4 .. $7 (or $4 .. $11 in 64-bit mode) may be used as arg regs. */
1303 mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
1304 | (1 << 10) | (1 << 11);
1305 vassert(0 == (argiregs & ~mask));
1306 vassert(is_sane_RetLoc(rloc));
1307 return i;
1308 }
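/* Example (hypothetical values): a helper call passing two word arguments in
   $4 and $5 would be built with argiregs == (1 << 4) | (1 << 5), which
   satisfies the mask check above. */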
1309
MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target,
                                  UInt argiregs, RetLoc rloc )
1312 {
1313 UInt mask;
1314 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1315 i->tag = Min_Call;
1316 i->Min.Call.cond = cond;
1317 i->Min.Call.target = target;
1318 i->Min.Call.argiregs = argiregs;
1319 i->Min.Call.rloc = rloc;
   /* Only $4 .. $7 (or $4 .. $11 in 64-bit mode) may be used as arg regs. */
1321 mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
1322 | (1 << 10) | (1 << 11);
1323 vassert(0 == (argiregs & ~mask));
1324 vassert(is_sane_RetLoc(rloc));
1325 return i;
1326 }
1327
MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
                               MIPSCondCode cond, Bool toFastEP ) {
1330 MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
1331 i->tag = Min_XDirect;
1332 i->Min.XDirect.dstGA = dstGA;
1333 i->Min.XDirect.amPC = amPC;
1334 i->Min.XDirect.cond = cond;
1335 i->Min.XDirect.toFastEP = toFastEP;
1336 return i;
1337 }
1338
MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
                              MIPSCondCode cond ) {
1341 MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
1342 i->tag = Min_XIndir;
1343 i->Min.XIndir.dstGA = dstGA;
1344 i->Min.XIndir.amPC = amPC;
1345 i->Min.XIndir.cond = cond;
1346 return i;
1347 }
1348
MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
                                 MIPSCondCode cond, IRJumpKind jk ) {
1351 MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
1352 i->tag = Min_XAssisted;
1353 i->Min.XAssisted.dstGA = dstGA;
1354 i->Min.XAssisted.amPC = amPC;
1355 i->Min.XAssisted.cond = cond;
1356 i->Min.XAssisted.jk = jk;
1357 return i;
1358 }
1359
MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
1361 {
1362 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1363 i->tag = Min_Load;
1364 i->Min.Load.sz = sz;
1365 i->Min.Load.src = src;
1366 i->Min.Load.dst = dst;
1367 vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
1368
1369 if (sz == 8)
1370 vassert(mode64);
1371 return i;
1372 }
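/* Illustrative only (r_dst and r_base are hypothetical HRegs): a 32-bit load
   of the word at offset 8 from r_base is built as

      MIPSInstr_Load(4, r_dst, MIPSAMode_IR(8, r_base), False);
*/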
1373
MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
1375 {
1376 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1377 i->tag = Min_Store;
1378 i->Min.Store.sz = sz;
1379 i->Min.Store.src = src;
1380 i->Min.Store.dst = dst;
1381 vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
1382
1383 if (sz == 8)
1384 vassert(mode64);
1385 return i;
1386 }
1387
MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
1389 {
1390 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1391 i->tag = Min_LoadL;
1392 i->Min.LoadL.sz = sz;
1393 i->Min.LoadL.src = src;
1394 i->Min.LoadL.dst = dst;
1395 vassert(sz == 4 || sz == 8);
1396
1397 if (sz == 8)
1398 vassert(mode64);
1399 return i;
1400 }
1401
MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
1403 {
1404 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1405 i->tag = Min_StoreC;
1406 i->Min.StoreC.sz = sz;
1407 i->Min.StoreC.src = src;
1408 i->Min.StoreC.dst = dst;
1409 vassert(sz == 4 || sz == 8);
1410
1411 if (sz == 8)
1412 vassert(mode64);
1413 return i;
1414 }
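/* LoadL/StoreC model the MIPS ll/sc (load-linked / store-conditional) pair,
   presumably used by the instruction selector for atomic sequences; the
   pretty-printer below renders them as "ll" and "sc". */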
1415
MIPSInstr *MIPSInstr_Mthi(HReg src)
1417 {
1418 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1419 i->tag = Min_Mthi;
1420 i->Min.MtHL.src = src;
1421 return i;
1422 }
1423
MIPSInstr *MIPSInstr_Mtlo(HReg src)
1425 {
1426 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1427 i->tag = Min_Mtlo;
1428 i->Min.MtHL.src = src;
1429 return i;
1430 }
1431
MIPSInstr *MIPSInstr_Mfhi(HReg dst)
1433 {
1434 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1435 i->tag = Min_Mfhi;
1436 i->Min.MfHL.dst = dst;
1437 return i;
1438 }
1439
MIPSInstr *MIPSInstr_Mflo(HReg dst)
1441 {
1442 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1443 i->tag = Min_Mflo;
1444 i->Min.MfHL.dst = dst;
1445 return i;
1446 }
1447
1448 /* Read/Write Link Register */
MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
1450 {
1451 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1452 i->tag = Min_RdWrLR;
1453 i->Min.RdWrLR.wrLR = wrLR;
1454 i->Min.RdWrLR.gpr = gpr;
1455 return i;
1456 }
1457
MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
1459 {
1460 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1461 i->tag = Min_FpLdSt;
1462 i->Min.FpLdSt.isLoad = isLoad;
1463 i->Min.FpLdSt.sz = sz;
1464 i->Min.FpLdSt.reg = reg;
1465 i->Min.FpLdSt.addr = addr;
1466 vassert(sz == 4 || sz == 8);
1467 return i;
1468 }
1469
MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
1471 {
1472 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1473 i->tag = Min_FpUnary;
1474 i->Min.FpUnary.op = op;
1475 i->Min.FpUnary.dst = dst;
1476 i->Min.FpUnary.src = src;
1477 return i;
1478 }
1479
MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
1481 {
1482 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1483 i->tag = Min_FpBinary;
1484 i->Min.FpBinary.op = op;
1485 i->Min.FpBinary.dst = dst;
1486 i->Min.FpBinary.srcL = srcL;
1487 i->Min.FpBinary.srcR = srcR;
1488 return i;
1489 }
1490
MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2,
                                 HReg src3 )
1493 {
1494 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1495 i->tag = Min_FpTernary;
1496 i->Min.FpTernary.op = op;
1497 i->Min.FpTernary.dst = dst;
1498 i->Min.FpTernary.src1 = src1;
1499 i->Min.FpTernary.src2 = src2;
1500 i->Min.FpTernary.src3 = src3;
1501 return i;
1502 }
1503
MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
1505 {
1506 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1507 i->tag = Min_FpConvert;
1508 i->Min.FpConvert.op = op;
1509 i->Min.FpConvert.dst = dst;
1510 i->Min.FpConvert.src = src;
1511 return i;
1512
1513 }
1514
MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
1516 {
1517 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1518 i->tag = Min_FpCompare;
1519 i->Min.FpCompare.op = op;
1520 i->Min.FpCompare.dst = dst;
1521 i->Min.FpCompare.srcL = srcL;
1522 i->Min.FpCompare.srcR = srcR;
1523 return i;
1524 }
1525
MIPSInstr *MIPSInstr_MtFCSR(HReg src)
1527 {
1528 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1529 i->tag = Min_MtFCSR;
1530 i->Min.MtFCSR.src = src;
1531 return i;
1532 }
1533
MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
1535 {
1536 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1537 i->tag = Min_MfFCSR;
1538 i->Min.MfFCSR.dst = dst;
1539 return i;
1540 }
1541
MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src )
1543 {
1544 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1545 i->tag = Min_FpGpMove;
1546 i->Min.FpGpMove.op = op;
1547 i->Min.FpGpMove.dst = dst;
1548 i->Min.FpGpMove.src = src;
1549 return i;
1550 }
1551
MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src,
                                HReg cond )
1554 {
1555 MIPSInstr *i = LibVEX_Alloc(sizeof(MIPSInstr));
1556 i->tag = Min_MoveCond;
1557 i->Min.MoveCond.op = op;
1558 i->Min.MoveCond.dst = dst;
1559 i->Min.MoveCond.src = src;
1560 i->Min.MoveCond.cond = cond;
1561 return i;
1562 }
1563
MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
                               MIPSAMode* amFailAddr ) {
1566 MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
1567 i->tag = Min_EvCheck;
1568 i->Min.EvCheck.amCounter = amCounter;
1569 i->Min.EvCheck.amFailAddr = amFailAddr;
1570 return i;
1571 }
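/* As the Min_EvCheck case of ppMIPSInstr below shows, the event check
   decrements the counter at amCounter and, once it goes negative, jumps to
   the address read from amFailAddr. */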
1572
MIPSInstr* MIPSInstr_ProfInc ( void ) {
1574 MIPSInstr* i = LibVEX_Alloc(sizeof(MIPSInstr));
1575 i->tag = Min_ProfInc;
1576 return i;
1577 }
1578
1579 /* -------- Pretty Print instructions ------------- */
static void ppLoadImm(HReg dst, ULong imm, Bool mode64)
1581 {
1582 vex_printf("li ");
1583 ppHRegMIPS(dst, mode64);
1584 vex_printf(",0x%016llx", imm);
1585 }
1586
void ppMIPSInstr(MIPSInstr * i, Bool mode64)
1588 {
1589 switch (i->tag) {
1590 case Min_LI:
1591 ppLoadImm(i->Min.LI.dst, i->Min.LI.imm, mode64);
1592 break;
1593 case Min_Alu: {
1594 HReg r_srcL = i->Min.Alu.srcL;
1595 MIPSRH *rh_srcR = i->Min.Alu.srcR;
1596 /* generic */
1597 vex_printf("%s ", showMIPSAluOp(i->Min.Alu.op,
1598 toBool(rh_srcR->tag == Mrh_Imm)));
1599 ppHRegMIPS(i->Min.Alu.dst, mode64);
1600 vex_printf(",");
1601 ppHRegMIPS(r_srcL, mode64);
1602 vex_printf(",");
1603 ppMIPSRH(rh_srcR, mode64);
1604 return;
1605 }
1606 case Min_Shft: {
1607 HReg r_srcL = i->Min.Shft.srcL;
1608 MIPSRH *rh_srcR = i->Min.Shft.srcR;
1609 vex_printf("%s ", showMIPSShftOp(i->Min.Shft.op,
1610 toBool(rh_srcR->tag == Mrh_Imm),
1611 i->Min.Shft.sz32));
1612 ppHRegMIPS(i->Min.Shft.dst, mode64);
1613 vex_printf(",");
1614 ppHRegMIPS(r_srcL, mode64);
1615 vex_printf(",");
1616 ppMIPSRH(rh_srcR, mode64);
1617 return;
1618 }
1619 case Min_Unary: {
1620 vex_printf("%s ", showMIPSUnaryOp(i->Min.Unary.op));
1621 ppHRegMIPS(i->Min.Unary.dst, mode64);
1622 vex_printf(",");
1623 ppHRegMIPS(i->Min.Unary.src, mode64);
1624 return;
1625 }
1626 case Min_Cmp: {
1627 vex_printf("word_compare ");
1628 ppHRegMIPS(i->Min.Cmp.dst, mode64);
1629 vex_printf(" = %s ( ", showMIPSCondCode(i->Min.Cmp.cond));
1630 ppHRegMIPS(i->Min.Cmp.srcL, mode64);
1631 vex_printf(", ");
1632 ppHRegMIPS(i->Min.Cmp.srcR, mode64);
1633 vex_printf(" )");
1634
1635 return;
1636 }
1637 case Min_Mul: {
1638 switch (i->Min.Mul.widening) {
1639 case False:
1640 vex_printf("mul ");
1641 ppHRegMIPS(i->Min.Mul.dst, mode64);
1642 vex_printf(", ");
1643 ppHRegMIPS(i->Min.Mul.srcL, mode64);
1644 vex_printf(", ");
1645 ppHRegMIPS(i->Min.Mul.srcR, mode64);
1646 return;
1647 case True:
1648 vex_printf("%s%s ", i->Min.Mul.sz32 ? "mult" : "dmult",
1649 i->Min.Mul.syned ? "" : "u");
1650 ppHRegMIPS(i->Min.Mul.dst, mode64);
1651 vex_printf(", ");
1652 ppHRegMIPS(i->Min.Mul.srcL, mode64);
1653 vex_printf(", ");
1654 ppHRegMIPS(i->Min.Mul.srcR, mode64);
1655 return;
1656 }
1657 break;
1658 }
1659 case Min_Mthi: {
1660 vex_printf("mthi ");
1661 ppHRegMIPS(i->Min.MtHL.src, mode64);
1662 return;
1663 }
1664 case Min_Mtlo: {
1665 vex_printf("mtlo ");
1666 ppHRegMIPS(i->Min.MtHL.src, mode64);
1667 return;
1668 }
1669 case Min_Mfhi: {
1670 vex_printf("mfhi ");
1671 ppHRegMIPS(i->Min.MfHL.dst, mode64);
1672 return;
1673 }
1674 case Min_Mflo: {
1675 vex_printf("mflo ");
1676 ppHRegMIPS(i->Min.MfHL.dst, mode64);
1677 return;
1678 }
1679 case Min_Macc: {
1680 vex_printf("%s ", showMIPSMaccOp(i->Min.Macc.op, i->Min.Macc.syned));
1681 ppHRegMIPS(i->Min.Macc.srcL, mode64);
1682 vex_printf(", ");
1683 ppHRegMIPS(i->Min.Macc.srcR, mode64);
1684 return;
1685 }
1686 case Min_Div: {
1687 if (!i->Min.Div.sz32)
1688 vex_printf("d");
1689 vex_printf("div");
1690 vex_printf("%s ", i->Min.Div.syned ? "s" : "u");
1691 ppHRegMIPS(i->Min.Div.srcL, mode64);
1692 vex_printf(", ");
1693 ppHRegMIPS(i->Min.Div.srcR, mode64);
1694 return;
1695 }
1696 case Min_Call: {
1697 Int n;
1698 vex_printf("call: ");
1699 if (i->Min.Call.cond != MIPScc_AL) {
1700 vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond));
1701 }
1702 vex_printf(" {");
1703 if (!mode64)
1704 vex_printf(" addiu $29, $29, -16");
1705
1706 ppLoadImm(hregMIPS_GPR25(mode64), i->Min.Call.target, mode64);
1707
      vex_printf(" ; jalr $31, $25; # args [");
1709 for (n = 0; n < 32; n++) {
1710 if (i->Min.Call.argiregs & (1 << n)) {
1711 vex_printf("$%d", n);
1712 if ((i->Min.Call.argiregs >> n) > 1)
1713 vex_printf(",");
1714 }
1715 }
1716 vex_printf("] nop; ");
1717 if (!mode64)
1718 vex_printf("addiu $29, $29, 16; ]");
1719
1720 break;
1721 }
1722 case Min_XDirect:
1723 vex_printf("(xDirect) ");
1724 vex_printf("if (guest_COND.%s) { ",
1725 showMIPSCondCode(i->Min.XDirect.cond));
1726 vex_printf("move $9, 0x%x,", (UInt)i->Min.XDirect.dstGA);
1727 vex_printf("; sw $9, ");
1728 ppMIPSAMode(i->Min.XDirect.amPC, mode64);
1729 vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
1730 i->Min.XDirect.toFastEP ? "fast" : "slow");
1731 return;
1732 case Min_XIndir:
1733 vex_printf("(xIndir) ");
1734 vex_printf("if (guest_COND.%s) { sw ",
1735 showMIPSCondCode(i->Min.XIndir.cond));
1736 ppHRegMIPS(i->Min.XIndir.dstGA, mode64);
1737 vex_printf(", ");
1738 ppMIPSAMode(i->Min.XIndir.amPC, mode64);
1739 vex_printf("; move $9, $disp_indir; jalr $9; nop}");
1740 return;
1741 case Min_XAssisted:
1742 vex_printf("(xAssisted) ");
1743 vex_printf("if (guest_COND.%s) { ",
1744 showMIPSCondCode(i->Min.XAssisted.cond));
1745 vex_printf("sw ");
1746 ppHRegMIPS(i->Min.XAssisted.dstGA, mode64);
1747 vex_printf(", ");
1748 ppMIPSAMode(i->Min.XAssisted.amPC, mode64);
1749 vex_printf("; move $9, $IRJumpKind_to_TRCVAL(%d)",
1750 (Int)i->Min.XAssisted.jk);
1751 vex_printf("; move $9, $disp_assisted; jalr $9; nop; }");
1752 return;
1753 case Min_Load: {
1754 Bool idxd = toBool(i->Min.Load.src->tag == Mam_RR);
1755 UChar sz = i->Min.Load.sz;
1756 HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
1757 vex_printf("l%c%s ", c_sz, idxd ? "x" : "");
1758 ppHRegMIPS(i->Min.Load.dst, mode64);
1759 vex_printf(",");
1760 ppMIPSAMode(i->Min.Load.src, mode64);
1761 return;
1762 }
1763 case Min_Store: {
1764 UChar sz = i->Min.Store.sz;
1765 Bool idxd = toBool(i->Min.Store.dst->tag == Mam_RR);
1766 HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
1767 vex_printf("s%c%s ", c_sz, idxd ? "x" : "");
1768 ppHRegMIPS(i->Min.Store.src, mode64);
1769 vex_printf(",");
1770 ppMIPSAMode(i->Min.Store.dst, mode64);
1771 return;
1772 }
1773 case Min_LoadL: {
1774 vex_printf("ll ");
1775 ppHRegMIPS(i->Min.LoadL.dst, mode64);
1776 vex_printf(",");
1777 ppMIPSAMode(i->Min.LoadL.src, mode64);
1778 return;
1779 }
1780 case Min_StoreC: {
1781 vex_printf("sc ");
1782 ppHRegMIPS(i->Min.StoreC.src, mode64);
1783 vex_printf(",");
1784 ppMIPSAMode(i->Min.StoreC.dst, mode64);
1785 return;
1786 }
1787 case Min_RdWrLR: {
1788 vex_printf("%s ", i->Min.RdWrLR.wrLR ? "mtlr" : "mflr");
1789 ppHRegMIPS(i->Min.RdWrLR.gpr, mode64);
1790 return;
1791 }
1792 case Min_FpUnary:
1793 vex_printf("%s ", showMIPSFpOp(i->Min.FpUnary.op));
1794 ppHRegMIPS(i->Min.FpUnary.dst, mode64);
1795 vex_printf(",");
1796 ppHRegMIPS(i->Min.FpUnary.src, mode64);
1797 return;
1798 case Min_FpBinary:
1799 vex_printf("%s", showMIPSFpOp(i->Min.FpBinary.op));
1800 ppHRegMIPS(i->Min.FpBinary.dst, mode64);
1801 vex_printf(",");
1802 ppHRegMIPS(i->Min.FpBinary.srcL, mode64);
1803 vex_printf(",");
1804 ppHRegMIPS(i->Min.FpBinary.srcR, mode64);
1805 return;
1806 case Min_FpTernary:
1807 vex_printf("%s", showMIPSFpOp(i->Min.FpTernary.op));
1808 ppHRegMIPS(i->Min.FpTernary.dst, mode64);
1809 vex_printf(",");
1810 ppHRegMIPS(i->Min.FpTernary.src1, mode64);
1811 vex_printf(",");
1812 ppHRegMIPS(i->Min.FpTernary.src2, mode64);
1813 vex_printf(",");
1814 ppHRegMIPS(i->Min.FpTernary.src3, mode64);
1815 return;
1816 case Min_FpConvert:
1817 vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op));
1818 ppHRegMIPS(i->Min.FpConvert.dst, mode64);
1819 vex_printf(",");
1820 ppHRegMIPS(i->Min.FpConvert.src, mode64);
1821 return;
1822 case Min_FpCompare:
1823 vex_printf("%s ", showMIPSFpOp(i->Min.FpCompare.op));
1824 ppHRegMIPS(i->Min.FpCompare.srcL, mode64);
1825 vex_printf(",");
1826 ppHRegMIPS(i->Min.FpCompare.srcR, mode64);
1827 return;
1828 case Min_FpMulAcc:
1829 vex_printf("%s ", showMIPSFpOp(i->Min.FpMulAcc.op));
1830 ppHRegMIPS(i->Min.FpMulAcc.dst, mode64);
1831 vex_printf(",");
1832 ppHRegMIPS(i->Min.FpMulAcc.srcML, mode64);
1833 vex_printf(",");
1834 ppHRegMIPS(i->Min.FpMulAcc.srcMR, mode64);
1835 vex_printf(",");
1836 ppHRegMIPS(i->Min.FpMulAcc.srcAcc, mode64);
1837 return;
1838 case Min_FpLdSt: {
1839 if (i->Min.FpLdSt.sz == 4) {
1840 if (i->Min.FpLdSt.isLoad) {
1841 vex_printf("lwc1 ");
1842 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
1843 vex_printf(",");
1844 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
1845 } else {
1846 vex_printf("swc1 ");
1847 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
1848 vex_printf(",");
1849 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
1850 }
1851 } else if (i->Min.FpLdSt.sz == 8) {
1852 if (i->Min.FpLdSt.isLoad) {
1853 vex_printf("ldc1 ");
1854 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
1855 vex_printf(",");
1856 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
1857 } else {
1858 vex_printf("sdc1 ");
1859 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
1860 vex_printf(",");
1861 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
1862 }
1863 }
1864 return;
1865 }
1866 case Min_MtFCSR: {
1867 vex_printf("ctc1 ");
1868 ppHRegMIPS(i->Min.MtFCSR.src, mode64);
1869 vex_printf(", $31");
1870 return;
1871 }
1872 case Min_MfFCSR: {
1873 vex_printf("ctc1 ");
1874 ppHRegMIPS(i->Min.MfFCSR.dst, mode64);
1875 vex_printf(", $31");
1876 return;
1877 }
1878 case Min_FpGpMove: {
1879 vex_printf("%s ", showMIPSFpGpMoveOp(i->Min.FpGpMove.op));
1880 ppHRegMIPS(i->Min.FpGpMove.dst, mode64);
1881 vex_printf(", ");
1882 ppHRegMIPS(i->Min.FpGpMove.src, mode64);
1883 return;
1884 }
1885 case Min_MoveCond: {
1886 vex_printf("%s", showMIPSMoveCondOp(i->Min.MoveCond.op));
1887 ppHRegMIPS(i->Min.MoveCond.dst, mode64);
1888 vex_printf(", ");
1889 ppHRegMIPS(i->Min.MoveCond.src, mode64);
1890 vex_printf(", ");
1891 ppHRegMIPS(i->Min.MoveCond.cond, mode64);
1892 return;
1893 }
1894 case Min_EvCheck:
1895 vex_printf("(evCheck) lw $9, ");
1896 ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
1897 vex_printf("; addiu $9, $9, -1");
1898 vex_printf("; sw $9, ");
1899 ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
1900 vex_printf("; bgez $t9, nofail; jalr *");
1901 ppMIPSAMode(i->Min.EvCheck.amFailAddr, mode64);
1902 vex_printf("; nofail:");
1903 return;
1904 case Min_ProfInc:
1905 if (mode64)
1906 vex_printf("(profInc) move $9, ($NotKnownYet); "
1907 "ld $8, 0($9); "
1908 "daddiu $8, $8, 1; "
1909 "sd $8, 0($9); " );
1910 else
1911 vex_printf("(profInc) move $9, ($NotKnownYet); "
1912 "lw $8, 0($9); "
1913 "addiu $8, $8, 1; "
1914 "sw $8, 0($9); "
1915 "sltiu $1, $8, 1; "
1916 "lw $8, 4($9); "
1917 "addu $8, $8, $1; "
1918 "sw $8, 4($9); " );
1919 return;
1920 default:
1921 vpanic("ppMIPSInstr");
1922 break;
1923 }
1924 }
1925
1926 /* --------- Helpers for register allocation. --------- */
1927
void getRegUsage_MIPSInstr(HRegUsage * u, MIPSInstr * i, Bool mode64)
1929 {
1930 initHRegUsage(u);
1931 switch (i->tag) {
1932 case Min_LI:
1933 addHRegUse(u, HRmWrite, i->Min.LI.dst);
1934 break;
1935 case Min_Alu:
1936 addHRegUse(u, HRmRead, i->Min.Alu.srcL);
1937 addRegUsage_MIPSRH(u, i->Min.Alu.srcR);
1938 addHRegUse(u, HRmWrite, i->Min.Alu.dst);
1939 return;
1940 case Min_Shft:
1941 addHRegUse(u, HRmRead, i->Min.Shft.srcL);
1942 addRegUsage_MIPSRH(u, i->Min.Shft.srcR);
1943 addHRegUse(u, HRmWrite, i->Min.Shft.dst);
1944 return;
1945 case Min_Cmp:
1946 addHRegUse(u, HRmRead, i->Min.Cmp.srcL);
1947 addHRegUse(u, HRmRead, i->Min.Cmp.srcR);
1948 addHRegUse(u, HRmWrite, i->Min.Cmp.dst);
1949 return;
1950 case Min_Unary:
1951 addHRegUse(u, HRmRead, i->Min.Unary.src);
1952 addHRegUse(u, HRmWrite, i->Min.Unary.dst);
1953 return;
1954 case Min_Mul:
1955 addHRegUse(u, HRmWrite, i->Min.Mul.dst);
1956 addHRegUse(u, HRmRead, i->Min.Mul.srcL);
1957 addHRegUse(u, HRmRead, i->Min.Mul.srcR);
1958 return;
1959 case Min_Mthi:
1960 case Min_Mtlo:
1961 addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
1962 addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
1963 addHRegUse(u, HRmRead, i->Min.MtHL.src);
1964 return;
1965 case Min_Mfhi:
1966 case Min_Mflo:
1967 addHRegUse(u, HRmRead, hregMIPS_HI(mode64));
1968 addHRegUse(u, HRmRead, hregMIPS_LO(mode64));
1969 addHRegUse(u, HRmWrite, i->Min.MfHL.dst);
1970 return;
1971 case Min_MtFCSR:
1972 addHRegUse(u, HRmRead, i->Min.MtFCSR.src);
1973 return;
1974 case Min_MfFCSR:
1975 addHRegUse(u, HRmWrite, i->Min.MfFCSR.dst);
1976 return;
1977 case Min_Macc:
1978 addHRegUse(u, HRmModify, hregMIPS_HI(mode64));
1979 addHRegUse(u, HRmModify, hregMIPS_LO(mode64));
1980 addHRegUse(u, HRmRead, i->Min.Macc.srcL);
1981 addHRegUse(u, HRmRead, i->Min.Macc.srcR);
1982 return;
1983 case Min_Div:
1984 addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
1985 addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
1986 addHRegUse(u, HRmRead, i->Min.Div.srcL);
1987 addHRegUse(u, HRmRead, i->Min.Div.srcR);
1988 return;
1989 case Min_Call: {
1990 /* Logic and comments copied/modified from x86, ppc and arm back end.
1991 First off, claim it trashes all the caller-saved regs
1992 which fall within the register allocator's jurisdiction. */
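      /* The callee-saved registers $16..$23 are deliberately not claimed as
         trashed, since the callee is required to preserve them. */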
1993 if (i->Min.Call.cond != MIPScc_AL)
1994 addHRegUse(u, HRmRead, i->Min.Call.src);
1995 UInt argir;
1996 addHRegUse(u, HRmWrite, hregMIPS_GPR1(mode64));
1997
1998 addHRegUse(u, HRmWrite, hregMIPS_GPR2(mode64));
1999 addHRegUse(u, HRmWrite, hregMIPS_GPR3(mode64));
2000
2001 addHRegUse(u, HRmWrite, hregMIPS_GPR4(mode64));
2002 addHRegUse(u, HRmWrite, hregMIPS_GPR5(mode64));
2003 addHRegUse(u, HRmWrite, hregMIPS_GPR6(mode64));
2004 addHRegUse(u, HRmWrite, hregMIPS_GPR7(mode64));
2005
2006 addHRegUse(u, HRmWrite, hregMIPS_GPR8(mode64));
2007 addHRegUse(u, HRmWrite, hregMIPS_GPR9(mode64));
2008 addHRegUse(u, HRmWrite, hregMIPS_GPR10(mode64));
2009 addHRegUse(u, HRmWrite, hregMIPS_GPR11(mode64));
2010 addHRegUse(u, HRmWrite, hregMIPS_GPR12(mode64));
2011 addHRegUse(u, HRmWrite, hregMIPS_GPR13(mode64));
2012 addHRegUse(u, HRmWrite, hregMIPS_GPR14(mode64));
2013 addHRegUse(u, HRmWrite, hregMIPS_GPR15(mode64));
2014
2015 addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64));
2016 addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64));
2017 addHRegUse(u, HRmWrite, hregMIPS_GPR31(mode64));
2018
2019 /* Now we have to state any parameter-carrying registers
2020 which might be read. This depends on the argiregs field. */
2021 argir = i->Min.Call.argiregs;
2022 if (argir & (1<<11)) addHRegUse(u, HRmRead, hregMIPS_GPR11(mode64));
2023 if (argir & (1<<10)) addHRegUse(u, HRmRead, hregMIPS_GPR10(mode64));
2024 if (argir & (1<<9)) addHRegUse(u, HRmRead, hregMIPS_GPR9(mode64));
2025 if (argir & (1<<8)) addHRegUse(u, HRmRead, hregMIPS_GPR8(mode64));
2026 if (argir & (1<<7)) addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
2027 if (argir & (1<<6)) addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
2028 if (argir & (1<<5)) addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
2029 if (argir & (1<<4)) addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));
2030
2031 vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6)
2032 | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10)
2033 | (1 << 11))));
2034
2035 return;
2036 }
2037 /* XDirect/XIndir/XAssisted are also a bit subtle. They
2038 conditionally exit the block. Hence we only need to list (1)
2039 the registers that they read, and (2) the registers that they
2040 write in the case where the block is not exited. (2) is
2041 empty, hence only (1) is relevant here. */
2042 case Min_XDirect:
2043 addRegUsage_MIPSAMode(u, i->Min.XDirect.amPC);
2044 return;
2045 case Min_XIndir:
2046 addHRegUse(u, HRmRead, i->Min.XIndir.dstGA);
2047 addRegUsage_MIPSAMode(u, i->Min.XIndir.amPC);
2048 return;
2049 case Min_XAssisted:
2050 addHRegUse(u, HRmRead, i->Min.XAssisted.dstGA);
2051 addRegUsage_MIPSAMode(u, i->Min.XAssisted.amPC);
2052 return;
2053 case Min_Load:
2054 addRegUsage_MIPSAMode(u, i->Min.Load.src);
2055 addHRegUse(u, HRmWrite, i->Min.Load.dst);
2056 return;
2057 case Min_Store:
2058 addHRegUse(u, HRmRead, i->Min.Store.src);
2059 addRegUsage_MIPSAMode(u, i->Min.Store.dst);
2060 return;
2061 case Min_LoadL:
2062 addRegUsage_MIPSAMode(u, i->Min.LoadL.src);
2063 addHRegUse(u, HRmWrite, i->Min.LoadL.dst);
2064 return;
2065 case Min_StoreC:
2066 addHRegUse(u, HRmWrite, i->Min.StoreC.src);
2067 addHRegUse(u, HRmRead, i->Min.StoreC.src);
2068 addRegUsage_MIPSAMode(u, i->Min.StoreC.dst);
2069 return;
2070 case Min_RdWrLR:
2071 addHRegUse(u, (i->Min.RdWrLR.wrLR ? HRmRead : HRmWrite),
2072 i->Min.RdWrLR.gpr);
2073 return;
2074 case Min_FpLdSt:
2075 if (i->Min.FpLdSt.sz == 4) {
2076 addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
2077 i->Min.FpLdSt.reg);
2078 addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
2079 return;
2080 } else if (i->Min.FpLdSt.sz == 8) {
2081 addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
2082 i->Min.FpLdSt.reg);
2083 addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
2084 return;
2085 }
2086 break;
2087 case Min_FpUnary:
2088 addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
2089 addHRegUse(u, HRmRead, i->Min.FpUnary.src);
2090 return;
2091 case Min_FpBinary:
2092 addHRegUse(u, HRmWrite, i->Min.FpBinary.dst);
2093 addHRegUse(u, HRmRead, i->Min.FpBinary.srcL);
2094 addHRegUse(u, HRmRead, i->Min.FpBinary.srcR);
2095 return;
2096 case Min_FpTernary:
2097 addHRegUse(u, HRmWrite, i->Min.FpTernary.dst);
2098 addHRegUse(u, HRmRead, i->Min.FpTernary.src1);
2099 addHRegUse(u, HRmRead, i->Min.FpTernary.src2);
2100 addHRegUse(u, HRmRead, i->Min.FpTernary.src3);
2101 return;
2102 case Min_FpConvert:
2103 addHRegUse(u, HRmWrite, i->Min.FpConvert.dst);
2104 addHRegUse(u, HRmRead, i->Min.FpConvert.src);
2105 return;
2106 case Min_FpCompare:
2107 addHRegUse(u, HRmWrite, i->Min.FpCompare.dst);
2108 addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
2109 addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
2110 return;
2111 case Min_FpGpMove:
2112 addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst);
2113 addHRegUse(u, HRmRead, i->Min.FpGpMove.src);
2114 return;
2115 case Min_MoveCond:
2116 addHRegUse(u, HRmModify, i->Min.MoveCond.dst);
2117 addHRegUse(u, HRmRead, i->Min.MoveCond.src);
2118 addHRegUse(u, HRmRead, i->Min.MoveCond.cond);
2119 return;
2120 case Min_EvCheck:
2121          /* We expect both amodes only to mention the guest state pointer,
2122             which isn't allocatable, so this is in fact pointless, but anyway.. */
2123 addRegUsage_MIPSAMode(u, i->Min.EvCheck.amCounter);
2124 addRegUsage_MIPSAMode(u, i->Min.EvCheck.amFailAddr);
2125 return;
2126 case Min_ProfInc:
2127 /* does not use any registers. */
2128 return;
2129 default:
2130 ppMIPSInstr(i, mode64);
2131 vpanic("getRegUsage_MIPSInstr");
2132 break;
2133 }
2134 }
2135
2136 /* local helper */
2137 static void mapReg(HRegRemap * m, HReg * r)
2138 {
2139 *r = lookupHRegRemap(m, *r);
2140 }
2141
2142 void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64)
2143 {
2144 switch (i->tag) {
2145 case Min_LI:
2146 mapReg(m, &i->Min.LI.dst);
2147 break;
2148 case Min_Alu:
2149 mapReg(m, &i->Min.Alu.srcL);
2150 mapRegs_MIPSRH(m, i->Min.Alu.srcR);
2151 mapReg(m, &i->Min.Alu.dst);
2152 return;
2153 case Min_Shft:
2154 mapReg(m, &i->Min.Shft.srcL);
2155 mapRegs_MIPSRH(m, i->Min.Shft.srcR);
2156 mapReg(m, &i->Min.Shft.dst);
2157 return;
2158 case Min_Cmp:
2159 mapReg(m, &i->Min.Cmp.srcL);
2160 mapReg(m, &i->Min.Cmp.srcR);
2161 mapReg(m, &i->Min.Cmp.dst);
2162 return;
2163 case Min_Unary:
2164 mapReg(m, &i->Min.Unary.src);
2165 mapReg(m, &i->Min.Unary.dst);
2166 return;
2167 case Min_Mul:
2168 mapReg(m, &i->Min.Mul.dst);
2169 mapReg(m, &i->Min.Mul.srcL);
2170 mapReg(m, &i->Min.Mul.srcR);
2171 return;
2172 case Min_Mthi:
2173 case Min_Mtlo:
2174 mapReg(m, &i->Min.MtHL.src);
2175 return;
2176 case Min_Mfhi:
2177 case Min_Mflo:
2178 mapReg(m, &i->Min.MfHL.dst);
2179 return;
2180 case Min_Macc:
2181 mapReg(m, &i->Min.Macc.srcL);
2182 mapReg(m, &i->Min.Macc.srcR);
2183 return;
2184 case Min_Div:
2185 mapReg(m, &i->Min.Div.srcL);
2186 mapReg(m, &i->Min.Div.srcR);
2187 return;
2188 case Min_Call:
2189 {
2190 if (i->Min.Call.cond != MIPScc_AL)
2191 mapReg(m, &i->Min.Call.src);
2192 return;
2193 }
2194 case Min_XDirect:
2195 mapRegs_MIPSAMode(m, i->Min.XDirect.amPC);
2196 return;
2197 case Min_XIndir:
2198 mapReg(m, &i->Min.XIndir.dstGA);
2199 mapRegs_MIPSAMode(m, i->Min.XIndir.amPC);
2200 return;
2201 case Min_XAssisted:
2202 mapReg(m, &i->Min.XAssisted.dstGA);
2203 mapRegs_MIPSAMode(m, i->Min.XAssisted.amPC);
2204 return;
2205 case Min_Load:
2206 mapRegs_MIPSAMode(m, i->Min.Load.src);
2207 mapReg(m, &i->Min.Load.dst);
2208 return;
2209 case Min_Store:
2210 mapReg(m, &i->Min.Store.src);
2211 mapRegs_MIPSAMode(m, i->Min.Store.dst);
2212 return;
2213 case Min_LoadL:
2214 mapRegs_MIPSAMode(m, i->Min.LoadL.src);
2215 mapReg(m, &i->Min.LoadL.dst);
2216 return;
2217 case Min_StoreC:
2218 mapReg(m, &i->Min.StoreC.src);
2219 mapRegs_MIPSAMode(m, i->Min.StoreC.dst);
2220 return;
2221 case Min_RdWrLR:
2222 mapReg(m, &i->Min.RdWrLR.gpr);
2223 return;
2224 case Min_FpLdSt:
2225 if (i->Min.FpLdSt.sz == 4) {
2226 mapReg(m, &i->Min.FpLdSt.reg);
2227 mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
2228 return;
2229 } else if (i->Min.FpLdSt.sz == 8) {
2230 mapReg(m, &i->Min.FpLdSt.reg);
2231 mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
2232 return;
2233 }
2234 break;
2235 case Min_FpUnary:
2236 mapReg(m, &i->Min.FpUnary.dst);
2237 mapReg(m, &i->Min.FpUnary.src);
2238 return;
2239 case Min_FpBinary:
2240 mapReg(m, &i->Min.FpBinary.dst);
2241 mapReg(m, &i->Min.FpBinary.srcL);
2242 mapReg(m, &i->Min.FpBinary.srcR);
2243 return;
2244 case Min_FpTernary:
2245 mapReg(m, &i->Min.FpTernary.dst);
2246 mapReg(m, &i->Min.FpTernary.src1);
2247 mapReg(m, &i->Min.FpTernary.src2);
2248 mapReg(m, &i->Min.FpTernary.src3);
2249 return;
2250 case Min_FpConvert:
2251 mapReg(m, &i->Min.FpConvert.dst);
2252 mapReg(m, &i->Min.FpConvert.src);
2253 return;
2254 case Min_FpCompare:
2255 mapReg(m, &i->Min.FpCompare.dst);
2256 mapReg(m, &i->Min.FpCompare.srcL);
2257 mapReg(m, &i->Min.FpCompare.srcR);
2258 return;
2259 case Min_MtFCSR:
2260 mapReg(m, &i->Min.MtFCSR.src);
2261 return;
2262 case Min_MfFCSR:
2263 mapReg(m, &i->Min.MfFCSR.dst);
2264 return;
2265 case Min_FpGpMove:
2266 mapReg(m, &i->Min.FpGpMove.dst);
2267 mapReg(m, &i->Min.FpGpMove.src);
2268 return;
2269 case Min_MoveCond:
2270 mapReg(m, &i->Min.MoveCond.dst);
2271 mapReg(m, &i->Min.MoveCond.src);
2272 mapReg(m, &i->Min.MoveCond.cond);
2273 return;
2274 case Min_EvCheck:
2275          /* We expect both amodes only to mention the guest state pointer,
2276             which isn't allocatable, so this is in fact pointless, but anyway.. */
2277 mapRegs_MIPSAMode(m, i->Min.EvCheck.amCounter);
2278 mapRegs_MIPSAMode(m, i->Min.EvCheck.amFailAddr);
2279 return;
2280 case Min_ProfInc:
2281 /* does not use any registers. */
2282 return;
2283 default:
2284 ppMIPSInstr(i, mode64);
2285 vpanic("mapRegs_MIPSInstr");
2286 break;
2287 }
2288
2289 }
2290
2291 /* Figure out if i represents a reg-reg move, and if so assign the
2292 source and destination to *src and *dst. If in doubt say No. Used
2293 by the register allocator to do move coalescing.
2294 */
2295 Bool isMove_MIPSInstr(MIPSInstr * i, HReg * src, HReg * dst)
2296 {
2297 /* Moves between integer regs */
2298 if (i->tag == Min_Alu) {
2299       /* or Rd,Rs,Rs == move Rd,Rs */
2300 if (i->Min.Alu.op != Malu_OR)
2301 return False;
2302 if (i->Min.Alu.srcR->tag != Mrh_Reg)
2303 return False;
2304 if (hregNumber(i->Min.Alu.srcR->Mrh.Reg.reg)
2305 != hregNumber(i->Min.Alu.srcL))
2306 return False;
2307 *src = i->Min.Alu.srcL;
2308 *dst = i->Min.Alu.dst;
2309 return True;
2310 }
2311 return False;
2312 }
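/* For example, an Alu instruction assembled as "or $t0, $t1, $t1"
   (op == Malu_OR, register srcR equal to srcL) is reported as a move
   from $t1 to $t0, allowing the allocator to coalesce the two vregs;
   every other Alu shape falls through to the False path above. */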
2313
2314 /* Generate mips spill/reload instructions under the direction of the
2315 register allocator. */
2316 void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
2317 Int offsetB, Bool mode64)
2318 {
2319 MIPSAMode *am;
2320 vassert(offsetB >= 0);
2321 vassert(!hregIsVirtual(rreg));
2322 *i1 = *i2 = NULL;
2323 am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
2324
2325 switch (hregClass(rreg)) {
2326 case HRcInt64:
2327 vassert(mode64);
2328 *i1 = MIPSInstr_Store(8, am, rreg, mode64);
2329 break;
2330 case HRcInt32:
2331 vassert(!mode64);
2332 *i1 = MIPSInstr_Store(4, am, rreg, mode64);
2333 break;
2334 case HRcFlt32:
2335 vassert(!mode64);
2336 *i1 = MIPSInstr_FpLdSt(False /*Store */ , 4, rreg, am);
2337 break;
2338 case HRcFlt64:
2339 *i1 = MIPSInstr_FpLdSt(False /*Store */ , 8, rreg, am);
2340 break;
2341 default:
2342 ppHRegClass(hregClass(rreg));
2343 vpanic("genSpill_MIPS: unimplemented regclass");
2344 break;
2345 }
2346 }
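/* For example, spilling a 32-bit integer vreg that was assigned slot
   offset 0x40 (an arbitrary illustrative value) yields the single
   instruction "sw rreg, 0x40(<guest state pointer>)"; the 64-bit integer
   and FP cases differ only in the store used (sd, swc1, sdc1). */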
2347
2348 void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
2349 Int offsetB, Bool mode64)
2350 {
2351 MIPSAMode *am;
2352 vassert(!hregIsVirtual(rreg));
2353 am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
2354
2355 switch (hregClass(rreg)) {
2356 case HRcInt64:
2357 vassert(mode64);
2358 *i1 = MIPSInstr_Load(8, rreg, am, mode64);
2359 break;
2360 case HRcInt32:
2361 vassert(!mode64);
2362 *i1 = MIPSInstr_Load(4, rreg, am, mode64);
2363 break;
2364 case HRcFlt32:
2365 if (mode64)
2366 *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
2367 else
2368 *i1 = MIPSInstr_FpLdSt(True /*Load */ , 4, rreg, am);
2369 break;
2370 case HRcFlt64:
2371 *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
2372 break;
2373 default:
2374 ppHRegClass(hregClass(rreg));
2375 vpanic("genReload_MIPS: unimplemented regclass");
2376 break;
2377 }
2378 }
2379
2380 /* --------- The mips assembler --------- */
2381
2382 static UInt iregNo(HReg r, Bool mode64)
2383 {
2384 UInt n;
2385 vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
2386 vassert(!hregIsVirtual(r));
2387 n = hregNumber(r);
2388 vassert(n <= 32);
2389 return n;
2390 }
2391
2392 static UChar fregNo(HReg r, Bool mode64)
2393 {
2394 UInt n;
2395 vassert(!hregIsVirtual(r));
2396 n = hregNumber(r);
2397 vassert(n <= 31);
2398 return n;
2399 }
2400
2401 static UChar dregNo(HReg r)
2402 {
2403 UInt n;
2404 vassert(!hregIsVirtual(r));
2405 n = hregNumber(r);
2406 vassert(n <= 31);
2407 return n;
2408 }
2409
2410 /* Emit 32bit instruction */
2411 static UChar *emit32(UChar * p, UInt w32)
2412 {
2413 #if defined (_MIPSEL)
2414 *p++ = toUChar(w32 & 0x000000FF);
2415 *p++ = toUChar((w32 >> 8) & 0x000000FF);
2416 *p++ = toUChar((w32 >> 16) & 0x000000FF);
2417 *p++ = toUChar((w32 >> 24) & 0x000000FF);
2418 #elif defined (_MIPSEB)
2419 *p++ = toUChar((w32 >> 24) & 0x000000FF);
2420 *p++ = toUChar((w32 >> 16) & 0x000000FF);
2421 *p++ = toUChar((w32 >> 8) & 0x000000FF);
2422 *p++ = toUChar(w32 & 0x000000FF);
2423 #endif
2424 return p;
2425 }
2426 /* Fetch an instruction */
2427 static UInt fetch32 ( UChar* p )
2428 {
2429 UInt w32 = 0;
2430 #if defined (_MIPSEL)
2431 w32 |= ((0xFF & (UInt)p[0]) << 0);
2432 w32 |= ((0xFF & (UInt)p[1]) << 8);
2433 w32 |= ((0xFF & (UInt)p[2]) << 16);
2434 w32 |= ((0xFF & (UInt)p[3]) << 24);
2435 #elif defined (_MIPSEB)
2436 w32 |= ((0xFF & (UInt)p[0]) << 24);
2437 w32 |= ((0xFF & (UInt)p[1]) << 16);
2438 w32 |= ((0xFF & (UInt)p[2]) << 8);
2439 w32 |= ((0xFF & (UInt)p[3]) << 0);
2440 #endif
2441 return w32;
2442 }
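/* Worked example: on a little-endian host (_MIPSEL) emit32 writes the
   word 0x24080004 as the bytes 04 00 08 24, and fetch32 over those same
   bytes returns 0x24080004 again; on _MIPSEB the byte order is simply
   reversed.  The two helpers are exact inverses on either endianness. */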
2443
2444 /* physical structure of mips instructions */
2445 /* type I : opcode - 6 bits
2446 rs - 5 bits
2447 rt - 5 bits
2448 immediate - 16 bits
2449 */
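/* Worked example: mkFormI(p, 9, 0, 8, 4) encodes "addiu $8, $0, 4" as
      (9 << 26) | (0 << 21) | (8 << 16) | 0x0004  ==  0x24080004
   and hands it to emit32. */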
2450 static UChar *mkFormI(UChar * p, UInt opc, UInt rs, UInt rt, UInt imm)
2451 {
2452 UInt theInstr;
2453 vassert(opc < 0x40);
2454 vassert(rs < 0x20);
2455 vassert(rt < 0x20);
2456 imm = imm & 0xFFFF;
2457 theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (imm));
2458 return emit32(p, theInstr);
2459 }
2460
2461 /* type R: opcode - 6 bits
2462 rs - 5 bits
2463 rt - 5 bits
2464 rd - 5 bits
2465 sa - 5 bits
2466 func - 6 bits
2467 */
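/* Worked example: mkFormR(p, 0, 3, 4, 2, 0, 33) encodes "addu $2, $3, $4"
   as (3 << 21) | (4 << 16) | (2 << 11) | 33  ==  0x00641021. */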
2468 static UChar *mkFormR(UChar * p, UInt opc, UInt rs, UInt rt, UInt rd, UInt sa,
2469 UInt func)
2470 {
2471 if (rs >= 0x20)
2472 vex_printf("rs = %d\n", rs);
2473 UInt theInstr;
2474 vassert(opc < 0x40);
2475 vassert(rs < 0x20);
2476 vassert(rt < 0x20);
2477 vassert(rd < 0x20);
2478 vassert(sa < 0x20);
2479    func = func & 0x3F;
2480 theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) |
2481 (func));
2482
2483 return emit32(p, theInstr);
2484 }
2485
2486 static UChar *mkFormS(UChar * p, UInt opc1, UInt rRD, UInt rRS, UInt rRT,
2487 UInt sa, UInt opc2)
2488 {
2489 UInt theInstr;
2490 vassert(opc1 <= 0x3F);
2491 vassert(rRD < 0x20);
2492 vassert(rRS < 0x20);
2493 vassert(rRT < 0x20);
2494 vassert(opc2 <= 0x3F);
2495 vassert(sa >= 0 && sa <= 0x3F);
2496
2497 theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) |
2498 ((sa & 0x1F) << 6) | (opc2));
2499
2500 return emit32(p, theInstr);
2501 }
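/* Worked example: mkFormS(p, 0, 2, 0, 3, 4, 0) encodes "sll $2, $3, 4" as
   (3 << 16) | (2 << 11) | (4 << 6) | 0  ==  0x00031100; opc2 selects the
   shift variant (0 sll, 2 srl, 3 sra, 56 dsll, ...). */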
2502
2503 static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
2504 Bool mode64)
2505 {
2506 UInt rA, idx, r_dst;
2507 vassert(am->tag == Mam_IR);
2508 vassert(am->Mam.IR.index < 0x10000);
2509
2510 rA = iregNo(am->Mam.IR.base, mode64);
2511 idx = am->Mam.IR.index;
2512
2513 if (rSD == 33 || rSD == 34)
2514 r_dst = 24;
2515 else
2516 r_dst = rSD;
2517
2518 if (opc1 < 40) {
2519 /* load */
2520 if (rSD == 33)
2521 /* mfhi */
2522 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
2523 else if (rSD == 34)
2524 /* mflo */
2525 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
2526 }
2527
2528 p = mkFormI(p, opc1, rA, r_dst, idx);
2529
2530 if (opc1 >= 40) {
2531 /* store */
2532 if (rSD == 33)
2533 /* mthi */
2534 p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
2535 else if (rSD == 34)
2536 /* mtlo */
2537 p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
2538 }
2539
2540 return p;
2541 }
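/* Note: rSD values 33 and 34 stand for the HI and LO accumulators, which
   have no load/store encoding of their own; $24 is therefore used as a
   scratch register and the value is shuttled through it with mfhi/mflo
   resp. mthi/mtlo around the actual memory access. */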
2542
2543 static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
2544 Bool mode64)
2545 {
2546 UInt rA, rB, r_dst;
2547 vassert(am->tag == Mam_RR);
2548
2549 rA = iregNo(am->Mam.RR.base, mode64);
2550 rB = iregNo(am->Mam.RR.index, mode64);
2551
2552 if (rSD == 33 || rSD == 34)
2553 r_dst = 24;
2554 else
2555 r_dst = rSD;
2556
2557 if (opc1 < 40) {
2558 /* load */
2559 if (rSD == 33)
2560 /* mfhi */
2561 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
2562 else if (rSD == 34)
2563 /* mflo */
2564 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
2565 }
2566
2567 if (mode64) {
2568          /* daddu rA, rA, rB
2569             sd/ld r_dst, 0(rA)
2570             dsubu rA, rA, rB */
2571 p = mkFormR(p, 0, rA, rB, rA, 0, 45);
2572 p = mkFormI(p, opc1, rA, r_dst, 0);
2573 p = mkFormR(p, 0, rA, rB, rA, 0, 47);
2574 } else {
2575 /* addu rA, rA, rB
2576 sw/lw r_dst, 0(rA)
2577 subu rA, rA, rB */
2578 p = mkFormR(p, 0, rA, rB, rA, 0, 33);
2579 p = mkFormI(p, opc1, rA, r_dst, 0);
2580 p = mkFormR(p, 0, rA, rB, rA, 0, 35);
2581 }
2582 if (opc1 >= 40) {
2583 /* store */
2584 if (rSD == 33)
2585 /* mthi */
2586 p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
2587 else if (rSD == 34)
2588 /* mtlo */
2589 p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
2590 }
2591
2592 return p;
2593 }
2594
2595 /* Load imm to r_dst */
2596 static UChar *mkLoadImm(UChar * p, UInt r_dst, ULong imm, Bool mode64)
2597 {
2598 if (!mode64) {
2599 vassert(r_dst < 0x20);
2600 UInt u32 = (UInt) imm;
2601 Int s32 = (Int) u32;
2602 Long s64 = (Long) s32;
2603 imm = (ULong) s64;
2604 }
2605
2606 if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
2607 /* sign-extendable from 16 bits
2608 addiu r_dst, 0, imm => li r_dst, imm */
2609 p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF);
2610 } else {
2611 if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
2612             /* sign-extendable from 32 bits:
2613                lui r_dst, (imm >> 16) */
2615 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
2616 /* ori r_dst, r_dst, (imm & 0xFFFF) */
2617 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
2618 } else {
2619 vassert(mode64);
2620             /* lui r_dst, imm[63:48] */
2621 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
2622 /* ori */
2623 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
2624 /* shift */
2625 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
2626 /* ori */
2627 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
2628 /* shift */
2629 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
2630 /* ori */
2631 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
2632 }
2633 }
2634 return p;
2635 }
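/* For example: imm = 0x1234 fits a signed 16-bit field and becomes the
   single instruction "addiu r_dst, $0, 0x1234"; imm = 0x12345678 becomes
   "lui r_dst, 0x1234; ori r_dst, r_dst, 0x5678"; anything wider (mode64
   only) takes the six-instruction lui/ori/dsll/ori/dsll/ori sequence. */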
2636
2637 /* A simplified version of mkLoadImm that always generates 2 or 6
2638 instructions (32 or 64 bits respectively) even if it could generate
2639 fewer. This is needed for generating fixed sized patchable
2640 sequences. */
2641 static UChar* mkLoadImm_EXACTLY2or6 ( UChar* p,
2642 UInt r_dst, ULong imm, Bool mode64)
2643 {
2644 vassert(r_dst < 0x20);
2645
2646 if (!mode64) {
2647 /* In 32-bit mode, make sure the top 32 bits of imm are a sign
2648 extension of the bottom 32 bits. (Probably unnecessary.) */
2649 UInt u32 = (UInt)imm;
2650 Int s32 = (Int)u32;
2651 Long s64 = (Long)s32;
2652 imm = (ULong)s64;
2653 }
2654
2655 if (!mode64) {
2656       /* lui r_dst, (imm >> 16) */
2659 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
2660 /* ori r_dst, r_dst, (imm & 0xFFFF) */
2661 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
2662 } else {
2663 /* full 64bit immediate load: 6 (six!) insns. */
2664 vassert(mode64);
2665       /* lui r_dst, imm[63:48] */
2666 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
2667 /* ori */
2668 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
2669 /* shift */
2670 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
2671 /* ori */
2672 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
2673 /* shift */
2674 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
2675 /* ori */
2676 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
2677 }
2678 return p;
2679 }
2680
2681 /* Checks whether the sequence of bytes at p was indeed created
2682 by mkLoadImm_EXACTLY2or6 with the given parameters. */
2683 static Bool isLoadImm_EXACTLY2or6 ( UChar* p_to_check,
2684 UInt r_dst, ULong imm, Bool mode64 )
2685 {
2686 vassert(r_dst < 0x20);
2687 Bool ret;
2688 if (!mode64) {
2689 /* In 32-bit mode, make sure the top 32 bits of imm are a sign
2690 extension of the bottom 32 bits. (Probably unnecessary.) */
2691 UInt u32 = (UInt)imm;
2692 Int s32 = (Int)u32;
2693 Long s64 = (Long)s32;
2694 imm = (ULong)s64;
2695 }
2696
2697 if (!mode64) {
2698 UInt expect[2] = { 0, 0 };
2699 UChar* p = (UChar*)&expect[0];
2700       /* lui r_dst, (imm >> 16) */
2701 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
2702 /* ori r_dst, r_dst, (imm & 0xFFFF) */
2703 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
2704 vassert(p == (UChar*)&expect[2]);
2705
2706 ret = fetch32(p_to_check + 0) == expect[0]
2707 && fetch32(p_to_check + 4) == expect[1];
2708 } else {
2709 UInt expect[6] = { 0, 0, 0, 0, 0, 0};
2710 UChar* p = (UChar*)&expect[0];
2711       /* lui r_dst, imm[63:48] */
2712 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
2713 /* ori */
2714 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
2715 /* shift */
2716 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
2717 /* ori */
2718 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
2719 /* shift */
2720 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
2721 /* ori */
2722 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
2723 vassert(p == (UChar*)&expect[6]);
2724
2725 ret = fetch32(p_to_check + 0) == expect[0]
2726 && fetch32(p_to_check + 4) == expect[1]
2727 && fetch32(p_to_check + 8) == expect[2]
2728 && fetch32(p_to_check + 12) == expect[3]
2729 && fetch32(p_to_check + 16) == expect[4]
2730 && fetch32(p_to_check + 20) == expect[5];
2731 }
2732 return ret;
2733 }
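/* This is intended for the XDirect chain/unchain logic, which needs to
   check that the fixed-size immediate-load it is about to patch really
   is the sequence produced by mkLoadImm_EXACTLY2or6. */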
2734
2735 /* Generate a machine-word sized load or store. Simplified version of
2736 the Min_Load and Min_Store cases below.
2737 This will generate 32-bit load/store on MIPS32, and 64-bit load/store on
2738 MIPS64 platforms.
2739 */
2740 static UChar* do_load_or_store_machine_word ( UChar* p, Bool isLoad, UInt reg,
2741 MIPSAMode* am, Bool mode64 )
2742 {
2743 if (isLoad) { /* load */
2744 switch (am->tag) {
2745 case Mam_IR:
2746 if (mode64) {
2747 vassert(0 == (am->Mam.IR.index & 3));
2748 }
2749 p = doAMode_IR(p, mode64 ? 55 : 35, reg, am, mode64);
2750 break;
2751 case Mam_RR:
2752 /* we could handle this case, but we don't expect to ever
2753 need to. */
2754 vassert(0);
2755 break;
2756 default:
2757 vassert(0);
2758 break;
2759 }
2760 } else /* store */ {
2761 switch (am->tag) {
2762 case Mam_IR:
2763 if (mode64) {
2764 vassert(0 == (am->Mam.IR.index & 3));
2765 }
2766 p = doAMode_IR(p, mode64 ? 63 : 43, reg, am, mode64);
2767 break;
2768 case Mam_RR:
2769 /* we could handle this case, but we don't expect to ever
2770 need to. */
2771 vassert(0);
2772 break;
2773 default:
2774 vassert(0);
2775 break;
2776 }
2777 }
2778 return p;
2779 }
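/* The opcodes used above are the primary MIPS opcodes 35 (lw), 43 (sw),
   55 (ld) and 63 (sd), so the helper emits exactly one load or store of
   the host word size for an IR-form amode. */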
2780
2781 /* Generate a 32-bit sized load or store. Simplified version of
2782 do_load_or_store_machine_word above. */
2783 static UChar* do_load_or_store_word32 ( UChar* p, Bool isLoad, UInt reg,
2784 MIPSAMode* am, Bool mode64 )
2785 {
2786 if (isLoad) { /* load */
2787 switch (am->tag) {
2788 case Mam_IR:
2789 if (mode64) {
2790 vassert(0 == (am->Mam.IR.index & 3));
2791 }
2792 p = doAMode_IR(p, 35, reg, am, mode64);
2793 break;
2794 case Mam_RR:
2795 /* we could handle this case, but we don't expect to ever
2796 need to. */
2797 vassert(0);
2798 break;
2799 default:
2800 vassert(0);
2801 break;
2802 }
2803 } else /* store */ {
2804 switch (am->tag) {
2805 case Mam_IR:
2806 if (mode64) {
2807 vassert(0 == (am->Mam.IR.index & 3));
2808 }
2809 p = doAMode_IR(p, 43, reg, am, mode64);
2810 break;
2811 case Mam_RR:
2812 /* we could handle this case, but we don't expect to ever
2813 need to. */
2814 vassert(0);
2815 break;
2816 default:
2817 vassert(0);
2818 break;
2819 }
2820 }
2821 return p;
2822 }
2823
2824 /* Move r_src to r_dst */
2825 static UChar *mkMoveReg(UChar * p, UInt r_dst, UInt r_src)
2826 {
2827 vassert(r_dst < 0x20);
2828 vassert(r_src < 0x20);
2829
2830 if (r_dst != r_src) {
2831 /* or r_dst, r_src, r_src */
2832 p = mkFormR(p, 0, r_src, r_src, r_dst, 0, 37);
2833 }
2834 return p;
2835 }
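/* E.g. mkMoveReg(p, 5, 4) emits "or $5, $4, $4" (0x00842825), whereas
   mkMoveReg(p, 4, 4) emits nothing at all. */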
2836
2837 /* Emit an instruction into buf and return the number of bytes used.
2838 Note that buf is not the insn's final place, and therefore it is
2839 imperative to emit position-independent code. If the emitted
2840 instruction was a profiler inc, set *is_profInc to True, else
2841 leave it unchanged. */
2842 Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
2843 UChar* buf, Int nbuf, MIPSInstr* i,
2844 Bool mode64,
2845 void* disp_cp_chain_me_to_slowEP,
2846 void* disp_cp_chain_me_to_fastEP,
2847 void* disp_cp_xindir,
2848 void* disp_cp_xassisted )
2849 {
2850 UChar *p = &buf[0];
2851 UChar *ptmp = p;
2852 vassert(nbuf >= 32);
2853
2854 switch (i->tag) {
2855 case Min_LI:
2856 p = mkLoadImm(p, iregNo(i->Min.LI.dst, mode64), i->Min.LI.imm, mode64);
2857 goto done;
2858
2859 case Min_Alu: {
2860 MIPSRH *srcR = i->Min.Alu.srcR;
2861 Bool immR = toBool(srcR->tag == Mrh_Imm);
2862 UInt r_dst = iregNo(i->Min.Alu.dst, mode64);
2863 UInt r_srcL = iregNo(i->Min.Alu.srcL, mode64);
2864 UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
2865 mode64);
2866 switch (i->Min.Alu.op) {
2867 /* Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, Malu_SLT */
2868 case Malu_ADD:
2869 if (immR) {
2870 vassert(srcR->Mrh.Imm.imm16 != 0x8000);
2871             /* addiu: opcode 9 is used for both the signed and the
2872                unsigned immediate case; it never traps on overflow */
2873             p = mkFormI(p, 9, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
2877 } else {
2878 /* addu */
2879 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 33);
2880 }
2881 break;
2882 case Malu_SUB:
2883 if (immR) {
2884                /* addi, but with negated imm */
2885 vassert(srcR->Mrh.Imm.syned);
2886 vassert(srcR->Mrh.Imm.imm16 != 0x8000);
2887 p = mkFormI(p, 8, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
2888 } else {
2889 /* subu */
2890 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 35);
2891 }
2892 break;
2893 case Malu_AND:
2894 if (immR) {
2895 /* andi */
2896 vassert(!srcR->Mrh.Imm.syned);
2897 p = mkFormI(p, 12, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
2898 } else {
2899 /* and */
2900 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 36);
2901 }
2902 break;
2903 case Malu_OR:
2904 if (immR) {
2905 /* ori */
2906 vassert(!srcR->Mrh.Imm.syned);
2907 p = mkFormI(p, 13, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
2908 } else {
2909 /* or */
2910 if (r_srcL == 33)
2911 /* MFHI */
2912 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
2913 else if (r_srcL == 34)
2914 /* MFLO */
2915 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
2916 else if (r_dst == 33)
2917 /* MTHI */
2918 p = mkFormR(p, 0, r_srcL, 0, 0, 0, 17);
2919 else if (r_dst == 34)
2920 /* MTLO */
2921 p = mkFormR(p, 0, r_srcL, 0, 0, 0, 19);
2922 else
2923 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 37);
2924 }
2925 break;
2926 case Malu_NOR:
2927 /* nor */
2928 vassert(!immR);
2929 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 39);
2930 break;
2931 case Malu_XOR:
2932 if (immR) {
2933 /* xori */
2934 vassert(!srcR->Mrh.Imm.syned);
2935 p = mkFormI(p, 14, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
2936 } else {
2937 /* xor */
2938 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
2939 }
2940 break;
2941 case Malu_DADD:
2942 if (immR) {
2943 vassert(srcR->Mrh.Imm.syned);
2944 vassert(srcR->Mrh.Imm.imm16 != 0x8000);
2945 p = mkFormI(p, 25, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
2946 } else {
2947 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 45);
2948 }
2949 break;
2950 case Malu_DSUB:
2951 if (immR) {
2952 p = mkFormI(p, 25, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
2953 } else {
2954 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 47);
2955 }
2956 break;
2957 case Malu_SLT:
2958 if (immR) {
2959 goto bad;
2960 } else {
2961 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
2962 }
2963 break;
2964
2965 default:
2966 goto bad;
2967 }
2968 goto done;
2969 }
2970
2971 case Min_Shft: {
2972 MIPSRH *srcR = i->Min.Shft.srcR;
2973 Bool sz32 = i->Min.Shft.sz32;
2974 Bool immR = toBool(srcR->tag == Mrh_Imm);
2975 UInt r_dst = iregNo(i->Min.Shft.dst, mode64);
2976 UInt r_srcL = iregNo(i->Min.Shft.srcL, mode64);
2977 UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
2978 mode64);
2979 if (!mode64)
2980 vassert(sz32);
2981 switch (i->Min.Shft.op) {
2982 case Mshft_SLL:
2983 if (sz32) {
2984 if (immR) {
2985 UInt n = srcR->Mrh.Imm.imm16;
2986 vassert(n >= 0 && n <= 32);
2987 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 0);
2988 } else {
2989 /* shift variable */
2990 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 4);
2991 }
2992 } else {
2993 if (immR) {
2994 UInt n = srcR->Mrh.Imm.imm16;
2995 vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
2996 if (n >= 0 && n < 32) {
2997 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 56);
2998 } else {
2999 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 60);
3000 }
3001 } else {
3002 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 20);
3003 }
3004 }
3005 break;
3006
3007 case Mshft_SRL:
3008 if (sz32) {
3009 /* SRL, SRLV */
3010 if (immR) {
3011 UInt n = srcR->Mrh.Imm.imm16;
3012 vassert(n >= 0 && n < 32);
3013 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 2);
3014 } else {
3015 /* shift variable */
3016 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 6);
3017 }
3018 } else {
3019 /* DSRL, DSRL32, DSRLV */
3020 if (immR) {
3021 UInt n = srcR->Mrh.Imm.imm16;
3022 vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
3023 if (n >= 0 && n < 32) {
3024 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 58);
3025 } else {
3026 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 62);
3027 }
3028 } else {
3029 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 22);
3030 }
3031 }
3032 break;
3033
3034 case Mshft_SRA:
3035 if (sz32) {
3036 /* SRA, SRAV */
3037 if (immR) {
3038 UInt n = srcR->Mrh.Imm.imm16;
3039 vassert(n >= 0 && n < 32);
3040 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 3);
3041 } else {
3042 /* shift variable */
3043 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 7);
3044 }
3045 } else {
3046 /* DSRA, DSRA32, DSRAV */
3047 if (immR) {
3048 UInt n = srcR->Mrh.Imm.imm16;
3049 vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
3050 if (n >= 0 && n < 32) {
3051 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 59);
3052 } else {
3053 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 63);
3054 }
3055 } else {
3056 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 23);
3057 }
3058 }
3059 break;
3060
3061 default:
3062 goto bad;
3063 }
3064
3065 goto done;
3066 }
3067
3068 case Min_Unary: {
3069 UInt r_dst = iregNo(i->Min.Unary.dst, mode64);
3070 UInt r_src = iregNo(i->Min.Unary.src, mode64);
3071
3072 switch (i->Min.Unary.op) {
3073 /* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */
3074 case Mun_CLO: /* clo */
3075 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 33);
3076 break;
3077 case Mun_CLZ: /* clz */
3078 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 32);
3079 break;
3080 case Mun_NOP: /* nop (sll r0,r0,0) */
3081 p = mkFormR(p, 0, 0, 0, 0, 0, 0);
3082 break;
3083 case Mun_DCLO: /* clo */
3084 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 37);
3085 break;
3086 case Mun_DCLZ: /* clz */
3087 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 36);
3088 break;
3089 }
3090 goto done;
3091 }
3092
3093 case Min_Cmp: {
3094 UInt r_srcL = iregNo(i->Min.Cmp.srcL, mode64);
3095 UInt r_srcR = iregNo(i->Min.Cmp.srcR, mode64);
3096 UInt r_dst = iregNo(i->Min.Cmp.dst, mode64);
3097
3098 switch (i->Min.Cmp.cond) {
3099 case MIPScc_EQ:
3100 /* xor r_dst, r_srcL, r_srcR
3101 sltiu r_dst, r_dst, 1 */
3102 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
3103 p = mkFormI(p, 11, r_dst, r_dst, 1);
3104 break;
3105 case MIPScc_NE:
3106 /* xor r_dst, r_srcL, r_srcR
3107 sltu r_dst, zero, r_dst */
3108 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
3109 p = mkFormR(p, 0, 0, r_dst, r_dst, 0, 43);
3110 break;
3111 case MIPScc_LT:
3112 /* slt r_dst, r_srcL, r_srcR */
3113 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
3114 break;
3115 case MIPScc_LO:
3116 /* sltu r_dst, r_srcL, r_srcR */
3117 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43);
3118 break;
3119 case MIPScc_LE:
3120 /* slt r_dst, r_srcR, r_srcL
3121 xori r_dst, r_dst, 1 */
3122 p = mkFormR(p, 0, r_srcR, r_srcL, r_dst, 0, 42);
3123 p = mkFormI(p, 14, r_dst, r_dst, 1);
3124 break;
3125 case MIPScc_LS:
3126             /* sltu r_dst, r_srcR, r_srcL
3127                xori r_dst, r_dst, 1 */
3128 p = mkFormR(p, 0, r_srcR, r_srcL, r_dst, 0, 43);
3129 p = mkFormI(p, 14, r_dst, r_dst, 1);
3130 break;
3131 default:
3132 goto bad;
3133 }
3134 goto done;
3135 }
3136
3137 case Min_Mul: {
3138 Bool syned = i->Min.Mul.syned;
3139 Bool widening = i->Min.Mul.widening;
3140 Bool sz32 = i->Min.Mul.sz32;
3141 UInt r_srcL = iregNo(i->Min.Mul.srcL, mode64);
3142 UInt r_srcR = iregNo(i->Min.Mul.srcR, mode64);
3143 UInt r_dst = iregNo(i->Min.Mul.dst, mode64);
3144 if (widening) {
3145 if (sz32) {
3146 if (syned)
3147 /* mult */
3148 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 24);
3149 else
3150 /* multu */
3151 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 25);
3152 } else {
3153 if (syned) /* DMULT r_dst,r_srcL,r_srcR */
3154 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 28);
3155 else /* DMULTU r_dst,r_srcL,r_srcR */
3156 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 29);
3157 }
3158 } else {
3159 if (sz32)
3160 /* mul */
3161 p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2);
3162 else if (mode64 && !sz32)
3163 p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2);
3164 else
3165 goto bad;
3166 }
3167 goto done;
3168 }
3169
3170 case Min_Macc: {
3171 Bool syned = i->Min.Macc.syned;
3172 UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64);
3173 UInt r_srcR = iregNo(i->Min.Macc.srcR, mode64);
3174
3175 if (syned) {
3176 switch (i->Min.Macc.op) {
3177 case Macc_ADD:
3178 /* madd */
3179 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 0);
3180 break;
3181 case Macc_SUB:
3182 /* msub */
3183 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
3184 4);
3185 break;
3186 default:
3187 goto bad;
3188 }
3189 } else {
3190 switch (i->Min.Macc.op) {
3191 case Macc_ADD:
3192 /* maddu */
3193 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
3194 1);
3195 break;
3196 case Macc_SUB:
3197 /* msubu */
3198 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
3199 5);
3200 break;
3201 default:
3202 goto bad;
3203 }
3204 }
3205
3206 goto done;
3207 }
3208
3209 case Min_Div: {
3210 Bool syned = i->Min.Div.syned;
3211 Bool sz32 = i->Min.Div.sz32;
3212 UInt r_srcL = iregNo(i->Min.Div.srcL, mode64);
3213 UInt r_srcR = iregNo(i->Min.Div.srcR, mode64);
3214 if (sz32) {
3215 if (syned) {
3216 /* div */
3217 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 26);
3218 } else
3219 /* divu */
3220 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 27);
3221 goto done;
3222 } else {
3223 if (syned) {
3224 /* ddiv */
3225 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 30);
3226 } else
3227 /* ddivu */
3228 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 31);
3229 goto done;
3230 }
3231 }
3232
3233 case Min_Mthi: {
3234 UInt r_src = iregNo(i->Min.MtHL.src, mode64);
3235 p = mkFormR(p, 0, r_src, 0, 0, 0, 17);
3236 goto done;
3237 }
3238
3239 case Min_Mtlo: {
3240 UInt r_src = iregNo(i->Min.MtHL.src, mode64);
3241 p = mkFormR(p, 0, r_src, 0, 0, 0, 19);
3242 goto done;
3243 }
3244
3245 case Min_Mfhi: {
3246 UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
3247 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
3248 goto done;
3249 }
3250
3251 case Min_Mflo: {
3252 UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
3253 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
3254 goto done;
3255 }
3256
3257 case Min_MtFCSR: {
3258 UInt r_src = iregNo(i->Min.MtFCSR.src, mode64);
3259 /* ctc1 */
3260 p = mkFormR(p, 17, 6, r_src, 31, 0, 0);
3261 goto done;
3262 }
3263
3264 case Min_MfFCSR: {
3265 UInt r_dst = iregNo(i->Min.MfFCSR.dst, mode64);
3266 /* cfc1 */
3267 p = mkFormR(p, 17, 2, r_dst, 31, 0, 0);
3268 goto done;
3269 }
3270
3271 case Min_Call: {
3272 if (i->Min.Call.cond != MIPScc_AL
3273 && i->Min.Call.rloc.pri != RLPri_None) {
3274 /* The call might not happen (it isn't unconditional) and
3275 it returns a result. In this case we will need to
3276 generate a control flow diamond to put 0x555..555 in
3277 the return register(s) in the case where the call
3278 doesn't happen. If this ever becomes necessary, maybe
3279 copy code from the ARM equivalent. Until that day,
3280 just give up. */
3281 goto bad;
3282 }
3283 MIPSCondCode cond = i->Min.Call.cond;
3284 UInt r_dst = 25; /* using %r25 as address temporary -
3285 see getRegUsage_MIPSInstr */
3286
3287 /* jump over the following insns if condition does not hold */
3288 if (cond != MIPScc_AL) {
3289 /* jmp fwds if !condition */
3290 /* don't know how many bytes to jump over yet...
3291 make space for a jump instruction + nop!!! and fill in later. */
3292 ptmp = p; /* fill in this bit later */
3293 p += 8; /* p += 8 */
3294 }
3295
3296 if (!mode64) {
3297 /* addiu $29, $29, -16 */
3298 p = mkFormI(p, 9, 29, 29, 0xFFF0);
3299 }
3300
3301 /* load target to r_dst; p += 4|8 */
3302 p = mkLoadImm(p, r_dst, i->Min.Call.target, mode64);
3303
3304 /* jalr r_dst */
3305 p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); /* p += 4 */
3306 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
3307
3308 if (!mode64) {
3309 /* addiu $29, $29, 16 */
3310 p = mkFormI(p, 9, 29, 29, 0x0010);
3311 }
3312
3313 /* Fix up the conditional jump, if there was one. */
3314 if (cond != MIPScc_AL) {
3315 UInt r_src = iregNo(i->Min.Call.src, mode64);
3316 Int delta = p - ptmp;
3317
3318 vassert(delta >= 20 && delta <= 32);
3319 /* blez r_src, delta/4-1
3320 nop */
3321 ptmp = mkFormI(ptmp, 6, r_src, 0, delta / 4 - 1);
3322 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
3323 }
3324 goto done;
3325 }
3326
3327 case Min_XDirect: {
3328 /* NB: what goes on here has to be very closely coordinated
3329 with the chainXDirect_MIPS and unchainXDirect_MIPS below. */
3330 /* We're generating chain-me requests here, so we need to be
3331 sure this is actually allowed -- no-redir translations
3332 can't use chain-me's. Hence: */
3333 vassert(disp_cp_chain_me_to_slowEP != NULL);
3334 vassert(disp_cp_chain_me_to_fastEP != NULL);
3335
3336 /* Use ptmp for backpatching conditional jumps. */
3337 ptmp = NULL;
3338
3339 /* First off, if this is conditional, create a conditional
3340 jump over the rest of it. Or at least, leave a space for
3341 it that we will shortly fill in. */
3342 if (i->Min.XDirect.cond != MIPScc_AL) {
3343 vassert(i->Min.XDirect.cond != MIPScc_NV);
3344 ptmp = p;
3345 p += 12;
3346 }
3347
3348 /* Update the guest PC. */
3349 /* move r9, dstGA */
3350 /* sw/sd r9, amPC */
3351 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, (ULong)i->Min.XDirect.dstGA,
3352 mode64);
3353 p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9,
3354 i->Min.XDirect.amPC, mode64);
3355
3356 /* --- FIRST PATCHABLE BYTE follows --- */
3357 /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
3358 calling to) backs up the return address, so as to find the
3359 address of the first patchable byte. So: don't change the
3360 number of instructions (3) below. */
3361 /* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */
3362 /* jr r9 */
3363 void* disp_cp_chain_me
3364 = i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
3365 : disp_cp_chain_me_to_slowEP;
3366 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
3367 Ptr_to_ULong(disp_cp_chain_me), mode64);
3368 /* jalr $9 */
3369 /* nop */
3370 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
3371 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
3372 /* --- END of PATCHABLE BYTES --- */
3373
3374 /* Fix up the conditional jump, if there was one. */
3375 if (i->Min.XDirect.cond != MIPScc_AL) {
3376 Int delta = p - ptmp;
3377 delta = delta / 4 - 3;
3378 vassert(delta > 0 && delta < 40);
3379
3380 /* lw $9, COND_OFFSET(GuestSP)
3381 beq $9, $0, 2
3382 nop */
3383 ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
3384 ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
3385 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
3386 }
3387 goto done;
3388 }
3389
3390 case Min_XIndir: {
3391 /* We're generating transfers that could lead indirectly to a
3392 chain-me, so we need to be sure this is actually allowed --
3393 no-redir translations are not allowed to reach normal
3394 translations without going through the scheduler. That means
3395 no XDirects or XIndirs out from no-redir translations.
3396 Hence: */
3397 vassert(disp_cp_xindir != NULL);
3398
3399 /* Use ptmp for backpatching conditional jumps. */
3400 ptmp = NULL;
3401
3402 /* First off, if this is conditional, create a conditional
3403 jump over the rest of it. */
3404 if (i->Min.XIndir.cond != MIPScc_AL) {
3405 vassert(i->Min.XIndir.cond != MIPScc_NV);
3406 ptmp = p;
3407 p += 12;
3408 }
3409
3410 /* Update the guest PC. */
3411 /* sw/sd r-dstGA, amPC */
3412 p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
3413 iregNo(i->Min.XIndir.dstGA, mode64),
3414 i->Min.XIndir.amPC, mode64);
3415
3416 /* move r9, VG_(disp_cp_xindir) */
3417 /* jalr r9 */
3418 /* nop */
3419 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
3420 Ptr_to_ULong(disp_cp_xindir), mode64);
3421 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
3422 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
3423
3424 /* Fix up the conditional jump, if there was one. */
3425 if (i->Min.XIndir.cond != MIPScc_AL) {
3426 Int delta = p - ptmp;
3427 delta = delta / 4 - 3;
3428 vassert(delta > 0 && delta < 40);
3429
3430 /* lw $9, COND_OFFSET($GuestSP)
3431 beq $9, $0, 2
3432 nop */
3433 ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
3434 ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
3435 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
3436 }
3437 goto done;
3438 }
3439
3440 case Min_XAssisted: {
3441 /* First off, if this is conditional, create a conditional jump
3442 over the rest of it. Or at least, leave a space for it that
3443 we will shortly fill in. */
3444 ptmp = NULL;
3445 if (i->Min.XAssisted.cond != MIPScc_AL) {
3446 vassert(i->Min.XAssisted.cond != MIPScc_NV);
3447 ptmp = p;
3448 p += 12;
3449 }
3450
3451 /* Update the guest PC. */
3452 /* sw/sd r-dstGA, amPC */
3453 p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
3454                                            iregNo(i->Min.XAssisted.dstGA, mode64),
3455                                            i->Min.XAssisted.amPC, mode64);
3456
3457 /* imm32/64 r31, $magic_number */
3458 UInt trcval = 0;
3459 switch (i->Min.XAssisted.jk) {
3460 case Ijk_ClientReq: trcval = VEX_TRC_JMP_CLIENTREQ; break;
3461 case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
3462 /* case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break;
3463 case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break; */
3464 case Ijk_EmWarn: trcval = VEX_TRC_JMP_EMWARN; break;
3465 case Ijk_EmFail: trcval = VEX_TRC_JMP_EMFAIL; break;
3466 /* case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break; */
3467 case Ijk_NoDecode: trcval = VEX_TRC_JMP_NODECODE; break;
3468 case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
3469 case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break;
3470 case Ijk_SigILL: trcval = VEX_TRC_JMP_SIGILL; break;
3471 case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break;
3472 /* case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; */
3473 case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break;
3474 case Ijk_SigFPE_IntDiv: trcval = VEX_TRC_JMP_SIGFPE_INTDIV; break;
3475 case Ijk_SigFPE_IntOvf: trcval = VEX_TRC_JMP_SIGFPE_INTOVF; break;
3476 case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break;
3477 /* We don't expect to see the following being assisted.
3478 case Ijk_Ret:
3479 case Ijk_Call:
3480 fallthrough */
3481 default:
3482 ppIRJumpKind(i->Min.XAssisted.jk);
3483 vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind");
3484 }
3485 vassert(trcval != 0);
3486 p = mkLoadImm_EXACTLY2or6(p, /*r*/ GuestSP, trcval, mode64);
3487
3488 /* move r9, VG_(disp_cp_xassisted) */
3489 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
3490 (ULong)Ptr_to_ULong(disp_cp_xassisted), mode64);
3491 /* jalr $9
3492 nop */
3493 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
3494 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
3495
3496 /* Fix up the conditional jump, if there was one. */
3497 if (i->Min.XAssisted.cond != MIPScc_AL) {
3498 Int delta = p - ptmp;
3499 delta = delta / 4 - 3;
3500 vassert(delta > 0 && delta < 40);
3501
3502 /* lw $9, COND_OFFSET($GuestSP)
3503 beq $9, $0, 2
3504 nop */
3505 ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
3506 ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
3507 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
3508 }
3509 goto done;
3510 }
3511
3512 case Min_Load: {
3513 MIPSAMode *am_addr = i->Min.Load.src;
3514 if (am_addr->tag == Mam_IR) {
3515 UInt r_dst = iregNo(i->Min.Load.dst, mode64);
3516 UInt opc, sz = i->Min.Load.sz;
3517 if (mode64 && (sz == 4 || sz == 8)) {
3518 /* should be guaranteed to us by iselWordExpr_AMode */
3519 vassert(0 == (am_addr->Mam.IR.index & 3));
3520 }
3521 switch (sz) {
3522 case 1:
3523 opc = 32;
3524 break;
3525 case 2:
3526 opc = 33;
3527 break;
3528 case 4:
3529 opc = 35;
3530 break;
3531 case 8:
3532 opc = 55;
3533 vassert(mode64);
3534 break;
3535 default:
3536 goto bad;
3537 }
3538
3539 p = doAMode_IR(p, opc, r_dst, am_addr, mode64);
3540 goto done;
3541 } else if (am_addr->tag == Mam_RR) {
3542 UInt r_dst = iregNo(i->Min.Load.dst, mode64);
3543 UInt opc, sz = i->Min.Load.sz;
3544
3545 switch (sz) {
3546 case 1:
3547 opc = 32;
3548 break;
3549 case 2:
3550 opc = 33;
3551 break;
3552 case 4:
3553 opc = 35;
3554 break;
3555 case 8:
3556 opc = 55;
3557 vassert(mode64);
3558 break;
3559 default:
3560 goto bad;
3561 }
3562
3563 p = doAMode_RR(p, opc, r_dst, am_addr, mode64);
3564 goto done;
3565 }
3566 break;
3567 }
3568
3569 case Min_Store: {
3570 MIPSAMode *am_addr = i->Min.Store.dst;
3571 if (am_addr->tag == Mam_IR) {
3572 UInt r_src = iregNo(i->Min.Store.src, mode64);
3573 UInt opc, sz = i->Min.Store.sz;
3574 if (mode64 && (sz == 4 || sz == 8)) {
3575 /* should be guaranteed to us by iselWordExpr_AMode */
3576 vassert(0 == (am_addr->Mam.IR.index & 3));
3577 }
3578 switch (sz) {
3579 case 1:
3580 opc = 40;
3581 break;
3582 case 2:
3583 opc = 41;
3584 break;
3585 case 4:
3586 opc = 43;
3587 break;
3588 case 8:
3589 vassert(mode64);
3590 opc = 63;
3591 break;
3592 default:
3593 goto bad;
3594 }
3595
3596 p = doAMode_IR(p, opc, r_src, am_addr, mode64);
3597 goto done;
3598 } else if (am_addr->tag == Mam_RR) {
3599 UInt r_src = iregNo(i->Min.Store.src, mode64);
3600 UInt opc, sz = i->Min.Store.sz;
3601
3602 switch (sz) {
3603 case 1:
3604 opc = 40;
3605 break;
3606 case 2:
3607 opc = 41;
3608 break;
3609 case 4:
3610 opc = 43;
3611 break;
3612 case 8:
3613 vassert(mode64);
3614 opc = 63;
3615 break;
3616 default:
3617 goto bad;
3618 }
3619
3620 p = doAMode_RR(p, opc, r_src, am_addr, mode64);
3621 goto done;
3622 }
3623 break;
3624 }
3625 case Min_LoadL: {
3626 MIPSAMode *am_addr = i->Min.LoadL.src;
3627 UInt r_src = iregNo(am_addr->Mam.IR.base, mode64);
3628 UInt idx = am_addr->Mam.IR.index;
3629 UInt r_dst = iregNo(i->Min.LoadL.dst, mode64);
3630
3631 if (i->Min.LoadL.sz == 4)
3632 p = mkFormI(p, 0x30, r_src, r_dst, idx);
3633 else
3634 p = mkFormI(p, 0x34, r_src, r_dst, idx);
3635 goto done;
3636 }
3637 case Min_StoreC: {
3638 MIPSAMode *am_addr = i->Min.StoreC.dst;
3639 UInt r_src = iregNo(i->Min.StoreC.src, mode64);
3640 UInt idx = am_addr->Mam.IR.index;
3641 UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64);
3642
3643 if (i->Min.StoreC.sz == 4)
3644 p = mkFormI(p, 0x38, r_dst, r_src, idx);
3645 else
3646 p = mkFormI(p, 0x3C, r_dst, r_src, idx);
3647 goto done;
3648 }
3649 case Min_RdWrLR: {
3650 UInt reg = iregNo(i->Min.RdWrLR.gpr, mode64);
3651 Bool wrLR = i->Min.RdWrLR.wrLR;
3652 if (wrLR)
3653 p = mkMoveReg(p, 31, reg);
3654 else
3655 p = mkMoveReg(p, reg, 31);
3656 goto done;
3657 }
3658
3659 /* Floating point */
3660 case Min_FpLdSt: {
3661 MIPSAMode *am_addr = i->Min.FpLdSt.addr;
3662 UChar sz = i->Min.FpLdSt.sz;
3663 vassert(sz == 4 || sz == 8);
3664 if (sz == 4) {
3665 UInt f_reg = fregNo(i->Min.FpLdSt.reg, mode64);
3666 if (i->Min.FpLdSt.isLoad) {
3667 if (am_addr->tag == Mam_IR)
3668 p = doAMode_IR(p, 0x31, f_reg, am_addr, mode64);
3669 else if (am_addr->tag == Mam_RR)
3670 p = doAMode_RR(p, 0x31, f_reg, am_addr, mode64);
3671 } else {
3672 if (am_addr->tag == Mam_IR)
3673 p = doAMode_IR(p, 0x39, f_reg, am_addr, mode64);
3674 else if (am_addr->tag == Mam_RR)
3675 p = doAMode_RR(p, 0x39, f_reg, am_addr, mode64);
3676 }
3677 } else if (sz == 8) {
3678 UInt f_reg = dregNo(i->Min.FpLdSt.reg);
3679 if (i->Min.FpLdSt.isLoad) {
3680 if (am_addr->tag == Mam_IR) {
3681 p = doAMode_IR(p, 0x35, f_reg, am_addr, mode64);
3682 } else if (am_addr->tag == Mam_RR) {
3683 p = doAMode_RR(p, 0x35, f_reg, am_addr, mode64);
3684 }
3685 } else {
3686 if (am_addr->tag == Mam_IR) {
3687 p = doAMode_IR(p, 0x3d, f_reg, am_addr, mode64);
3688 } else if (am_addr->tag == Mam_RR) {
3689 p = doAMode_RR(p, 0x3d, f_reg, am_addr, mode64);
3690 }
3691 }
3692 }
3693 goto done;
3694 }
3695
3696 case Min_FpUnary: {
3697 switch (i->Min.FpUnary.op) {
3698 case Mfp_MOVS: { /* FP move */
3699 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
3700 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
3701 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x6);
3702 break;
3703 }
3704 case Mfp_MOVD: { /* FP move */
3705 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
3706 UInt fr_src = dregNo(i->Min.FpUnary.src);
3707 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x6);
3708 break;
3709 }
3710 case Mfp_ABSS: { /* ABS.S */
3711 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
3712 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
3713 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x5);
3714 break;
3715 }
3716 case Mfp_ABSD: { /* ABS.D */
3717 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
3718 UInt fr_src = dregNo(i->Min.FpUnary.src);
3719 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x5);
3720 break;
3721 }
3722 case Mfp_NEGS: { /* NEG.S */
3723 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
3724 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
3725 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x7);
3726 break;
3727 }
3728 case Mfp_NEGD: { /* NEG.D */
3729 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
3730 UInt fr_src = dregNo(i->Min.FpUnary.src);
3731 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x7);
3732 break;
3733 }
3734 case Mfp_SQRTS: { /* SQRT.S */
3735 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
3736 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
3737 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x04);
3738 break;
3739 }
3740 case Mfp_SQRTD: { /* SQRT.D */
3741 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
3742 UInt fr_src = dregNo(i->Min.FpUnary.src);
3743 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x04);
3744 break;
3745 }
3746 default:
3747 goto bad;
3748 }
3749 goto done;
3750 }
3751
3752 case Min_FpBinary: {
3753 switch (i->Min.FpBinary.op) {
3754 case Mfp_ADDS: {
3755 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
3756 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
3757 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
3758 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 0);
3759 break;
3760 }
3761 case Mfp_SUBS: {
3762 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
3763 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
3764 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
3765 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 1);
3766 break;
3767 }
3768 case Mfp_MULS: {
3769 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
3770 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
3771 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
3772 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 2);
3773 break;
3774 }
3775 case Mfp_DIVS: {
3776 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
3777 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
3778 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
3779 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 3);
3780 break;
3781 }
3782 case Mfp_ADDD: {
3783 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
3784 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
3785 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
3786 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 0);
3787 break;
3788 }
3789 case Mfp_SUBD: {
3790 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
3791 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
3792 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
3793 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 1);
3794 break;
3795 }
3796 case Mfp_MULD: {
3797 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
3798 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
3799 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
3800 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 2);
3801 break;
3802 }
3803 case Mfp_DIVD: {
3804 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
3805 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
3806 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
3807 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 3);
3808 break;
3809 }
3810 default:
3811 goto bad;
3812 }
3813 goto done;
3814 }
3815
3816 case Min_FpTernary: {
3817 switch (i->Min.FpTernary.op) {
3818 case Mfp_MADDS: {
3819 UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
3820 UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
3821 UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
3822 UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
3823 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x20);
3824 break;
3825 }
3826 case Mfp_MADDD: {
3827 UInt fr_dst = dregNo(i->Min.FpTernary.dst);
3828 UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
3829 UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
3830 UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
3831 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x21);
3832 break;
3833 }
3834 case Mfp_MSUBS: {
3835 UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
3836 UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
3837 UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
3838 UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
3839 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x28);
3840 break;
3841 }
3842 case Mfp_MSUBD: {
3843 UInt fr_dst = dregNo(i->Min.FpTernary.dst);
3844 UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
3845 UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
3846 UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
3847 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x29);
3848 break;
3849 }
3850 default:
3851 goto bad;
3852 }
3853 goto done;
3854 }
3855
3856 case Min_FpConvert: {
3857 switch (i->Min.FpConvert.op) {
3858 UInt fr_dst, fr_src;
3859 case Mfp_CVTSD:
3860 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3861 fr_src = dregNo(i->Min.FpConvert.src);
3862 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x20);
3863 break;
3864 case Mfp_CVTSW:
3865 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3866 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3867 p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x20);
3868 break;
3869 case Mfp_CVTWD:
3870 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3871 fr_src = dregNo(i->Min.FpConvert.src);
3872 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x24);
3873 break;
3874 case Mfp_CVTWS:
3875 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3876 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3877 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x24);
3878 break;
3879 case Mfp_CVTDW:
3880 fr_dst = dregNo(i->Min.FpConvert.dst);
3881 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3882 p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x21);
3883 break;
3884 case Mfp_CVTDL:
3885 fr_dst = dregNo(i->Min.FpConvert.dst);
3886 fr_src = dregNo(i->Min.FpConvert.src);
3887 p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x21);
3888 break;
3889 case Mfp_CVTDS:
3890 fr_dst = dregNo(i->Min.FpConvert.dst);
3891 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3892 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21);
3893 break;
3894 case Mfp_CVTSL:
3895 fr_dst = dregNo(i->Min.FpConvert.dst);
3896 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3897 p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x20);
3898 break;
3899 case Mfp_CVTLS:
3900 if (mode64) {
3901 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3902 fr_src = dregNo(i->Min.FpConvert.src);
3903 } else {
3904 fr_dst = dregNo(i->Min.FpConvert.dst);
3905 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3906 }
3907 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x25);
3908 break;
3909 case Mfp_CVTLD:
3910 fr_dst = dregNo(i->Min.FpConvert.dst);
3911 fr_src = dregNo(i->Min.FpConvert.src);
3912 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x25);
3913 break;
3914 case Mfp_TRUWS:
3915 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3916 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3917 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0D);
3918 break;
3919 case Mfp_TRUWD:
3920 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3921 fr_src = dregNo(i->Min.FpConvert.src);
3922 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0D);
3923 break;
3924 case Mfp_TRULS:
3925 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3926 fr_src = dregNo(i->Min.FpConvert.src);
3927 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x09);
3928 break;
3929 case Mfp_TRULD:
3930 fr_dst = dregNo(i->Min.FpConvert.dst);
3931 fr_src = dregNo(i->Min.FpConvert.src);
3932 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x09);
3933 break;
3934 case Mfp_CEILWS:
3935 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3936 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3937 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0E);
3938 break;
3939 case Mfp_CEILWD:
3940 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3941 fr_src = dregNo(i->Min.FpConvert.src);
3942 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0E);
3943 break;
3944 case Mfp_CEILLS:
3945 fr_dst = dregNo(i->Min.FpConvert.dst);
3946 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3947 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0A);
3948 break;
3949 case Mfp_CEILLD:
3950 fr_dst = dregNo(i->Min.FpConvert.dst);
3951 fr_src = dregNo(i->Min.FpConvert.src);
3952 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0A);
3953 break;
3954 case Mfp_ROUNDWS:
3955 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3956 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3957 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0C);
3958 break;
3959 case Mfp_ROUNDWD:
3960 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3961 fr_src = dregNo(i->Min.FpConvert.src);
3962 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0C);
3963 break;
3964 case Mfp_ROUNDLD:
3965 fr_dst = dregNo(i->Min.FpConvert.dst);
3966 fr_src = dregNo(i->Min.FpConvert.src);
3967 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x08);
3968 break;
3969 case Mfp_FLOORWS:
3970 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3971 fr_src = fregNo(i->Min.FpConvert.src, mode64);
3972 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0F);
3973 break;
3974 case Mfp_FLOORWD:
3975 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
3976 fr_src = dregNo(i->Min.FpConvert.src);
3977 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0F);
3978 break;
3979 case Mfp_FLOORLD:
3980 fr_dst = dregNo(i->Min.FpConvert.dst);
3981 fr_src = dregNo(i->Min.FpConvert.src);
3982 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0B);
3983 break;
3984
3985 default:
3986 goto bad;
3987 }
3988 goto done;
3989 }
3990
3991 case Min_FpCompare: {
3992 UInt r_dst = iregNo(i->Min.FpCompare.dst, mode64);
3993 UInt fr_srcL = dregNo(i->Min.FpCompare.srcL);
3994 UInt fr_srcR = dregNo(i->Min.FpCompare.srcR);
3995
3996 UInt op;
3997          switch (i->Min.FpCompare.op) {
3998 case Mfp_CMP_UN:
3999 op = 1;
4000 break;
4001 case Mfp_CMP_EQ:
4002 op = 2;
4003 break;
4004 case Mfp_CMP_LT:
4005 op = 12;
4006 break;
4007 case Mfp_CMP_NGT:
4008 op = 15;
4009 break;
4010 default:
4011 goto bad;
4012 }
4013 /* c.cond.d fr_srcL, fr_srcR
4014 cfc1 r_dst, $31
4015 srl r_dst, r_dst, 23
4016 andi r_dst, r_dst, 1 */
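      /* 'op + 48' selects the C.cond.D function field: the c.cond.fmt
         instructions occupy COP1 function codes 0x30..0x3F, with the
         condition number in the low four bits (0x31 = c.un, 0x32 = c.eq,
         0x3C = c.lt, 0x3F = c.ngt).  cfc1 reads FCSR ($31); condition
         bit FCC0 is bit 23, which the srl/andi pair below extracts into
         r_dst. */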
4017 p = mkFormR(p, 0x11, 0x11, fr_srcL, fr_srcR, 0, op + 48);
4018 p = mkFormR(p, 0x11, 0x2, r_dst, 31, 0, 0);
4019 p = mkFormS(p, 0, r_dst, 0, r_dst, 23, 2);
4020 p = mkFormI(p, 12, r_dst, r_dst, 1);
4021 goto done;
4022 }
4023
4024 case Min_FpGpMove: {
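         /* COP1 GPR<->FPR moves: the COP1 rs field (the 0x0/0x1/0x4/0x5
            passed to mkFormR below) selects the flavour -- mfc1, dmfc1,
            mtc1 and dmtc1 respectively. */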
4025 switch (i->Min.FpGpMove.op) {
4026 UInt rt, fs;
4027 case MFpGpMove_mfc1: {
4028 rt = iregNo(i->Min.FpGpMove.dst, mode64);
4029 fs = fregNo(i->Min.FpGpMove.src, mode64);
4030 p = mkFormR(p, 0x11, 0x0, rt, fs, 0x0, 0x0);
4031 break;
4032 }
4033 case MFpGpMove_dmfc1: {
4034 vassert(mode64);
4035 rt = iregNo(i->Min.FpGpMove.dst, mode64);
4036 fs = fregNo(i->Min.FpGpMove.src, mode64);
4037 p = mkFormR(p, 0x11, 0x1, rt, fs, 0x0, 0x0);
4038 break;
4039 }
4040 case MFpGpMove_mtc1: {
4041 rt = iregNo(i->Min.FpGpMove.src, mode64);
4042 fs = fregNo(i->Min.FpGpMove.dst, mode64);
4043 p = mkFormR(p, 0x11, 0x4, rt, fs, 0x0, 0x0);
4044 break;
4045 }
4046 case MFpGpMove_dmtc1: {
4047 vassert(mode64);
4048 rt = iregNo(i->Min.FpGpMove.src, mode64);
4049 fs = fregNo(i->Min.FpGpMove.dst, mode64);
4050 p = mkFormR(p, 0x11, 0x5, rt, fs, 0x0, 0x0);
4051 break;
4052 }
4053 default:
4054 goto bad;
4055 }
4056 goto done;
4057 }
4058
4059 case Min_MoveCond: {
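         /* Conditional moves: movn.s and movn.d are COP1 encodings with
            function field 0x13, while the integer movn is a SPECIAL
            R-type with function field 0xb. */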
4060 switch (i->Min.MoveCond.op) {
4061 UInt d, s, t;
4062 case MFpMoveCond_movns: {
4063 d = fregNo(i->Min.MoveCond.dst, mode64);
4064 s = fregNo(i->Min.MoveCond.src, mode64);
4065 t = iregNo(i->Min.MoveCond.cond, mode64);
4066 p = mkFormR(p, 0x11, 0x10, t, s, d, 0x13);
4067 break;
4068 }
4069 case MFpMoveCond_movnd: {
4070 d = dregNo(i->Min.MoveCond.dst);
4071 s = dregNo(i->Min.MoveCond.src);
4072 t = iregNo(i->Min.MoveCond.cond, mode64);
4073 p = mkFormR(p, 0x11, 0x11, t, s, d, 0x13);
4074 break;
4075 }
4076 case MMoveCond_movn: {
4077 d = iregNo(i->Min.MoveCond.dst, mode64);
4078 s = iregNo(i->Min.MoveCond.src, mode64);
4079 t = iregNo(i->Min.MoveCond.cond, mode64);
4080 p = mkFormR(p, 0, s, t, d, 0, 0xb);
4081 break;
4082 }
4083 default:
4084 goto bad;
4085 }
4086 goto done;
4087 }
4088
4089 case Min_EvCheck: {
4090       /* This requires a 32-bit dec/test in both 32-bit and 64-bit modes. */
4091 /* We generate:
4092 lw r9, amCounter
4093 addiu r9, r9, -1
4094 sw r9, amCounter
4095 bgez r9, nofail
4096            lw/ld r9, amFailAddr
4097 jalr r9
4098 nop
4099 nofail:
4100 */
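      /* That is 7 fixed-size instructions, i.e. 28 bytes; the crosscheck
         against evCheckSzB_MIPS() at the end of this case depends on
         that. */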
4101 UChar* p0 = p;
4102 /* lw r9, amCounter */
4103 p = do_load_or_store_word32(p, True /*isLoad*/ , /*r*/ 9,
4104 i->Min.EvCheck.amCounter, mode64);
4105 /* addiu r9,r9,-1 */
4106 p = mkFormI(p, 9, 9, 9, 0xFFFF);
4107          /* sw r9, amCounter */
4108 p = do_load_or_store_word32(p, False /*!isLoad*/ , /*r*/ 9,
4109 i->Min.EvCheck.amCounter, mode64);
4110          /* bgez r9, nofail */
4111 p = mkFormI(p, 1, 9, 1, 3);
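      /* The amFailAddr load below executes in the bgez delay slot; it is
         harmless when the branch is taken, and the branch offset of 3
         lands exactly on 'nofail'. */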
4112 /* lw/ld r9, amFailAddr */
4113 p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9,
4114 i->Min.EvCheck.amFailAddr, mode64);
4115          /* jalr r9 */
4116          p = mkFormR(p, 0, 9, 0, 31, 0, 9);  /* p += 4 */
4117          p = mkFormR(p, 0, 0, 0, 0, 0, 0);   /* nop; p += 4 */
4118 /* nofail: */
4119
4120 /* Crosscheck */
4121 vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
4122 goto done;
4123 }
4124
4125 case Min_ProfInc: {
4126 /* Generate a code template to increment a memory location whose
4127 address will be known later as an immediate value. This code
4128 template will be patched once the memory location is known.
4129 For now we do this with address == 0x65556555. */
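      /* The bogus address is later replaced by patchProfInc_MIPS (see
         below), which rewrites only the load-immediate part of this
         template. */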
4130 if (mode64) {
4131 /* 64-bit:
4132 move r9, 0x6555655565556555ULL
4133 ld r8, 0(r9)
4134 daddiu r8, r8, 1
4135 sd r8, 0(r9) */
4136
4137 /* move r9, 0x6555655565556555ULL */
4138 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x6555655565556555ULL,
4139 True /*mode64*/);
4140 /* ld r8, 0(r9) */
4141 p = mkFormI(p, 55, 9, 8, 0);
4142
4143 /* daddiu r8, r8, 1 */
4144 p = mkFormI(p, 25, 8, 8, 1);
4145
4146 /* sd r8, 0(r9) */
4147 p = mkFormI(p, 63, 9, 8, 0);
4148 } else {
4149 /* 32-bit:
4150 move r9, 0x65556555
4151 lw r8, 0(r9)
4152 addiu r8, r8, 1 # add least significant word
4153 sw r8, 0(r9)
4154 sltiu r1, r8, 1 # set carry-in bit
4155 lw r8, 4(r9)
4156 addu r8, r8, r1
4157 sw r8, 4(r9) */
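         /* The 64-bit counter is bumped as two 32-bit halves: after the
            low-word addiu, 'sltiu r1, r8, 1' yields 1 exactly when the
            low word wrapped to zero, i.e. when a carry must be folded
            into the high word. */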
4158
4159 /* move r9, 0x65556555 */
4160 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x65556555ULL,
4161 False /*!mode64*/);
4162 /* lw r8, 0(r9) */
4163 p = mkFormI(p, 35, 9, 8, 0);
4164
4165 /* addiu r8, r8, 1 # add least significant word */
4166 p = mkFormI(p, 9, 8, 8, 1);
4167
4168 /* sw r8, 0(r9) */
4169 p = mkFormI(p, 43, 9, 8, 0);
4170
4171 /* sltiu r1, r8, 1 # set carry-in bit */
4172 p = mkFormI(p, 11, 8, 1, 1);
4173
4174 /* lw r8, 4(r9) */
4175 p = mkFormI(p, 35, 9, 8, 4);
4176
4177 /* addu r8, r8, r1 */
4178 p = mkFormR(p, 0, 8, 1, 8, 0, 33);
4179
4180 /* sw r8, 4(r9) */
4181 p = mkFormI(p, 43, 9, 8, 4);
4182
4183 }
4184 /* Tell the caller .. */
4185 vassert(!(*is_profInc));
4186 *is_profInc = True;
4187 goto done;
4188 }
4189
4190 default:
4191 goto bad;
4192
4193 }
4194
4195 bad:
4196 vex_printf("\n=> ");
4197 ppMIPSInstr(i, mode64);
4198 vpanic("emit_MIPSInstr");
4199 /* NOTREACHED */ done:
4200 vassert(p - &buf[0] <= 128);
4201 return p - &buf[0];
4202 }
4203
4204 /* How big is an event check? See case for Min_EvCheck in
4205 emit_MIPSInstr just above. That crosschecks what this returns, so
4206 we can tell if we're inconsistent. */
4207 Int evCheckSzB_MIPS ( void )
4208 {
4209 UInt kInstrSize = 4;
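   /* lw, addiu, sw, bgez, lw/ld, jalr, nop -- see the Min_EvCheck case
      in emit_MIPSInstr above. */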
4210 return 7*kInstrSize;
4211 }
4212
4213 /* NB: what goes on here has to be very closely coordinated with the
4214 emitInstr case for XDirect, above. */
4215 VexInvalRange chainXDirect_MIPS ( void* place_to_chain,
4216 void* disp_cp_chain_me_EXPECTED,
4217 void* place_to_jump_to,
4218 Bool mode64 )
4219 {
4220 /* What we're expecting to see is:
4221         move r9, disp_cp_chain_me_EXPECTED
4222 jalr r9
4223 nop
4224 viz
4225 <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
4226 0x120F809 # jalr r9
4227 0x00000000 # nop
4228 */
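   /* For reference, 0x120F809 decodes as 'jalr r9' (rs = $9, rd = $31,
      SPECIAL function 9).  In 32-bit mode the load immediate is
      presumably the usual lui/ori pair, e.g.
         lui  $9, %hi(disp_cp_chain_me_EXPECTED)
         ori  $9, $9, %lo(disp_cp_chain_me_EXPECTED)
      which is the 8-byte case; 64-bit mode uses the 24-byte, six
      instruction form. */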
4229 UChar* p = (UChar*)place_to_chain;
4230 vassert(0 == (3 & (HWord)p));
4231 vassert(isLoadImm_EXACTLY2or6(p, /*r*/9,
4232 (UInt)Ptr_to_ULong(disp_cp_chain_me_EXPECTED),
4233 mode64));
4234 vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
4235 vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
4236 /* And what we want to change it to is either:
4237 move r9, place_to_jump_to
4238 jalr r9
4239 nop
4240 viz
4241         <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
4242 0x120F809 # jalr r9
4243 0x00000000 # nop
4244
4245 The replacement has the same length as the original.
4246 */
4247
4248 p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
4249 Ptr_to_ULong(place_to_jump_to), mode64);
4250 p = emit32(p, 0x120F809);
4251 p = emit32(p, 0x00000000);
4252
4253 Int len = p - (UChar*)place_to_chain;
4254 vassert(len == (mode64 ? 32 : 16)); /* stay sane */
4255 VexInvalRange vir = {(HWord)place_to_chain, len};
4256 return vir;
4257 }
4258
4259 /* NB: what goes on here has to be very closely coordinated with the
4260 emitInstr case for XDirect, above. */
4261 VexInvalRange unchainXDirect_MIPS ( void* place_to_unchain,
4262 void* place_to_jump_to_EXPECTED,
4263 void* disp_cp_chain_me,
4264 Bool mode64 )
4265 {
4266 /* What we're expecting to see is:
4267 move r9, place_to_jump_to_EXPECTED
4268 jalr r9
4269 nop
4270 viz
4271 <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
4272 0x120F809 # jalr r9
4273 0x00000000 # nop
4274 */
4275 UChar* p = (UChar*)place_to_unchain;
4276 vassert(0 == (3 & (HWord)p));
4277 vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9,
4278 Ptr_to_ULong(place_to_jump_to_EXPECTED),
4279 mode64));
4280 vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
4281 vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
4282 /* And what we want to change it to is:
4283 move r9, disp_cp_chain_me
4284 jalr r9
4285 nop
4286 viz
4287 <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
4288 0x120F809 # jalr r9
4289 0x00000000 # nop
4290 The replacement has the same length as the original.
4291 */
4292 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
4293 Ptr_to_ULong(disp_cp_chain_me), mode64);
4294 p = emit32(p, 0x120F809);
4295 p = emit32(p, 0x00000000);
4296
4297 Int len = p - (UChar*)place_to_unchain;
4298 vassert(len == (mode64 ? 32 : 16)); /* stay sane */
4299 VexInvalRange vir = {(HWord)place_to_unchain, len};
4300 return vir;
4301 }
4302
4303 /* Patch the counter address into a profile inc point, as previously
4304 created by the Min_ProfInc case for emit_MIPSInstr. */
4305 VexInvalRange patchProfInc_MIPS ( void* place_to_patch,
4306 ULong* location_of_counter, Bool mode64 )
4307 {
4308 if (mode64)
4309 vassert(sizeof(ULong*) == 8);
4310 else
4311 vassert(sizeof(ULong*) == 4);
4312 UChar* p = (UChar*)place_to_patch;
4313 vassert(0 == (3 & (HWord)p));
4314 vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
4315 mode64 ? 0x6555655565556555ULL : 0x65556555,
4316 mode64));
4317
4318 if (mode64) {
4319 vassert(fetch32(p + 24 + 0) == 0xDD280000);
4320 vassert(fetch32(p + 24 + 4) == 0x65080001);
4321 vassert(fetch32(p + 24 + 8) == 0xFD280000);
4322 } else {
4323 vassert(fetch32(p + 8 + 0) == 0x8D280000);
4324 vassert(fetch32(p + 8 + 4) == 0x25080001);
4325 vassert(fetch32(p + 8 + 8) == 0xAD280000);
4326 vassert(fetch32(p + 8 + 12) == 0x2d010001);
4327 vassert(fetch32(p + 8 + 16) == 0x8d280004);
4328 vassert(fetch32(p + 8 + 20) == 0x01014021);
4329 vassert(fetch32(p + 8 + 24) == 0xad280004);
4330 }
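   /* Those constants are simply the fixed encodings of the template
      emitted by the Min_ProfInc case above: ld/daddiu/sd of r8 via r9 in
      64-bit mode, and lw/addiu/sw plus sltiu/lw/addu/sw in 32-bit
      mode. */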
4331
4332 p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
4333 Ptr_to_ULong(location_of_counter), mode64);
4334
4335 VexInvalRange vir = {(HWord)p, 8};
4336 return vir;
4337 }
4338
4339
4340 /*---------------------------------------------------------------*/
4341 /*--- end host_mips_defs.c ---*/
4342 /*---------------------------------------------------------------*/
4343