/*---------------------------------------------------------------*/
/*--- begin                              guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2010-2017 RT-RK
      mips-valgrind@rt-rk.com

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "libvex_basictypes.h"
#include "libvex_emnote.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
#include "libvex_ir.h"
#include "libvex.h"

#include "main_util.h"
#include "main_globals.h"
#include "guest_generic_bb_to_IR.h"
#include "guest_mips_defs.h"

/* This file contains helper functions for mips guest code.  Calls to
   these functions are generated by the back end.
*/

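/* ALWAYSDEFD{32,64} expand to an { offset, size } pair for the named
   guest state field.  These pairs populate the .alwaysDefd tables in the
   VexGuestLayout structures below, which describe the parts of the state
   that Memcheck should regard as always defined. */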
#define ALWAYSDEFD32(field)                            \
    { offsetof(VexGuestMIPS32State, field),            \
      (sizeof ((VexGuestMIPS32State*)0)->field) }

#define ALWAYSDEFD64(field)                            \
    { offsetof(VexGuestMIPS64State, field),            \
      (sizeof ((VexGuestMIPS64State*)0)->field) }

IRExpr *guest_mips32_spechelper(const HChar * function_name, IRExpr ** args,
                                IRStmt ** precedingStmts, Int n_precedingStmts)
{
   return NULL;
}

IRExpr *guest_mips64_spechelper ( const HChar * function_name, IRExpr ** args,
                                  IRStmt ** precedingStmts,
                                  Int n_precedingStmts )
{
   return NULL;
}

/* VISIBLE TO LIBVEX CLIENT */
void LibVEX_GuestMIPS32_initialise( /*OUT*/ VexGuestMIPS32State * vex_state)
{
   vex_state->guest_r0 = 0;   /* Hardwired to 0 */
   vex_state->guest_r1 = 0;   /* Assembler temporary */
   vex_state->guest_r2 = 0;   /* Values for function returns ... */
   vex_state->guest_r3 = 0;   /* ...and expression evaluation */
   vex_state->guest_r4 = 0;   /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;   /* Temporaries */
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers */
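   /* 0x7ff80000 is a quiet-NaN bit pattern in each 32-bit half, so every
      FP register starts out as a NaN whether it is viewed as two singles
      or as one double. */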
   vex_state->guest_f0 = 0x7ff800007ff80000ULL; /* Floating point GP registers */
   vex_state->guest_f1 = 0x7ff800007ff80000ULL;
   vex_state->guest_f2 = 0x7ff800007ff80000ULL;
   vex_state->guest_f3 = 0x7ff800007ff80000ULL;
   vex_state->guest_f4 = 0x7ff800007ff80000ULL;
   vex_state->guest_f5 = 0x7ff800007ff80000ULL;
   vex_state->guest_f6 = 0x7ff800007ff80000ULL;
   vex_state->guest_f7 = 0x7ff800007ff80000ULL;
   vex_state->guest_f8 = 0x7ff800007ff80000ULL;
   vex_state->guest_f9 = 0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;  /* FP implementation and revision register */
   vex_state->guest_FCCR = 0; /* FP condition codes register */
   vex_state->guest_FEXR = 0; /* FP exceptions register */
   vex_state->guest_FENR = 0; /* FP enables register */
   vex_state->guest_FCSR = 0; /* FP control/status register */
   vex_state->guest_ULR = 0; /* TLS */

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For clflush: record start and length of area to invalidate */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected. By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;

   vex_state->guest_CP0_status = 0;

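   /* LL/SC emulation state.  LLaddr is initialised to an address that no
      word-aligned LL can ever produce, so no LL appears to be outstanding
      at startup. */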
   vex_state->guest_LLaddr = 0xFFFFFFFF;
   vex_state->guest_LLdata = 0;

   /* MIPS32 DSP ASE(r2) specific registers */
   vex_state->guest_DSPControl = 0;   /* DSPControl register */
   vex_state->guest_ac0 = 0;          /* Accumulator 0 */
   vex_state->guest_ac1 = 0;          /* Accumulator 1 */
   vex_state->guest_ac2 = 0;          /* Accumulator 2 */
   vex_state->guest_ac3 = 0;          /* Accumulator 3 */
}

void LibVEX_GuestMIPS64_initialise ( /*OUT*/ VexGuestMIPS64State * vex_state )
{
   vex_state->guest_r0 = 0;  /* Hardwired to 0 */
   vex_state->guest_r1 = 0;  /* Assembler temporary */
   vex_state->guest_r2 = 0;  /* Values for function returns ... */
   vex_state->guest_r3 = 0;
   vex_state->guest_r4 = 0;  /* Function arguments */
   vex_state->guest_r5 = 0;
   vex_state->guest_r6 = 0;
   vex_state->guest_r7 = 0;
   vex_state->guest_r8 = 0;
   vex_state->guest_r9 = 0;
   vex_state->guest_r10 = 0;
   vex_state->guest_r11 = 0;
   vex_state->guest_r12 = 0;  /* Temporaries */
   vex_state->guest_r13 = 0;
   vex_state->guest_r14 = 0;
   vex_state->guest_r15 = 0;
   vex_state->guest_r16 = 0;  /* Saved temporaries */
   vex_state->guest_r17 = 0;
   vex_state->guest_r18 = 0;
   vex_state->guest_r19 = 0;
   vex_state->guest_r20 = 0;
   vex_state->guest_r21 = 0;
   vex_state->guest_r22 = 0;
   vex_state->guest_r23 = 0;
   vex_state->guest_r24 = 0;  /* Temporaries */
   vex_state->guest_r25 = 0;
   vex_state->guest_r26 = 0;  /* Reserved for OS kernel */
   vex_state->guest_r27 = 0;
   vex_state->guest_r28 = 0;  /* Global pointer */
   vex_state->guest_r29 = 0;  /* Stack pointer */
   vex_state->guest_r30 = 0;  /* Frame pointer */
   vex_state->guest_r31 = 0;  /* Return address */
   vex_state->guest_PC = 0;   /* Program counter */
   vex_state->guest_HI = 0;   /* Multiply and divide register higher result */
   vex_state->guest_LO = 0;   /* Multiply and divide register lower result */

   /* FPU Registers */
   vex_state->guest_f0 =  0x7ff800007ff80000ULL;  /* Floating point registers */
   vex_state->guest_f1 =  0x7ff800007ff80000ULL;
   vex_state->guest_f2 =  0x7ff800007ff80000ULL;
   vex_state->guest_f3 =  0x7ff800007ff80000ULL;
   vex_state->guest_f4 =  0x7ff800007ff80000ULL;
   vex_state->guest_f5 =  0x7ff800007ff80000ULL;
   vex_state->guest_f6 =  0x7ff800007ff80000ULL;
   vex_state->guest_f7 =  0x7ff800007ff80000ULL;
   vex_state->guest_f8 =  0x7ff800007ff80000ULL;
   vex_state->guest_f9 =  0x7ff800007ff80000ULL;
   vex_state->guest_f10 = 0x7ff800007ff80000ULL;
   vex_state->guest_f11 = 0x7ff800007ff80000ULL;
   vex_state->guest_f12 = 0x7ff800007ff80000ULL;
   vex_state->guest_f13 = 0x7ff800007ff80000ULL;
   vex_state->guest_f14 = 0x7ff800007ff80000ULL;
   vex_state->guest_f15 = 0x7ff800007ff80000ULL;
   vex_state->guest_f16 = 0x7ff800007ff80000ULL;
   vex_state->guest_f17 = 0x7ff800007ff80000ULL;
   vex_state->guest_f18 = 0x7ff800007ff80000ULL;
   vex_state->guest_f19 = 0x7ff800007ff80000ULL;
   vex_state->guest_f20 = 0x7ff800007ff80000ULL;
   vex_state->guest_f21 = 0x7ff800007ff80000ULL;
   vex_state->guest_f22 = 0x7ff800007ff80000ULL;
   vex_state->guest_f23 = 0x7ff800007ff80000ULL;
   vex_state->guest_f24 = 0x7ff800007ff80000ULL;
   vex_state->guest_f25 = 0x7ff800007ff80000ULL;
   vex_state->guest_f26 = 0x7ff800007ff80000ULL;
   vex_state->guest_f27 = 0x7ff800007ff80000ULL;
   vex_state->guest_f28 = 0x7ff800007ff80000ULL;
   vex_state->guest_f29 = 0x7ff800007ff80000ULL;
   vex_state->guest_f30 = 0x7ff800007ff80000ULL;
   vex_state->guest_f31 = 0x7ff800007ff80000ULL;

   vex_state->guest_FIR = 0;   /* FP implementation and revision register */
   vex_state->guest_FCCR = 0;  /* FP condition codes register */
   vex_state->guest_FEXR = 0;  /* FP exceptions register */
   vex_state->guest_FENR = 0;  /* FP enables register */
   vex_state->guest_FCSR = 0;  /* FP control/status register */

   vex_state->guest_ULR = 0;

   /* Various pseudo-regs mandated by Vex or Valgrind. */
   /* Emulation notes */
   vex_state->guest_EMNOTE = 0;

   /* For clflush: record start and length of area to invalidate */
   vex_state->guest_CMSTART = 0;
   vex_state->guest_CMLEN = 0;
   vex_state->host_EvC_COUNTER = 0;
   vex_state->host_EvC_FAILADDR = 0;

   /* Used to record the unredirected guest address at the start of
      a translation whose start has been redirected. By reading
      this pseudo-register shortly afterwards, the translation can
      find out what the corresponding no-redirection address was.
      Note, this is only set for wrap-style redirects, not for
      replace-style ones. */
   vex_state->guest_NRADDR = 0;

   vex_state->guest_COND = 0;

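   /* Status.FR = 1: the FPU provides 32 doubleword (64-bit) registers. */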
   vex_state->guest_CP0_status = MIPS_CP0_STATUS_FR;

   vex_state->guest_LLaddr = 0xFFFFFFFFFFFFFFFFULL;
   vex_state->guest_LLdata = 0;
}

/*-----------------------------------------------------------*/
/*--- Describing the mips guest state, for the benefit    ---*/
/*--- of iropt and instrumenters.                         ---*/
/*-----------------------------------------------------------*/

/* Figure out if any part of the guest state contained in minoff
   .. maxoff requires precise memory exceptions.  If in doubt return
   True (but this generates significantly slower code).

   We enforce precise exns for guest SP, PC, and FP.

   Only SP is needed in mode VexRegUpdSpAtMemAccess.
*/
Bool guest_mips32_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestMIPS32State, guest_r29);
   Int sp_max = sp_min + 4 - 1;
   Int pc_min = offsetof(VexGuestMIPS32State, guest_PC);
   Int pc_max = pc_min + 4 - 1;

   if (maxoff < sp_min || minoff > sp_max) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if (maxoff < pc_min || minoff > pc_max) {
      /* no overlap with pc */
   } else {
      return True;
   }

   /* We appear to need precise updates of the frame pointer (r30) in
      order to get proper stacktraces from non-optimised code. */
   Int fp_min = offsetof(VexGuestMIPS32State, guest_r30);
   Int fp_max = fp_min + 4 - 1;

   if (maxoff < fp_min || minoff > fp_max) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}

Bool guest_mips64_state_requires_precise_mem_exns (
        Int minoff, Int maxoff, VexRegisterUpdates pxControl
     )
{
   Int sp_min = offsetof(VexGuestMIPS64State, guest_r29);
   Int sp_max = sp_min + 8 - 1;
   Int pc_min = offsetof(VexGuestMIPS64State, guest_PC);
   Int pc_max = pc_min + 8 - 1;

   if ( maxoff < sp_min || minoff > sp_max ) {
      /* no overlap with sp */
      if (pxControl == VexRegUpdSpAtMemAccess)
         return False;  /* We only need to check stack pointer. */
   } else {
      return True;
   }

   if ( maxoff < pc_min || minoff > pc_max ) {
      /* no overlap with pc */
   } else {
      return True;
   }

   Int fp_min = offsetof(VexGuestMIPS64State, guest_r30);
   Int fp_max = fp_min + 8 - 1;

   if ( maxoff < fp_min || minoff > fp_max ) {
      /* no overlap with fp */
   } else {
      return True;
   }

   return False;
}

VexGuestLayout mips32Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS32State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS32State, guest_r29),
   .sizeof_SP = 4,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS32State, guest_r30),
   .sizeof_FP = 4,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS32State, guest_PC),
   .sizeof_IP = 4,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 8,
   /* ? :(  */
   .alwaysDefd = {
             /* 0 */ ALWAYSDEFD32(guest_r0),
             /* 1 */ ALWAYSDEFD32(guest_r1),
             /* 2 */ ALWAYSDEFD32(guest_EMNOTE),
             /* 3 */ ALWAYSDEFD32(guest_CMSTART),
             /* 4 */ ALWAYSDEFD32(guest_CMLEN),
             /* 5 */ ALWAYSDEFD32(guest_r29),
             /* 6 */ ALWAYSDEFD32(guest_r31),
             /* 7 */ ALWAYSDEFD32(guest_ULR)
             }
};

VexGuestLayout mips64Guest_layout = {
   /* Total size of the guest state, in bytes. */
   .total_sizeB = sizeof(VexGuestMIPS64State),
   /* Describe the stack pointer. */
   .offset_SP = offsetof(VexGuestMIPS64State, guest_r29),
   .sizeof_SP = 8,
   /* Describe the frame pointer. */
   .offset_FP = offsetof(VexGuestMIPS64State, guest_r30),
   .sizeof_FP = 8,
   /* Describe the instruction pointer. */
   .offset_IP = offsetof(VexGuestMIPS64State, guest_PC),
   .sizeof_IP = 8,
   /* Describe any sections to be regarded by Memcheck as
      'always-defined'. */
   .n_alwaysDefd = 7,
   /* ? :(  */
   .alwaysDefd = {
                  /* 0 */ ALWAYSDEFD64 (guest_r0),
                  /* 1 */ ALWAYSDEFD64 (guest_EMNOTE),
                  /* 2 */ ALWAYSDEFD64 (guest_CMSTART),
                  /* 3 */ ALWAYSDEFD64 (guest_CMLEN),
                  /* 4 */ ALWAYSDEFD64 (guest_r29),
                  /* 5 */ ALWAYSDEFD64 (guest_r31),
                  /* 6 */ ALWAYSDEFD64 (guest_ULR)
                  }
};

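/* Emit a host 'rdhwr v0, $<n>' instruction.  The base word 0x7C02003B
   encodes 'rdhwr v0, $0'; OR-ing the requested hardware register number
   into bits 15..11 (the rd field) selects which hardware register is
   read.  The result is left in $v0, i.e. the local variable 'x'. */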
#define ASM_VOLATILE_RDHWR(opcode)                                 \
   __asm__ __volatile__(".word 0x7C02003B | "#opcode" << 11  \n\t" \
                        : "+r" (x) : :                             \
                       )

HWord mips_dirtyhelper_rdhwr ( UInt rd )
{
#if defined(__mips__)
   register HWord x __asm__("v0") = 0;

   switch (rd) {
      case 0:  /* x = CPUNum() */
         ASM_VOLATILE_RDHWR(0); /* rdhwr v0, $0 */
         break;

      case 1:  /* x = SYNCI_Step() */
         ASM_VOLATILE_RDHWR(1); /* rdhwr v0, $1 */
         break;

      case 2:  /* x = CC() */
         ASM_VOLATILE_RDHWR(2); /* rdhwr v0, $2 */
         break;

      case 3:  /* x = CCRes() */
         ASM_VOLATILE_RDHWR(3); /* rdhwr v0, $3 */
         break;

      case 31:  /* x = CVMX_get_cycles() */
         ASM_VOLATILE_RDHWR(31); /* rdhwr v0, $31 */
         break;

      default:
         vassert(0);
         break;
   }
   return x;
#else
   return 0;
#endif
}

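/* The ASM_VOLATILE_* helpers below all follow the same pattern: save the
   host FCSR (cfc1), install the guest FCSR (ctc1), execute a single FP
   instruction on scratch FP registers, read the resulting FCSR into
   'ret', and restore the host FCSR.  They expect the local variables
   'ret', 'fcsr' and the operand values to be in scope at the point of
   expansion. */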
#define ASM_VOLATILE_UNARY32(inst)                                  \
   __asm__ volatile(".set  push"        "\n\t"                      \
                    ".set  hardfloat"   "\n\t"                      \
                    "cfc1  $t0,  $31"   "\n\t"                      \
                    "ctc1  %2,   $31"   "\n\t"                      \
                    "mtc1  %1,   $f20"  "\n\t"                      \
                    #inst" $f20, $f20"  "\n\t"                      \
                    "cfc1  %0,   $31"   "\n\t"                      \
                    "ctc1  $t0,  $31"   "\n\t"                      \
                    ".set  pop"         "\n\t"                      \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (fcsr)                     \
                    : "t0", "$f20"                                  \
                   );

#define ASM_VOLATILE_UNARY32_DOUBLE(inst)                           \
   __asm__ volatile(".set  push"        "\n\t"                      \
                    ".set  hardfloat"   "\n\t"                      \
                    "cfc1  $t0,  $31"   "\n\t"                      \
                    "ctc1  %2,   $31"   "\n\t"                      \
                    "ldc1  $f20, 0(%1)" "\n\t"                      \
                    #inst" $f20, $f20"  "\n\t"                      \
                    "cfc1  %0,   $31"   "\n\t"                      \
                    "ctc1  $t0,  $31"   "\n\t"                      \
                    ".set  pop"         "\n\t"                      \
                    : "=r" (ret)                                    \
                    : "r" (&fsVal), "r" (fcsr)                      \
                    : "t0", "$f20", "$f21"                          \
                   );

#define ASM_VOLATILE_UNARY64(inst)                                  \
   __asm__ volatile(".set  push"         "\n\t"                     \
                    ".set  hardfloat"    "\n\t"                     \
                    ".set  fp=64"        "\n\t"                     \
                    "cfc1  $t0,  $31"    "\n\t"                     \
                    "ctc1  %2,   $31"    "\n\t"                     \
                    "ldc1  $f24, 0(%1)"  "\n\t"                     \
                    #inst" $f24, $f24"   "\n\t"                     \
                    "cfc1  %0,   $31"    "\n\t"                     \
                    "ctc1  $t0,  $31"    "\n\t"                     \
                    ".set  pop"          "\n\t"                     \
                    : "=r" (ret)                                    \
                    : "r" (&(addr[fs])), "r" (fcsr)                 \
                    : "t0", "$f24"                                  \
                   );

#define ASM_VOLATILE_BINARY32(inst)                                 \
   __asm__ volatile(".set  push"              "\n\t"                \
                    ".set  hardfloat"         "\n\t"                \
                    "cfc1  $t0,  $31"         "\n\t"                \
                    "ctc1  %3,   $31"         "\n\t"                \
                    "mtc1  %1,   $f20"        "\n\t"                \
                    "mtc1  %2,   $f22"        "\n\t"                \
                    #inst" $f20, $f20, $f22"  "\n\t"                \
                    "cfc1  %0,   $31"         "\n\t"                \
                    "ctc1  $t0,  $31"         "\n\t"                \
                    ".set  pop"               "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (loFsVal), "r" (loFtVal), "r" (fcsr)      \
                    : "t0", "$f20", "$f22"                          \
                   );

#define ASM_VOLATILE_BINARY32_DOUBLE(inst)                          \
   __asm__ volatile(".set  push"              "\n\t"                \
                    ".set  hardfloat"         "\n\t"                \
                    "cfc1  $t0,  $31"         "\n\t"                \
                    "ctc1  %3,   $31"         "\n\t"                \
                    "ldc1  $f20, 0(%1)"       "\n\t"                \
                    "ldc1  $f22, 0(%2)"       "\n\t"                \
                    #inst" $f20, $f20, $f22"  "\n\t"                \
                    "cfc1  %0,   $31"         "\n\t"                \
                    "ctc1  $t0,  $31"         "\n\t"                \
                    ".set  pop"               "\n\t"                \
                    : "=r" (ret)                                    \
                    : "r" (&fsVal), "r" (&ftVal), "r" (fcsr)        \
                    : "t0", "$f20", "$f21", "$f22", "$f23"          \
                   );

#define ASM_VOLATILE_BINARY64(inst)                                     \
   __asm__ volatile(".set  push"              "\n\t"                    \
                    ".set  hardfloat"         "\n\t"                    \
                    "cfc1  $t0,  $31"         "\n\t"                    \
                    "ctc1  %3,   $31"         "\n\t"                    \
                    "ldc1  $f24, 0(%1)"       "\n\t"                    \
                    "ldc1  $f26, 0(%2)"       "\n\t"                    \
                    #inst" $f24, $f24, $f26"  "\n\t"                    \
                    "cfc1  %0,   $31"         "\n\t"                    \
                    "ctc1  $t0,  $31"         "\n\t"                    \
                    ".set  pop"               "\n\t"                    \
                    : "=r" (ret)                                        \
                    : "r" (&(addr[fs])), "r" (&(addr[ft])), "r" (fcsr)  \
                    : "t0", "$f24", "$f26"                              \
                   );

/* TODO: Add cases for all FPU instructions, because every FPU instruction
         can change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp32 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
   UInt loFsVal, hiFsVal, loFtVal, hiFtVal;
#if defined (_MIPSEL)
   ULong *addr = (ULong *)&guest_state->guest_f0;
   loFsVal     = (UInt)addr[fs];
   hiFsVal     = (UInt)addr[fs+1];
   loFtVal     = (UInt)addr[ft];
   hiFtVal     = (UInt)addr[ft+1];
#elif defined (_MIPSEB)
   UInt *addr = (UInt *)&guest_state->guest_f0;
   loFsVal    = (UInt)addr[fs*2];
   hiFsVal    = (UInt)addr[fs*2+2];
   loFtVal    = (UInt)addr[ft*2];
   hiFtVal    = (UInt)addr[ft*2+2];
#endif
   ULong fsVal   = ((ULong) hiFsVal) << 32 | loFsVal;
   ULong ftVal   = ((ULong) hiFtVal) << 32 | loFtVal;
   UInt fcsr     = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY32_DOUBLE(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY32(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY32_DOUBLE(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY32(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY32_DOUBLE(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY32(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY32_DOUBLE(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY32(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY32(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY32(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY32(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY32_DOUBLE(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY32(round.w.s)
         break;
      case ADDS:
         ASM_VOLATILE_BINARY32(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY32_DOUBLE(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY32(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY32_DOUBLE(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY32(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}

/* TODO: Add cases for all FPU instructions, because every FPU instruction
         can change the value of the FCSR register. */
extern UInt mips_dirtyhelper_calculate_FCSR_fp64 ( void* gs, UInt fs, UInt ft,
                                                   flt_op inst )
{
   UInt ret = 0;
#if defined(__mips__) && ((__mips == 64) ||                                  \
                          (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)))
#if defined(VGA_mips32)
   VexGuestMIPS32State* guest_state = (VexGuestMIPS32State*)gs;
#else
   VexGuestMIPS64State* guest_state = (VexGuestMIPS64State*)gs;
#endif
   ULong *addr = (ULong *)&guest_state->guest_f0;
   UInt fcsr   = guest_state->guest_FCSR;
   switch (inst) {
      case ROUNDWD:
         ASM_VOLATILE_UNARY64(round.w.d)
         break;
      case FLOORWS:
         ASM_VOLATILE_UNARY64(floor.w.s)
         break;
      case FLOORWD:
         ASM_VOLATILE_UNARY64(floor.w.d)
         break;
      case TRUNCWS:
         ASM_VOLATILE_UNARY64(trunc.w.s)
         break;
      case TRUNCWD:
         ASM_VOLATILE_UNARY64(trunc.w.d)
         break;
      case CEILWS:
         ASM_VOLATILE_UNARY64(ceil.w.s)
         break;
      case CEILWD:
         ASM_VOLATILE_UNARY64(ceil.w.d)
         break;
      case CVTDS:
         ASM_VOLATILE_UNARY64(cvt.d.s)
         break;
      case CVTDW:
         ASM_VOLATILE_UNARY64(cvt.d.w)
         break;
      case CVTSW:
         ASM_VOLATILE_UNARY64(cvt.s.w)
         break;
      case CVTSD:
         ASM_VOLATILE_UNARY64(cvt.s.d)
         break;
      case CVTWS:
         ASM_VOLATILE_UNARY64(cvt.w.s)
         break;
      case CVTWD:
         ASM_VOLATILE_UNARY64(cvt.w.d)
         break;
      case ROUNDWS:
         ASM_VOLATILE_UNARY64(round.w.s)
         break;
      case CEILLS:
         ASM_VOLATILE_UNARY64(ceil.l.s)
         break;
      case CEILLD:
         ASM_VOLATILE_UNARY64(ceil.l.d)
         break;
      case CVTDL:
         ASM_VOLATILE_UNARY64(cvt.d.l)
         break;
      case CVTLS:
         ASM_VOLATILE_UNARY64(cvt.l.s)
         break;
      case CVTLD:
         ASM_VOLATILE_UNARY64(cvt.l.d)
         break;
      case CVTSL:
         ASM_VOLATILE_UNARY64(cvt.s.l)
         break;
      case FLOORLS:
         ASM_VOLATILE_UNARY64(floor.l.s)
         break;
      case FLOORLD:
         ASM_VOLATILE_UNARY64(floor.l.d)
         break;
      case ROUNDLS:
         ASM_VOLATILE_UNARY64(round.l.s)
         break;
      case ROUNDLD:
         ASM_VOLATILE_UNARY64(round.l.d)
         break;
      case TRUNCLS:
         ASM_VOLATILE_UNARY64(trunc.l.s)
         break;
      case TRUNCLD:
         ASM_VOLATILE_UNARY64(trunc.l.d)
         break;
      case ADDS:
         ASM_VOLATILE_BINARY64(add.s)
         break;
      case ADDD:
         ASM_VOLATILE_BINARY64(add.d)
         break;
      case SUBS:
         ASM_VOLATILE_BINARY64(sub.s)
         break;
      case SUBD:
         ASM_VOLATILE_BINARY64(sub.d)
         break;
      case DIVS:
         ASM_VOLATILE_BINARY64(div.s)
         break;
      default:
         vassert(0);
         break;
   }
#endif
   return ret;
}

/*---------------------------------------------------------------*/
/*--- end                                guest_mips_helpers.c ---*/
/*---------------------------------------------------------------*/