/* -*- mode: C; c-basic-offset: 3; -*- */

/*---------------------------------------------------------------*/
/*--- Begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emnote.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_arm64.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_arm64_defs.h"
#include "host_s390_defs.h"
#include "host_mips_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_arm64_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"
#include "guest_mips_defs.h"

#include "host_generic_simd128.h"

/* For each architecture <arch>, we define 2 macros:
   <arch>FN takes as its argument a pointer (typically to a function,
            or the return value of a function).
   <arch>ST takes as its argument a statement.
   If main_main.c is compiled for <arch>, these macros simply expand
   to their argument.  Otherwise they expand to NULL and vassert(0)
   respectively.  The macros are used to avoid introducing dependencies
   on object files not needed for the (only) architecture we are
   compiling for.  (A usage sketch follows the definitions below.)

   To still compile the code below for all supported architectures,
   define VEXMULTIARCH.  This is used by the file multiarch_main_main.c */

#if defined(VGA_x86) || defined(VEXMULTIARCH)
#define X86FN(f) f
#define X86ST(f) f
#else
#define X86FN(f) NULL
#define X86ST(f) vassert(0)
#endif

#if defined(VGA_amd64) || defined(VEXMULTIARCH)
#define AMD64FN(f) f
#define AMD64ST(f) f
#else
#define AMD64FN(f) NULL
#define AMD64ST(f) vassert(0)
#endif

#if defined(VGA_ppc32) || defined(VEXMULTIARCH)
#define PPC32FN(f) f
#define PPC32ST(f) f
#else
#define PPC32FN(f) NULL
#define PPC32ST(f) vassert(0)
#endif

#if defined(VGA_ppc64be) || defined(VGA_ppc64le) || defined(VEXMULTIARCH)
#define PPC64FN(f) f
#define PPC64ST(f) f
#else
#define PPC64FN(f) NULL
#define PPC64ST(f) vassert(0)
#endif

#if defined(VGA_s390x) || defined(VEXMULTIARCH)
#define S390FN(f) f
#define S390ST(f) f
#else
#define S390FN(f) NULL
#define S390ST(f) vassert(0)
#endif

#if defined(VGA_arm) || defined(VEXMULTIARCH)
#define ARMFN(f) f
#define ARMST(f) f
#else
#define ARMFN(f) NULL
#define ARMST(f) vassert(0)
#endif

#if defined(VGA_arm64) || defined(VEXMULTIARCH)
#define ARM64FN(f) f
#define ARM64ST(f) f
#else
#define ARM64FN(f) NULL
#define ARM64ST(f) vassert(0)
#endif

#if defined(VGA_mips32) || defined(VEXMULTIARCH)
#define MIPS32FN(f) f
#define MIPS32ST(f) f
#else
#define MIPS32FN(f) NULL
#define MIPS32ST(f) vassert(0)
#endif

#if defined(VGA_mips64) || defined(VEXMULTIARCH)
#define MIPS64FN(f) f
#define MIPS64ST(f) f
#else
#define MIPS64FN(f) NULL
#define MIPS64ST(f) vassert(0)
#endif

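/* Usage sketch, illustrative only; both lines mirror real uses further
   down this file.  In a single-arch x86 build the first line binds the
   real disassembler and the second executes its statement; in a build
   for any other single architecture, disInstrFn becomes NULL and the
   X86ST statement turns into vassert(0) if ever reached.

      disInstrFn = X86FN(disInstr_X86);
      X86ST(return chainXDirect_X86(endness_host, place_to_chain,
                                    disp_cp_chain_me_EXPECTED,
                                    place_to_jump_to));
*/
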
/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static void  check_hwcaps ( VexArch arch, UInt hwcaps );
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps );
static IRType arch_word_size ( VexArch arch );

/* --------- helpers --------- */

/* Not inlined, so that the division-rounding checks in LibVEX_Init
   really happen at run time rather than being constant-folded away. */
__attribute__((noinline))
static UInt udiv32 ( UInt x, UInt y ) { return x/y; }
__attribute__((noinline))
static  Int sdiv32 (  Int x,  Int y ) { return x/y; }


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vex_bzero(vcon, sizeof(*vcon));
   vcon->iropt_verbosity                = 0;
   vcon->iropt_level                    = 2;
   vcon->iropt_register_updates_default = VexRegUpdUnwindregsAtMemAccess;
   vcon->iropt_unroll_thresh            = 120;
   vcon->guest_max_insns                = 60;
   vcon->guest_chase_thresh             = 10;
   vcon->guest_chase_cond               = False;
}


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( const HChar*, SizeT nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Control ... */
   const VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));
   vassert(32 == sizeof(U256));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));
   vassert(sizeof(void*) == sizeof(Addr));
   vassert(sizeof(unsigned long) == sizeof(SizeT));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* These take a lot of space, so make sure we don't have
      any unnoticed size regressions. */
   if (VEX_HOST_WORDSIZE == 4) {
      vassert(sizeof(IRExpr) == 16);
      vassert(sizeof(IRStmt) == 20 /* x86 */
              || sizeof(IRStmt) == 24 /* arm */);
   } else {
      vassert(sizeof(IRExpr) == 32);
      vassert(sizeof(IRStmt) == 32);
   }

   /* Ditto */
   vassert(sizeof(HReg) == 4);
   /* If N_RREGUNIVERSE_REGS ever exceeds 64, the bitset fields in
      RRegSet and HRegUsage will need to be changed to something
      better than ULong. */
   vassert(N_RREGUNIVERSE_REGS == 64);

   /* Check that signed integer division on the host rounds towards
      zero.  If not, h_calc_sdiv32_w_arm_semantics() won't work
      correctly. */
   /* 100.0 / 7.0 == 14.2857 */
   vassert(udiv32(100, 7) == 14);
   vassert(sdiv32(100, 7) == 14);
   vassert(sdiv32(-100, 7) == -14); /* and not -15 */
   vassert(sdiv32(100, -7) == -14); /* ditto */
   vassert(sdiv32(-100, -7) == 14); /* not sure what this proves */

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}
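
/* A minimal start-of-day sketch for a library client (illustrative
   only; my_failure_exit and my_log_bytes are hypothetical callbacks
   that a real client must supply itself).

      __attribute__((noreturn)) static void my_failure_exit ( void )
         { for (;;) ; }
      static void my_log_bytes ( const HChar* buf, SizeT nbytes )
         { ... write buf[0 .. nbytes-1] to a log ... }

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      vcon.iropt_level = 2;   // optionally adjust the defaults
      LibVEX_Init(my_failure_exit, my_log_bytes,
                  0,          // debuglevel
                  &vcon);
*/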


/* --------- Make a translation. --------- */

/* KLUDGE: S390 needs to know the hwcaps of the host when generating
   code.  But that info is not passed to emit_S390Instr; only mode64
   is.  So, ideally, we want it passed as an argument too.  Until
   then, we use a global variable, set as a side effect of
   LibVEX_Translate.  The variable is defined here rather than in
   host_s390_defs.c so that main_main.c does not drag in S390 object
   files in non-VEXMULTIARCH builds. */
UInt s390_host_hwcaps;


/* Exported to library client. */

IRSB* LibVEX_FrontEnd ( /*MOD*/ VexTranslateArgs* vta,
                        /*OUT*/ VexTranslateResult* res,
                        /*OUT*/ VexRegisterUpdates* pxControl)
{
   IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
   Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );
   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   IRSB*           irsb;
   Int             i;
   Int             offB_CMSTART, offB_CMLEN, offB_GUEST_IP, szB_GUEST_IP;
   IRType          guest_word_type;
   IRType          host_word_type;

   guest_layout            = NULL;
   specHelper              = NULL;
   disInstrFn              = NULL;
   preciseMemExnsFn        = NULL;
   guest_word_type         = arch_word_size(vta->arch_guest);
   host_word_type          = arch_word_size(vta->arch_host);
   offB_CMSTART            = 0;
   offB_CMLEN              = 0;
   offB_GUEST_IP           = 0;
   szB_GUEST_IP            = 0;

   vassert(vex_initdone);
   vassert(vta->needs_self_check  != NULL);
   vassert(vta->disp_cp_xassisted != NULL);
   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir             != NULL);
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir             == NULL);
   }

   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   vex_traceflags = vta->traceflags;

   /* KLUDGE: export hwcaps. */
   if (vta->arch_host == VexArchS390X) {
      s390_host_hwcaps = vta->archinfo_host.hwcaps;
   }

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn
            = X86FN(guest_x86_state_requires_precise_mem_exns);
         disInstrFn              = X86FN(disInstr_X86);
         specHelper              = X86FN(guest_x86_spechelper);
         guest_layout            = X86FN(&x86guest_layout);
         offB_CMSTART            = offsetof(VexGuestX86State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestX86State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestX86State,guest_EIP);
         szB_GUEST_IP            = sizeof( ((VexGuestX86State*)0)->guest_EIP );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestX86State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn
            = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
         disInstrFn              = AMD64FN(disInstr_AMD64);
         specHelper              = AMD64FN(guest_amd64_spechelper);
         guest_layout            = AMD64FN(&amd64guest_layout);
         offB_CMSTART            = offsetof(VexGuestAMD64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestAMD64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestAMD64State,guest_RIP);
         szB_GUEST_IP            = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestAMD64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn
            = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
         disInstrFn              = PPC32FN(disInstr_PPC);
         specHelper              = PPC32FN(guest_ppc32_spechelper);
         guest_layout            = PPC32FN(&ppc32Guest_layout);
         offB_CMSTART            = offsetof(VexGuestPPC32State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestPPC32State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestPPC32State,guest_CIA);
         szB_GUEST_IP            = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
         vassert(vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestPPC32State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn
            = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
         disInstrFn              = PPC64FN(disInstr_PPC);
         specHelper              = PPC64FN(guest_ppc64_spechelper);
         guest_layout            = PPC64FN(&ppc64Guest_layout);
         offB_CMSTART            = offsetof(VexGuestPPC64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestPPC64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestPPC64State,guest_CIA);
         szB_GUEST_IP            = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
         vassert(vta->archinfo_guest.endness == VexEndnessBE ||
                 vta->archinfo_guest.endness == VexEndnessLE );
         vassert(0 == sizeof(VexGuestPPC64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn
            = S390FN(guest_s390x_state_requires_precise_mem_exns);
         disInstrFn              = S390FN(disInstr_S390);
         specHelper              = S390FN(guest_s390x_spechelper);
         guest_layout            = S390FN(&s390xGuest_layout);
         offB_CMSTART            = offsetof(VexGuestS390XState,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestS390XState,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestS390XState,guest_IA);
         szB_GUEST_IP            = sizeof( ((VexGuestS390XState*)0)->guest_IA);
         vassert(vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestS390XState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn
            = ARMFN(guest_arm_state_requires_precise_mem_exns);
         disInstrFn              = ARMFN(disInstr_ARM);
         specHelper              = ARMFN(guest_arm_spechelper);
         guest_layout            = ARMFN(&armGuest_layout);
         offB_CMSTART            = offsetof(VexGuestARMState,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestARMState,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestARMState,guest_R15T);
         szB_GUEST_IP            = sizeof( ((VexGuestARMState*)0)->guest_R15T );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestARMState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchARM64:
         preciseMemExnsFn
            = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
         disInstrFn              = ARM64FN(disInstr_ARM64);
         specHelper              = ARM64FN(guest_arm64_spechelper);
         guest_layout            = ARM64FN(&arm64Guest_layout);
         offB_CMSTART            = offsetof(VexGuestARM64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestARM64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestARM64State,guest_PC);
         szB_GUEST_IP            = sizeof( ((VexGuestARM64State*)0)->guest_PC );
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestARM64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn
            = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
         disInstrFn              = MIPS32FN(disInstr_MIPS);
         specHelper              = MIPS32FN(guest_mips32_spechelper);
         guest_layout            = MIPS32FN(&mips32Guest_layout);
         offB_CMSTART            = offsetof(VexGuestMIPS32State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestMIPS32State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestMIPS32State,guest_PC);
         szB_GUEST_IP            = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
         vassert(vta->archinfo_guest.endness == VexEndnessLE
                 || vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestMIPS32State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchMIPS64:
         preciseMemExnsFn
            = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
         disInstrFn              = MIPS64FN(disInstr_MIPS);
         specHelper              = MIPS64FN(guest_mips64_spechelper);
         guest_layout            = MIPS64FN(&mips64Guest_layout);
         offB_CMSTART            = offsetof(VexGuestMIPS64State,guest_CMSTART);
         offB_CMLEN              = offsetof(VexGuestMIPS64State,guest_CMLEN);
         offB_GUEST_IP           = offsetof(VexGuestMIPS64State,guest_PC);
         szB_GUEST_IP            = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
         vassert(vta->archinfo_guest.endness == VexEndnessLE
                 || vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestMIPS64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_NRADDR ) == 8);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   // Check that the guest's hardware capabilities are feasible.  The
   // function does not return if the hwcaps are infeasible in some sense.
   // FIXME: how can we know the guest's hardware capabilities?
   check_hwcaps(vta->arch_guest, vta->archinfo_guest.hwcaps);

   res->status         = VexTransOK;
   res->n_sc_extents   = 0;
   res->offs_profInc   = -1;
   res->n_guest_instrs = 0;

#ifndef VEXMULTIARCH
   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* doesn't necessarily have to be true, but if it isn't it means
         we are simulating one flavour of an architecture on a
         different flavour of the same architecture, which is pretty
         strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
      /* ditto */
      vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
   }
#endif

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   *pxControl = vex_control.iropt_register_updates_default;
   vassert(*pxControl >= VexRegUpdSpAtMemAccess
           && *pxControl <= VexRegUpdAllregsAtEachInsn);

   irsb = bb_to_IR ( vta->guest_extents,
                     &res->n_sc_extents,
                     &res->n_guest_instrs,
                     pxControl,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     vta->archinfo_host.endness,
                     vta->sigill_diag,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->needs_self_check,
                     vta->preamble_function,
                     offB_CMSTART,
                     offB_CMLEN,
                     offB_GUEST_IP,
                     szB_GUEST_IP );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      return NULL;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* bb_to_IR() could have caused pxControl to change. */
   vassert(*pxControl >= VexRegUpdSpAtMemAccess
           && *pxControl <= VexRegUpdAllregsAtEachInsn);

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         const UChar* p = vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %lx %u ", vta->guest_bytes_addr,
                                          guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, *pxControl,
                              vta->guest_bytes_addr,
                              vta->arch_guest );

   // JRS 2016 Aug 03: Sanity checking is expensive, we already checked
   // the output of the front end, and iropt never screws up the IR by
   // itself, unless it is being hacked on.  So remove this post-iropt
   // check in "production" use.
   // sanityCheckIRSB( irsb, "after initial iropt",
   //                  True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   // JRS 2016 Aug 03: as above, this never actually fails in practice.
   // And we'll sanity check anyway after the post-instrumentation
   // cleanup pass.  So skip this check in "production" use.
   // if (vta->instrument1 || vta->instrument2)
   //    sanityCheckIRSB( irsb, "after instrumentation",
   //                     True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   return irsb;
}
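
/* A sketch of driving the front end on its own, e.g. to inspect a
   block's IR without generating host code (illustrative; assumes a
   fully filled-in VexTranslateArgs `vta`, as for LibVEX_Translate
   below).

      VexTranslateResult res = { 0 };
      VexRegisterUpdates pxControl = VexRegUpd_INVALID;
      IRSB* irsb = LibVEX_FrontEnd(&vta, &res, &pxControl);
      if (irsb)
         ppIRSB(irsb);   // print the optimised, instrumented IR
*/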


/* Back end of the compilation pipeline.  Not exported. */

static void libvex_BackEnd ( const VexTranslateArgs *vta,
                             /*MOD*/ VexTranslateResult* res,
                             /*MOD*/ IRSB* irsb,
                             VexRegisterUpdates pxControl )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   Bool         (*isMove)       ( const HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, const HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( const HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( const IRSB*, VexArch, const VexArchInfo*,
                                  const VexAbiInfo*, Int, Int, Bool, Bool,
                                  Addr );
   Int          (*emit)         ( /*MB_MOD*/Bool*,
                                  UChar*, Int, const HInstr*, Bool, VexEndness,
                                  const void*, const void*, const void*,
                                  const void* );
   Bool (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );

   const RRegUniverse* rRegUniv = NULL;

   Bool            mode64, chainingAllowed;
   Int             i, j, k, out_used;
   Int guest_sizeB;
   Int offB_HOST_EvC_COUNTER;
   Int offB_HOST_EvC_FAILADDR;
   Addr            max_ga;
   UChar           insn_bytes[128];
   HInstrArray*    vcode;
   HInstrArray*    rcode;

   isMove                  = NULL;
   getRegUsage             = NULL;
   mapRegs                 = NULL;
   genSpill                = NULL;
   genReload               = NULL;
   directReload            = NULL;
   ppInstr                 = NULL;
   ppReg                   = NULL;
   iselSB                  = NULL;
   emit                    = NULL;

   mode64                 = False;
   chainingAllowed        = False;
   guest_sizeB            = 0;
   offB_HOST_EvC_COUNTER  = 0;
   offB_HOST_EvC_FAILADDR = 0;
   preciseMemExnsFn       = NULL;

   vassert(vex_initdone);
   vassert(vta->disp_cp_xassisted != NULL);

   vex_traceflags = vta->traceflags;

   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir             != NULL);
      chainingAllowed = True;
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir             == NULL);
   }

   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn
            = X86FN(guest_x86_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestX86State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
         break;

      case VexArchAMD64:
         preciseMemExnsFn
            = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestAMD64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
         break;

      case VexArchPPC32:
         preciseMemExnsFn
            = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestPPC32State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
         break;

      case VexArchPPC64:
         preciseMemExnsFn
            = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestPPC64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
         break;

      case VexArchS390X:
         preciseMemExnsFn
            = S390FN(guest_s390x_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestS390XState);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
         break;

      case VexArchARM:
         preciseMemExnsFn
            = ARMFN(guest_arm_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestARMState);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
         break;

      case VexArchARM64:
         preciseMemExnsFn
            = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestARM64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn
            = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestMIPS32State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
         break;

      case VexArchMIPS64:
         preciseMemExnsFn
            = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
         guest_sizeB            = sizeof(VexGuestMIPS64State);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
         break;

      default:
         vpanic("LibVEX_Codegen: unsupported guest insn set");
   }


   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         rRegUniv     = X86FN(getRRegUniverse_X86());
         isMove       = CAST_TO_TYPEOF(isMove) X86FN(isMove_X86Instr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) X86FN(getRegUsage_X86Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) X86FN(mapRegs_X86Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) X86FN(genSpill_X86);
         genReload    = CAST_TO_TYPEOF(genReload) X86FN(genReload_X86);
         directReload = CAST_TO_TYPEOF(directReload) X86FN(directReload_X86);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) X86FN(ppX86Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) X86FN(ppHRegX86);
         iselSB       = X86FN(iselSB_X86);
         emit         = CAST_TO_TYPEOF(emit) X86FN(emit_X86Instr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchAMD64:
         mode64       = True;
         rRegUniv     = AMD64FN(getRRegUniverse_AMD64());
         isMove       = CAST_TO_TYPEOF(isMove) AMD64FN(isMove_AMD64Instr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) AMD64FN(getRegUsage_AMD64Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) AMD64FN(mapRegs_AMD64Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) AMD64FN(genSpill_AMD64);
         genReload    = CAST_TO_TYPEOF(genReload) AMD64FN(genReload_AMD64);
         directReload = CAST_TO_TYPEOF(directReload) AMD64FN(directReload_AMD64);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) AMD64FN(ppAMD64Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) AMD64FN(ppHRegAMD64);
         iselSB       = AMD64FN(iselSB_AMD64);
         emit         = CAST_TO_TYPEOF(emit) AMD64FN(emit_AMD64Instr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchPPC32:
         mode64       = False;
         rRegUniv     = PPC32FN(getRRegUniverse_PPC(mode64));
         isMove       = CAST_TO_TYPEOF(isMove) PPC32FN(isMove_PPCInstr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) PPC32FN(getRegUsage_PPCInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) PPC32FN(mapRegs_PPCInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) PPC32FN(genSpill_PPC);
         genReload    = CAST_TO_TYPEOF(genReload) PPC32FN(genReload_PPC);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) PPC32FN(ppPPCInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) PPC32FN(ppHRegPPC);
         iselSB       = PPC32FN(iselSB_PPC);
         emit         = CAST_TO_TYPEOF(emit) PPC32FN(emit_PPCInstr);
         vassert(vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchPPC64:
         mode64       = True;
         rRegUniv     = PPC64FN(getRRegUniverse_PPC(mode64));
         isMove       = CAST_TO_TYPEOF(isMove) PPC64FN(isMove_PPCInstr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) PPC64FN(getRegUsage_PPCInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) PPC64FN(mapRegs_PPCInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) PPC64FN(genSpill_PPC);
         genReload    = CAST_TO_TYPEOF(genReload) PPC64FN(genReload_PPC);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) PPC64FN(ppPPCInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) PPC64FN(ppHRegPPC);
         iselSB       = PPC64FN(iselSB_PPC);
         emit         = CAST_TO_TYPEOF(emit) PPC64FN(emit_PPCInstr);
         vassert(vta->archinfo_host.endness == VexEndnessBE ||
                 vta->archinfo_host.endness == VexEndnessLE );
         break;

      case VexArchS390X:
         mode64       = True;
         rRegUniv     = S390FN(getRRegUniverse_S390());
         isMove       = CAST_TO_TYPEOF(isMove) S390FN(isMove_S390Instr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) S390FN(getRegUsage_S390Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) S390FN(mapRegs_S390Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) S390FN(genSpill_S390);
         genReload    = CAST_TO_TYPEOF(genReload) S390FN(genReload_S390);
         // fixs390: consider implementing directReload_S390
         ppInstr      = CAST_TO_TYPEOF(ppInstr) S390FN(ppS390Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) S390FN(ppHRegS390);
         iselSB       = S390FN(iselSB_S390);
         emit         = CAST_TO_TYPEOF(emit) S390FN(emit_S390Instr);
         vassert(vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchARM:
         mode64       = False;
         rRegUniv     = ARMFN(getRRegUniverse_ARM());
         isMove       = CAST_TO_TYPEOF(isMove) ARMFN(isMove_ARMInstr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) ARMFN(getRegUsage_ARMInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) ARMFN(mapRegs_ARMInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) ARMFN(genSpill_ARM);
         genReload    = CAST_TO_TYPEOF(genReload) ARMFN(genReload_ARM);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) ARMFN(ppARMInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) ARMFN(ppHRegARM);
         iselSB       = ARMFN(iselSB_ARM);
         emit         = CAST_TO_TYPEOF(emit) ARMFN(emit_ARMInstr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchARM64:
         mode64       = True;
         rRegUniv     = ARM64FN(getRRegUniverse_ARM64());
         isMove       = CAST_TO_TYPEOF(isMove) ARM64FN(isMove_ARM64Instr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) ARM64FN(getRegUsage_ARM64Instr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) ARM64FN(mapRegs_ARM64Instr);
         genSpill     = CAST_TO_TYPEOF(genSpill) ARM64FN(genSpill_ARM64);
         genReload    = CAST_TO_TYPEOF(genReload) ARM64FN(genReload_ARM64);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) ARM64FN(ppARM64Instr);
         ppReg        = CAST_TO_TYPEOF(ppReg) ARM64FN(ppHRegARM64);
         iselSB       = ARM64FN(iselSB_ARM64);
         emit         = CAST_TO_TYPEOF(emit) ARM64FN(emit_ARM64Instr);
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchMIPS32:
         mode64       = False;
         rRegUniv     = MIPS32FN(getRRegUniverse_MIPS(mode64));
         isMove       = CAST_TO_TYPEOF(isMove) MIPS32FN(isMove_MIPSInstr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) MIPS32FN(getRegUsage_MIPSInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) MIPS32FN(mapRegs_MIPSInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) MIPS32FN(genSpill_MIPS);
         genReload    = CAST_TO_TYPEOF(genReload) MIPS32FN(genReload_MIPS);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) MIPS32FN(ppMIPSInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) MIPS32FN(ppHRegMIPS);
         iselSB       = MIPS32FN(iselSB_MIPS);
         emit         = CAST_TO_TYPEOF(emit) MIPS32FN(emit_MIPSInstr);
         vassert(vta->archinfo_host.endness == VexEndnessLE
                 || vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchMIPS64:
         mode64       = True;
         rRegUniv     = MIPS64FN(getRRegUniverse_MIPS(mode64));
         isMove       = CAST_TO_TYPEOF(isMove) MIPS64FN(isMove_MIPSInstr);
         getRegUsage
            = CAST_TO_TYPEOF(getRegUsage) MIPS64FN(getRegUsage_MIPSInstr);
         mapRegs      = CAST_TO_TYPEOF(mapRegs) MIPS64FN(mapRegs_MIPSInstr);
         genSpill     = CAST_TO_TYPEOF(genSpill) MIPS64FN(genSpill_MIPS);
         genReload    = CAST_TO_TYPEOF(genReload) MIPS64FN(genReload_MIPS);
         ppInstr      = CAST_TO_TYPEOF(ppInstr) MIPS64FN(ppMIPSInstr);
         ppReg        = CAST_TO_TYPEOF(ppReg) MIPS64FN(ppHRegMIPS);
         iselSB       = MIPS64FN(iselSB_MIPS);
         emit         = CAST_TO_TYPEOF(emit) MIPS64FN(emit_MIPSInstr);
         vassert(vta->archinfo_host.endness == VexEndnessLE
                 || vta->archinfo_host.endness == VexEndnessBE);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }

   // Check that the host's hardware capabilities are feasible.  The
   // function does not return if the hwcaps are infeasible in some sense.
   check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps);


   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn, pxControl );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res->status = VexTransOK; return;
   }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   /* No guest has its IP field at offset zero.  If this fails it
      means some transformation pass somewhere failed to update/copy
      irsb->offsIP properly. */
   vassert(irsb->offsIP >= 16);

   vcode = iselSB ( irsb, vta->arch_host,
                    &vta->archinfo_host,
                    &vta->abiinfo_both,
                    offB_HOST_EvC_COUNTER,
                    offB_HOST_EvC_FAILADDR,
                    chainingAllowed,
                    vta->addProfInc,
                    max_ga );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, rRegUniv,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res->status = VexTransOK; return;
   }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      HInstr* hi           = rcode->arr[i];
      Bool    hi_isProfInc = False;
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         ppInstr(hi, mode64);
         vex_printf("\n");
      }
      j = emit( &hi_isProfInc,
                insn_bytes, sizeof insn_bytes, hi,
                mode64, vta->archinfo_host.endness,
                vta->disp_cp_chain_me_to_slowEP,
                vta->disp_cp_chain_me_to_fastEP,
                vta->disp_cp_xindir,
                vta->disp_cp_xassisted );
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         for (k = 0; k < j; k++)
            vex_printf("%02x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         res->status = VexTransOutputFull;
         return;
      }
      if (UNLIKELY(hi_isProfInc)) {
         vassert(vta->addProfInc); /* else where did it come from? */
         vassert(res->offs_profInc == -1); /* there can be only one (tm) */
         vassert(out_used >= 0);
         res->offs_profInc = out_used;
      }
      { UChar* dst = &vta->host_bytes[out_used];
        for (k = 0; k < j; k++) {
           dst[k] = insn_bytes[k];
        }
        out_used += j;
      }
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   if (vex_traceflags) {
      /* Print the expansion ratio for this SB. */
      j = 0; /* total guest bytes */
      for (i = 0; i < vta->guest_extents->n_used; i++) {
         j += vta->guest_extents->len[i];
      }
      if (1) vex_printf("VexExpansionRatio %d %d   %d :10\n\n",
                        j, out_used, (10 * out_used) / (j == 0 ? 1 : j));
   }

   vex_traceflags = 0;
   res->status = VexTransOK;
   return;
}


/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( /*MOD*/ VexTranslateArgs* vta )
{
   VexTranslateResult res = { 0 };
   VexRegisterUpdates pxControl = VexRegUpd_INVALID;

   IRSB* irsb = LibVEX_FrontEnd(vta, &res, &pxControl);
   libvex_BackEnd(vta, &res, irsb, pxControl);
   return res;
}
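
/* An illustrative end-to-end use of LibVEX_Translate.  A sketch only:
   code_ptr, code_addr, out_buf and n_out are hypothetical, and a real
   client must fill in every VexTranslateArgs field (archinfo, abiinfo,
   callbacks and so on), not just the ones shown.

      VexTranslateArgs vta;
      // ... fill in vta completely, including e.g.:
      vta.arch_guest       = VexArchAMD64;
      vta.arch_host        = VexArchAMD64;
      vta.guest_bytes      = code_ptr;    // machine code to translate
      vta.guest_bytes_addr = code_addr;   // its guest address
      vta.host_bytes       = out_buf;     // where output code goes
      vta.host_bytes_size  = sizeof out_buf;
      vta.host_bytes_used  = &n_out;
      VexTranslateResult tres = LibVEX_Translate(&vta);
      vassert(tres.status == VexTransOK);
*/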


/* --------- Chain/Unchain XDirects. --------- */

VexInvalRange LibVEX_Chain ( VexArch     arch_host,
                             VexEndness  endness_host,
                             void*       place_to_chain,
                             const void* disp_cp_chain_me_EXPECTED,
                             const void* place_to_jump_to )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return chainXDirect_X86(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchAMD64:
         AMD64ST(return chainXDirect_AMD64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchARM:
         ARMST(return chainXDirect_ARM(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchARM64:
         ARM64ST(return chainXDirect_ARM64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchS390X:
         S390ST(return chainXDirect_S390(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to));
      case VexArchPPC32:
         PPC32ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, True/*mode64*/));
      default:
         vassert(0);
   }
}
1237 
LibVEX_UnChain(VexArch arch_host,VexEndness endness_host,void * place_to_unchain,const void * place_to_jump_to_EXPECTED,const void * disp_cp_chain_me)1238 VexInvalRange LibVEX_UnChain ( VexArch     arch_host,
1239                                VexEndness  endness_host,
1240                                void*       place_to_unchain,
1241                                const void* place_to_jump_to_EXPECTED,
1242                                const void* disp_cp_chain_me )
1243 {
1244    switch (arch_host) {
1245       case VexArchX86:
1246          X86ST(return unchainXDirect_X86(endness_host,
1247                                          place_to_unchain,
1248                                          place_to_jump_to_EXPECTED,
1249                                          disp_cp_chain_me));
1250       case VexArchAMD64:
1251          AMD64ST(return unchainXDirect_AMD64(endness_host,
1252                                              place_to_unchain,
1253                                              place_to_jump_to_EXPECTED,
1254                                              disp_cp_chain_me));
1255       case VexArchARM:
1256          ARMST(return unchainXDirect_ARM(endness_host,
1257                                          place_to_unchain,
1258                                          place_to_jump_to_EXPECTED,
1259                                          disp_cp_chain_me));
1260       case VexArchARM64:
1261          ARM64ST(return unchainXDirect_ARM64(endness_host,
1262                                              place_to_unchain,
1263                                              place_to_jump_to_EXPECTED,
1264                                              disp_cp_chain_me));
1265       case VexArchS390X:
1266          S390ST(return unchainXDirect_S390(endness_host,
1267                                            place_to_unchain,
1268                                            place_to_jump_to_EXPECTED,
1269                                            disp_cp_chain_me));
1270       case VexArchPPC32:
1271          PPC32ST(return unchainXDirect_PPC(endness_host,
1272                                            place_to_unchain,
1273                                            place_to_jump_to_EXPECTED,
1274                                            disp_cp_chain_me, False/*!mode64*/));
1275       case VexArchPPC64:
1276          PPC64ST(return unchainXDirect_PPC(endness_host,
1277                                            place_to_unchain,
1278                                            place_to_jump_to_EXPECTED,
1279                                            disp_cp_chain_me, True/*mode64*/));
1280       case VexArchMIPS32:
1281          MIPS32ST(return unchainXDirect_MIPS(endness_host,
1282                                              place_to_unchain,
1283                                              place_to_jump_to_EXPECTED,
1284                                              disp_cp_chain_me, False/*!mode64*/));
1285       case VexArchMIPS64:
1286          MIPS64ST(return unchainXDirect_MIPS(endness_host,
1287                                              place_to_unchain,
1288                                              place_to_jump_to_EXPECTED,
1289                                              disp_cp_chain_me, True/*mode64*/));
1290       default:
1291          vassert(0);
1292    }
1293 }
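
/* Illustrative round trip (a sketch; 'chain_site', 'target',
   'disp_cp_chain_me' and 'flush_icache' are hypothetical, not from
   this file): a host embedding VEX pairs LibVEX_Chain and
   LibVEX_UnChain, invalidating the returned instruction range after
   each patch.

      VexInvalRange vir;
      vir = LibVEX_Chain(arch, endness, chain_site,
                         disp_cp_chain_me, target);
      flush_icache((void*)vir.start, vir.len);
      ...
      vir = LibVEX_UnChain(arch, endness, chain_site,
                           target, disp_cp_chain_me);
      flush_icache((void*)vir.start, vir.len);
*/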
1294 
1295 Int LibVEX_evCheckSzB ( VexArch    arch_host )
1296 {
1297    static Int cached = 0; /* DO NOT MAKE NON-STATIC */
1298    if (UNLIKELY(cached == 0)) {
1299       switch (arch_host) {
1300          case VexArchX86:
1301             X86ST(cached = evCheckSzB_X86()); break;
1302          case VexArchAMD64:
1303             AMD64ST(cached = evCheckSzB_AMD64()); break;
1304          case VexArchARM:
1305             ARMST(cached = evCheckSzB_ARM()); break;
1306          case VexArchARM64:
1307             ARM64ST(cached = evCheckSzB_ARM64()); break;
1308          case VexArchS390X:
1309             S390ST(cached = evCheckSzB_S390()); break;
1310          case VexArchPPC32:
1311             PPC32ST(cached = evCheckSzB_PPC()); break;
1312          case VexArchPPC64:
1313             PPC64ST(cached = evCheckSzB_PPC()); break;
1314          case VexArchMIPS32:
1315             MIPS32ST(cached = evCheckSzB_MIPS()); break;
1316          case VexArchMIPS64:
1317             MIPS64ST(cached = evCheckSzB_MIPS()); break;
1318          default:
1319             vassert(0);
1320       }
1321    }
1322    return cached;
1323 }
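
/* Usage sketch ('tbuf' and 'reserve_bytes' are hypothetical): the
   result is the fixed size in bytes of a translation's event check
   on the host, so a code-buffer allocator can reserve that much up
   front.  Because 'cached' is static, the first arch_host queried
   fixes the answer for the lifetime of the process.

      Int szB = LibVEX_evCheckSzB(VexArchAMD64);
      reserve_bytes(tbuf, szB);
*/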
1324 
1325 VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
1326                                     VexEndness endness_host,
1327                                     void*      place_to_patch,
1328                                     const ULong* location_of_counter )
1329 {
1330    switch (arch_host) {
1331       case VexArchX86:
1332          X86ST(return patchProfInc_X86(endness_host, place_to_patch,
1333                                        location_of_counter));
1334       case VexArchAMD64:
1335          AMD64ST(return patchProfInc_AMD64(endness_host, place_to_patch,
1336                                            location_of_counter));
1337       case VexArchARM:
1338          ARMST(return patchProfInc_ARM(endness_host, place_to_patch,
1339                                        location_of_counter));
1340       case VexArchARM64:
1341          ARM64ST(return patchProfInc_ARM64(endness_host, place_to_patch,
1342                                            location_of_counter));
1343       case VexArchS390X:
1344          S390ST(return patchProfInc_S390(endness_host, place_to_patch,
1345                                          location_of_counter));
1346       case VexArchPPC32:
1347          PPC32ST(return patchProfInc_PPC(endness_host, place_to_patch,
1348                                          location_of_counter, False/*!mode64*/));
1349       case VexArchPPC64:
1350          PPC64ST(return patchProfInc_PPC(endness_host, place_to_patch,
1351                                          location_of_counter, True/*mode64*/));
1352       case VexArchMIPS32:
1353          MIPS32ST(return patchProfInc_MIPS(endness_host, place_to_patch,
1354                                            location_of_counter, False/*!mode64*/));
1355       case VexArchMIPS64:
1356          MIPS64ST(return patchProfInc_MIPS(endness_host, place_to_patch,
1357                                            location_of_counter, True/*mode64*/));
1358       default:
1359          vassert(0);
1360    }
1361 }
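
/* Sketch ('prof_inc_site' and 'flush_icache' are hypothetical):
   patching a translation's profile-counter increment to bump a real
   counter, then flushing the patched range.

      static ULong counter = 0;
      VexInvalRange vir
         = LibVEX_PatchProfInc(arch, endness, prof_inc_site, &counter);
      flush_icache((void*)vir.start, vir.len);
*/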
1362 
1363 
1364 /* --------- Emulation warnings. --------- */
1365 
1366 const HChar* LibVEX_EmNote_string ( VexEmNote ew )
1367 {
1368    switch (ew) {
1369      case EmNote_NONE:
1370         return "none";
1371      case EmWarn_X86_x87exns:
1372         return "Unmasking x87 FP exceptions";
1373      case EmWarn_X86_x87precision:
1374         return "Selection of non-80-bit x87 FP precision";
1375      case EmWarn_X86_sseExns:
1376         return "Unmasking SSE FP exceptions";
1377      case EmWarn_X86_fz:
1378         return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
1379      case EmWarn_X86_daz:
1380         return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
1381      case EmWarn_X86_acFlag:
1382         return "Setting %eflags.ac (setting noted but ignored)";
1383      case EmWarn_PPCexns:
1384         return "Unmasking PPC32/64 FP exceptions";
1385      case EmWarn_PPC64_redir_overflow:
1386         return "PPC64 function redirection stack overflow";
1387      case EmWarn_PPC64_redir_underflow:
1388         return "PPC64 function redirection stack underflow";
1389      case EmWarn_S390X_fpext_rounding:
1390         return "The specified rounding mode cannot be supported. That\n"
1391                "  feature requires the floating point extension facility\n"
1392                "  which is not available on this host. Continuing using\n"
1393                "  the rounding mode from FPC. Results may differ!";
1394      case EmWarn_S390X_invalid_rounding:
1395         return "The specified rounding mode is invalid.\n"
1396                "  Continuing using 'round to nearest'. Results may differ!";
1397      case EmFail_S390X_stfle:
1398         return "Instruction stfle is not supported on this host";
1399      case EmFail_S390X_stckf:
1400         return "Instruction stckf is not supported on this host";
1401      case EmFail_S390X_ecag:
1402         return "Instruction ecag is not supported on this host";
1403      case EmFail_S390X_pfpo:
1404         return "Instruction pfpo is not supported on this host";
1405      case EmFail_S390X_DFP_insn:
1406         return "DFP instructions are not supported on this host";
1407      case EmFail_S390X_fpext:
1408         return "Encountered an instruction that requires the floating "
1409                "point extension facility.\n"
1410                "  That facility is not available on this host";
1411      case EmFail_S390X_invalid_PFPO_rounding_mode:
1412         return "The rounding mode in GPR 0 for the PFPO instruction"
1413                " is invalid";
1414      case EmFail_S390X_invalid_PFPO_function:
1415         return "The function code in GPR 0 for the PFPO instruction"
1416                " is invalid";
1417      default:
1418         vpanic("LibVEX_EmNote_string: unknown warning");
1419    }
1420 }
1421 
1422 /* ------------------ Arch/HwCaps stuff. ------------------ */
1423 
1424 const HChar* LibVEX_ppVexArch ( VexArch arch )
1425 {
1426    switch (arch) {
1427       case VexArch_INVALID: return "INVALID";
1428       case VexArchX86:      return "X86";
1429       case VexArchAMD64:    return "AMD64";
1430       case VexArchARM:      return "ARM";
1431       case VexArchARM64:    return "ARM64";
1432       case VexArchPPC32:    return "PPC32";
1433       case VexArchPPC64:    return "PPC64";
1434       case VexArchS390X:    return "S390X";
1435       case VexArchMIPS32:   return "MIPS32";
1436       case VexArchMIPS64:   return "MIPS64";
1437       default:              return "VexArch???";
1438    }
1439 }
1440 
1441 const HChar* LibVEX_ppVexEndness ( VexEndness endness )
1442 {
1443    switch (endness) {
1444       case VexEndness_INVALID: return "INVALID";
1445       case VexEndnessLE:       return "LittleEndian";
1446       case VexEndnessBE:       return "BigEndian";
1447       default:                 return "VexEndness???";
1448    }
1449 }
1450 
1451 /* Return a string with the hardware capabilities to the extent that
1452    they pertain to the translation process. No attempt is made to
1453    detect *all* capabilities an architecture may have. */
1454 const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
1455 {
1456    return show_hwcaps(arch, hwcaps);
1457 }
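
/* For example, judging from the show_hwcaps_* printers below,
   LibVEX_ppVexHwCaps(VexArchAMD64, 0) yields "amd64-sse2", and with
   CX16|LZCNT|SSE3 set it yields "amd64-cx16-lzcnt-sse3". */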
1458 
1459 
1460 /* Write default settings into *vai. */
1461 void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
1462 {
1463    vex_bzero(vai, sizeof(*vai));
1464    vai->hwcaps                  = 0;
1465    vai->endness                 = VexEndness_INVALID;
1466    vai->ppc_icache_line_szB     = 0;
1467    vai->ppc_dcbz_szB            = 0;
1468    vai->ppc_dcbzl_szB           = 0;
1469    vai->arm64_dMinLine_lg2_szB  = 0;
1470    vai->arm64_iMinLine_lg2_szB  = 0;
1471    vai->arm64_requires_fallback_LLSC = False;
1472    vai->hwcache_info.num_levels = 0;
1473    vai->hwcache_info.num_caches = 0;
1474    vai->hwcache_info.caches     = NULL;
1475    vai->hwcache_info.icaches_maintain_coherence = True;  // default; targets override
1476 }
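
/* Caller sketch (field values are illustrative only): take the
   defaults, then set the fields that have no meaningful default.

      VexArchInfo vai;
      LibVEX_default_VexArchInfo(&vai);
      vai.endness = VexEndnessLE;
      vai.hwcaps  = 0;              // baseline for the chosen arch
*/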
1477 
1478 /* Write default settings into *vbi. */
1479 void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
1480 {
1481    vex_bzero(vbi, sizeof(*vbi));
1482    vbi->guest_stack_redzone_size       = 0;
1483    vbi->guest_amd64_assume_fs_is_const = False;
1484    vbi->guest_amd64_assume_gs_is_const = False;
1485    vbi->guest_ppc_zap_RZ_at_blr        = False;
1486    vbi->guest_ppc_zap_RZ_at_bl         = NULL;
1487    vbi->guest__use_fallback_LLSC       = False;
1488    vbi->host_ppc_calls_use_fndescrs    = False;
1489 }
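
/* Analogous sketch (the value shown is illustrative): start from the
   zeroed defaults, then apply per-platform tweaks.

      VexAbiInfo vbi;
      LibVEX_default_VexAbiInfo(&vbi);
      vbi.guest_stack_redzone_size = 128;  // e.g. amd64 ELF red zone
*/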
1490 
1491 
1492 static IRType arch_word_size (VexArch arch) {
1493    switch (arch) {
1494       case VexArchX86:
1495       case VexArchARM:
1496       case VexArchMIPS32:
1497       case VexArchPPC32:
1498          return Ity_I32;
1499 
1500       case VexArchAMD64:
1501       case VexArchARM64:
1502       case VexArchMIPS64:
1503       case VexArchPPC64:
1504       case VexArchS390X:
1505          return Ity_I64;
1506 
1507       default:
1508          vex_printf("Fatal: unknown arch in arch_word_size\n");
1509          vassert(0);
1510    }
1511 }
1512 
1513 
1514 /* Convenience macro to be used in show_hwcaps_ARCH functions */
1515 #define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])
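
/* For instance, in show_hwcaps_x86 below hwcaps_list has 5 entries,
   so NUM_HWCAPS evaluates to 5; the same expression also sizes each
   printer's static output buffer. */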
1516 
1517 /* Return a string showing the hwcaps in a nice way.  The string will
1518    be NULL only for an unrecognised architecture. */
1519 
1520 static const HChar* show_hwcaps_x86 ( UInt hwcaps )
1521 {
1522    static const HChar prefix[] = "x86";
1523    static const struct {
1524       UInt  hwcaps_bit;
1525       HChar name[7];
1526    } hwcaps_list[] = {
1527       { VEX_HWCAPS_X86_MMXEXT, "mmxext" },
1528       { VEX_HWCAPS_X86_SSE1,   "sse1"   },
1529       { VEX_HWCAPS_X86_SSE2,   "sse2"   },
1530       { VEX_HWCAPS_X86_SSE3,   "sse3"   },
1531       { VEX_HWCAPS_X86_LZCNT,  "lzcnt"  },
1532    };
1533    /* Allocate a large enough buffer */
1534    static HChar buf[sizeof prefix +
1535                     NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
1536    if (buf[0] != '\0') return buf;  /* already constructed */
1537 
1538    HChar *p = buf + vex_sprintf(buf, "%s", prefix);
1539 
1540    if (hwcaps == 0) {
1541       vex_sprintf(p, "-%s", "sse0");
1542    } else {
1543       UInt i;
1544       for (i = 0 ; i < NUM_HWCAPS; ++i) {
1545          if (hwcaps & hwcaps_list[i].hwcaps_bit)
1546             p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
1547       }
1548    }
1549    return buf;
1550 }
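
/* Sample results: hwcaps == 0 yields "x86-sse0", while
   MMXEXT|SSE1|SSE2 yields "x86-mmxext-sse1-sse2". */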
1551 
1552 static const HChar* show_hwcaps_amd64 ( UInt hwcaps )
1553 {
1554    static const HChar prefix[] = "amd64";
1555    static const struct {
1556       UInt  hwcaps_bit;
1557       HChar name[7];
1558    } hwcaps_list[] = {
1559       { VEX_HWCAPS_AMD64_CX16,   "cx16"   },
1560       { VEX_HWCAPS_AMD64_LZCNT,  "lzcnt"  },
1561       { VEX_HWCAPS_AMD64_RDTSCP, "rdtscp" },
1562       { VEX_HWCAPS_AMD64_SSE3,   "sse3"   },
1563       { VEX_HWCAPS_AMD64_AVX,    "avx"    },
1564       { VEX_HWCAPS_AMD64_AVX2,   "avx2"   },
1565       { VEX_HWCAPS_AMD64_BMI,    "bmi"    },
1566    };
1567    /* Allocate a large enough buffer */
1568    static HChar buf[sizeof prefix +
1569                     NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
1570    if (buf[0] != '\0') return buf;  /* already constructed */
1571 
1572    HChar *p = buf + vex_sprintf(buf, "%s", prefix);
1573 
1574    if (hwcaps == 0) {
1575       vex_sprintf(p, "-%s", "sse2");
1576    } else {
1577       UInt i;
1578       for (i = 0 ; i < NUM_HWCAPS; ++i) {
1579          if (hwcaps & hwcaps_list[i].hwcaps_bit)
1580             p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
1581       }
1582    }
1583    return buf;
1584 }
1585 
1586 static const HChar* show_hwcaps_ppc32 ( UInt hwcaps )
1587 {
1588    static const HChar prefix[] = "ppc32-int";
1589    static const struct {
1590       UInt  hwcaps_bit;
1591       HChar name[8];
1592    } hwcaps_list[] = {
1593       { VEX_HWCAPS_PPC32_F,       "flt"     },
1594       { VEX_HWCAPS_PPC32_V,       "vmx"     },
1595       { VEX_HWCAPS_PPC32_FX,      "FX"      },
1596       { VEX_HWCAPS_PPC32_GX,      "GX"      },
1597       { VEX_HWCAPS_PPC32_VX,      "VX"      },
1598       { VEX_HWCAPS_PPC32_DFP,     "DFP"     },
1599       { VEX_HWCAPS_PPC32_ISA2_07, "ISA2_07" },
1600       { VEX_HWCAPS_PPC32_ISA3_0,  "ISA3_0"  },
1601    };
1602    /* Allocate a large enough buffer */
1603    static HChar buf[sizeof prefix +
1604                     NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
1605    if (buf[0] != '\0') return buf;  /* already constructed */
1606 
1607    HChar *p = buf + vex_sprintf(buf, "%s", prefix);
1608 
1609    if (hwcaps == 0) return buf;
1610 
1611    UInt i;
1612    for (i = 0 ; i < NUM_HWCAPS; ++i) {
1613       if (hwcaps & hwcaps_list[i].hwcaps_bit)
1614          p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
1615    }
1616    return buf;
1617 }
1618 
1619 static const HChar* show_hwcaps_ppc64 ( UInt hwcaps )
1620 {
1621    static const HChar prefix[] = "ppc64-int-flt";
1622    static const struct {
1623       UInt  hwcaps_bit;
1624       HChar name[8];
1625    } hwcaps_list[] = {
1626       { VEX_HWCAPS_PPC64_FX,      "FX"      },
1627       { VEX_HWCAPS_PPC64_GX,      "GX"      },
1628       { VEX_HWCAPS_PPC64_V,       "vmx"     },
1629       { VEX_HWCAPS_PPC64_DFP,     "DFP"     },
1630       { VEX_HWCAPS_PPC64_ISA2_07, "ISA2_07" },
1631       { VEX_HWCAPS_PPC64_ISA3_0,  "ISA3_0"  },
1632    };
1633    /* Allocate a large enough buffer */
1634    static HChar buf[sizeof prefix +
1635                     NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
1636    if (buf[0] != '\0') return buf;  /* already constructed */
1637 
1638    HChar *p = buf + vex_sprintf(buf, "%s", prefix);
1639 
1640    if (hwcaps == 0) return buf;
1641 
1642    UInt i;
1643    for (i = 0 ; i < NUM_HWCAPS; ++i) {
1644       if (hwcaps & hwcaps_list[i].hwcaps_bit)
1645          p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
1646    }
1647    return buf;
1648 }
1649 
1650 static const HChar* show_hwcaps_arm ( UInt hwcaps )
1651 {
1652    static const HChar prefix[] = "ARM";
1653    static const struct {
1654       UInt  hwcaps_bit;
1655       HChar name[6];
1656    } hwcaps_list[] = {
1657       { VEX_HWCAPS_ARM_NEON, "neon" },
1658       { VEX_HWCAPS_ARM_VFP | VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3, "vfp" },
1659    };
1660    /* Allocate a large enough buffer */
1661    static HChar buf[sizeof prefix + 12 +    // level
1662                     NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
1663    if (buf[0] != '\0') return buf;  /* already constructed */
1664 
1665    HChar *p;
1666    UInt i, level;
1667 
1668    level = VEX_ARM_ARCHLEVEL(hwcaps);
1669 
1670    p = buf + vex_sprintf(buf, "%sv%u", prefix, level);
1671    for (i = 0 ; i < NUM_HWCAPS; ++i) {
1672       if (hwcaps & hwcaps_list[i].hwcaps_bit)
1673          p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
1674    }
1675    return buf;
1676 }
1677 
1678 static const HChar* show_hwcaps_arm64 ( UInt hwcaps )
1679 {
1680    /* Since there are no variants, just insist that hwcaps is zero,
1681       and declare it invalid otherwise. */
1682    if (hwcaps == 0)
1683       return "baseline";
1684    return "Unsupported";
1685 }
1686 
1687 static const HChar* show_hwcaps_s390x ( UInt hwcaps )
1688 {
1689    static const HChar prefix[] = "s390x";
1690    static const struct {
1691       UInt  hwcaps_bit;
1692       HChar name[6];
1693    } hwcaps_list[] = {
1694       { VEX_HWCAPS_S390X_LDISP, "ldisp" },
1695       { VEX_HWCAPS_S390X_EIMM,  "eimm" },
1696       { VEX_HWCAPS_S390X_GIE,   "gie" },
1697       { VEX_HWCAPS_S390X_DFP,   "dfp" },
1698       { VEX_HWCAPS_S390X_FGX,   "fgx" },
1699       { VEX_HWCAPS_S390X_STFLE, "stfle" },
1700       { VEX_HWCAPS_S390X_ETF2,  "etf2" },
1701       { VEX_HWCAPS_S390X_ETF3,  "etf3" },
1702       { VEX_HWCAPS_S390X_STCKF, "stckf" },
1703       { VEX_HWCAPS_S390X_FPEXT, "fpext" },
1704       { VEX_HWCAPS_S390X_LSC,   "lsc" },
1705       { VEX_HWCAPS_S390X_PFPO,  "pfpo" },
1706    };
1707    /* Allocate a large enough buffer */
1708    static HChar buf[sizeof prefix +
1709                     NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
1710 
1711    if (buf[0] != '\0') return buf;  /* already constructed */
1712 
1713    HChar *p;
1714    UInt i;
1715 
1716    hwcaps = VEX_HWCAPS_S390X(hwcaps);
1717 
1718    p = buf + vex_sprintf(buf, "%s", prefix);
1719    for (i = 0 ; i < NUM_HWCAPS; ++i) {
1720       if (hwcaps & hwcaps_list[i].hwcaps_bit)
1721          p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
1722    }
1723 
1724    /* If there are no facilities, add "zarch" */
1725    if (hwcaps == 0)
1726      vex_sprintf(p, "-%s", "zarch");
1727 
1728    return buf;
1729 }
1730 
1731 static const HChar* show_hwcaps_mips32 ( UInt hwcaps )
1732 {
1733    /* MIPS baseline. */
1734    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
1735       /* MIPS baseline with dspr2. */
1736       if (VEX_MIPS_PROC_DSP2(hwcaps)) {
1737          return "MIPS-baseline-dspr2";
1738       }
1739       /* MIPS baseline with dsp. */
1740       if (VEX_MIPS_PROC_DSP(hwcaps)) {
1741          return "MIPS-baseline-dsp";
1742       }
1743       return "MIPS-baseline";
1744    }
1745 
1746    /* Broadcom baseline. */
1747    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_BROADCOM) {
1748       return "Broadcom-baseline";
1749    }
1750 
1751    /* Netlogic baseline. */
1752    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
1753       return "Netlogic-baseline";
1754    }
1755 
1756    /* Cavium baseline. */
1757    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
1758       return "Cavium-baseline";
1759    }
1760 
1761    /* Ingenic baseline. */
1762    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_INGENIC_E1) {
1763       return "Ingenic-baseline";
1764    }
1765 
1766    /* Loongson baseline. */
1767    if ((VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_LEGACY) &&
1768        (VEX_MIPS_PROC_ID(hwcaps) == VEX_PRID_IMP_LOONGSON_64)) {
1769       return "Loongson-baseline";
1770    }
1771 
1772    return "Unsupported baseline";
1773 }
1774 
1775 static const HChar* show_hwcaps_mips64 ( UInt hwcaps )
1776 {
1777    /* Netlogic baseline. */
1778    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
1779       return "Netlogic-baseline";
1780    }
1781 
1782    /* Cavium baseline. */
1783    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
1784       return "Cavium-baseline";
1785    }
1786 
1787    /* Loongson baseline. */
1788    if ((VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_LEGACY) &&
1789        (VEX_MIPS_PROC_ID(hwcaps) == VEX_PRID_IMP_LOONGSON_64)) {
1790       return "Loongson-baseline";
1791    }
1792 
1793    /* MIPS64 baseline. */
1794    if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
1795       return "mips64-baseline";
1796    }
1797 
1798    return "Unsupported baseline";
1799 }
1800 
1801 #undef NUM_HWCAPS
1802 
1803 /* This function must not return NULL for any supported architecture. */
1804 
1805 static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
1806 {
1807    switch (arch) {
1808       case VexArchX86:    return show_hwcaps_x86(hwcaps);
1809       case VexArchAMD64:  return show_hwcaps_amd64(hwcaps);
1810       case VexArchPPC32:  return show_hwcaps_ppc32(hwcaps);
1811       case VexArchPPC64:  return show_hwcaps_ppc64(hwcaps);
1812       case VexArchARM:    return show_hwcaps_arm(hwcaps);
1813       case VexArchARM64:  return show_hwcaps_arm64(hwcaps);
1814       case VexArchS390X:  return show_hwcaps_s390x(hwcaps);
1815       case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
1816       case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
1817       default: return NULL;
1818    }
1819 }
1820 
1821 /* To be used to complain about hwcaps we cannot handle */
1822 __attribute__((noreturn))
1823 static void invalid_hwcaps ( VexArch arch, UInt hwcaps, const HChar *message )
1824 {
1825    vfatal("\nVEX: %s"
1826           "     Found: %s\n", message, show_hwcaps(arch, hwcaps));
1827 }
1828 
1829 /* This function does not return if the hwcaps don't pass the test. */
1830 static void check_hwcaps ( VexArch arch, UInt hwcaps )
1831 {
1832    switch (arch) {
1833       case VexArchX86: {
1834          if (hwcaps == 0) return;    // baseline
1835 
1836          /* Monotonic: SSE3 > SSE2 > SSE1 > MMXEXT > baseline. */
1837          static const UInt extras[] = {
1838             VEX_HWCAPS_X86_MMXEXT, VEX_HWCAPS_X86_SSE1, VEX_HWCAPS_X86_SSE2,
1839             VEX_HWCAPS_X86_SSE3
1840          };
1841 
1842          UInt i, caps = 0;
1843          for (i = 0; i < sizeof extras / sizeof extras[0]; ++i) {
1844             caps |= extras[i];
1845             if (caps == hwcaps) return;
1846             /* For SSE2 or later LZCNT is optional */
1847             if ((caps & VEX_HWCAPS_X86_SSE2) != 0) {
1848                if ((caps | VEX_HWCAPS_X86_LZCNT) == hwcaps) return;
1849             }
1850          }
1851          invalid_hwcaps(arch, hwcaps, "Cannot handle capabilities\n");
1852       }
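
      /* Hence the accepted x86 sets are exactly: 0 (baseline),
         MMXEXT, MMXEXT|SSE1, MMXEXT|SSE1|SSE2 and
         MMXEXT|SSE1|SSE2|SSE3, with LZCNT optionally added once
         SSE2 is present. */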
1853 
1854       case VexArchAMD64: {
1855          /* SSE3 and CX16 are orthogonal and > baseline, although we really
1856             don't expect to come across anything which can do SSE3 but can't
1857             do CX16.  Still, we can handle that case.  LZCNT is similarly
1858             orthogonal. */
1859 
1860          /* Throw out obviously stupid cases: */
1861          Bool have_sse3 = (hwcaps & VEX_HWCAPS_AMD64_SSE3) != 0;
1862          Bool have_avx  = (hwcaps & VEX_HWCAPS_AMD64_AVX)  != 0;
1863          Bool have_bmi  = (hwcaps & VEX_HWCAPS_AMD64_BMI)  != 0;
1864          Bool have_avx2 = (hwcaps & VEX_HWCAPS_AMD64_AVX2) != 0;
1865 
1866          /* AVX without SSE3 */
1867          if (have_avx && !have_sse3)
1868             invalid_hwcaps(arch, hwcaps,
1869                            "Support for AVX requires SSE3 capabilities\n");
1870          /* AVX2 or BMI without AVX */
1871          if (have_avx2 && !have_avx)
1872             invalid_hwcaps(arch, hwcaps,
1873                            "Support for AVX2 requires AVX capabilities\n");
1874          if (have_bmi && !have_avx)
1875             invalid_hwcaps(arch, hwcaps,
1876                            "Support for BMI requires AVX capabilities\n");
1877          return;
1878       }
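
      /* In effect the dependency chain is AVX2 -> AVX -> SSE3, with
         BMI -> AVX as well; e.g. AVX2|AVX without SSE3 is rejected
         by the AVX-requires-SSE3 test above. */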
1879 
1880       case VexArchPPC32: {
1881          /* Monotonic with complications.  Basically V > F > baseline,
1882             but once you have F then you can have FX or GX too. */
1883          if (hwcaps == 0) return;   // baseline
1884 
1885          if ((hwcaps & VEX_HWCAPS_PPC32_F) == 0)
1886             invalid_hwcaps(arch, hwcaps,
1887                            "Missing floating point capability\n");
1888          /* V, FX, and GX can appear in any combination */
1889 
1890          /* DFP requires V and FX and GX */
1891          UInt v_fx_gx = VEX_HWCAPS_PPC32_V | VEX_HWCAPS_PPC32_FX |
1892                         VEX_HWCAPS_PPC32_GX;
1893          Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;
1894 
1895          if ((hwcaps & VEX_HWCAPS_PPC32_DFP) && ! has_v_fx_gx)
1896             invalid_hwcaps(arch, hwcaps,
1897                            "DFP requires VMX and FX and GX capabilities\n");
1898 
1899          /* VX requires V and FX and GX */
1900          if ((hwcaps & VEX_HWCAPS_PPC32_VX) && ! has_v_fx_gx)
1901             invalid_hwcaps(arch, hwcaps,
1902                            "VX requires VMX and FX and GX capabilities\n");
1903 
1904          /* ISA2_07 requires everything else */
1905          if ((hwcaps & VEX_HWCAPS_PPC32_ISA2_07) != 0) {
1906             if (! has_v_fx_gx)
1907                invalid_hwcaps(arch, hwcaps,
1908                           "ISA2_07 requires VMX and FX and GX capabilities\n");
1909             if (! (hwcaps & VEX_HWCAPS_PPC32_VX))
1910                invalid_hwcaps(arch, hwcaps,
1911                               "ISA2_07 requires VX capabilities\n");
1912             if (! (hwcaps & VEX_HWCAPS_PPC32_DFP))
1913                invalid_hwcaps(arch, hwcaps,
1914                               "ISA2_07 requires DFP capabilities\n");
1915          }
1916 
1917          /* ISA 3.0 not supported on 32-bit machines */
1918          if ((hwcaps & VEX_HWCAPS_PPC32_ISA3_0) != 0) {
1919             invalid_hwcaps(arch, hwcaps,
1920                            "ISA 3.0 not supported in 32-bit mode\n");
1921          }
1922          return;
1923       }
1924 
1925       case VexArchPPC64: {
1926          /* Monotonic with complications.  Basically V > baseline(==F),
1927             but once you have F then you can have FX or GX too. */
1928          if (hwcaps == 0) return;   // baseline
1929 
1930          /* V, FX, and GX can appear in any combination */
1931 
1932          /* DFP requires V and FX and GX */
1933          UInt v_fx_gx = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX |
1934                         VEX_HWCAPS_PPC64_GX;
1935          Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;
1936 
1937          if ((hwcaps & VEX_HWCAPS_PPC64_DFP) && ! has_v_fx_gx)
1938             invalid_hwcaps(arch, hwcaps,
1939                            "DFP requires VMX and FX and GX capabilities\n");
1940 
1941          /* VX requires V and FX and GX */
1942          if ((hwcaps & VEX_HWCAPS_PPC64_VX) && ! has_v_fx_gx)
1943             invalid_hwcaps(arch, hwcaps,
1944                            "VX requires VMX and FX and GX capabilities\n");
1945 
1946          /* ISA2_07 requires everything else */
1947          if ((hwcaps & VEX_HWCAPS_PPC64_ISA2_07) != 0) {
1948             if (! has_v_fx_gx)
1949                invalid_hwcaps(arch, hwcaps,
1950                         "ISA2_07 requires VMX and FX and GX capabilities\n");
1951             if (! (hwcaps & VEX_HWCAPS_PPC64_VX))
1952                invalid_hwcaps(arch, hwcaps,
1953                               "ISA2_07 requires VX capabilities\n");
1954             if (! (hwcaps & VEX_HWCAPS_PPC64_DFP))
1955                invalid_hwcaps(arch, hwcaps,
1956                               "ISA2_07 requires DFP capabilities\n");
1957          }
1958 
1959          /* ISA3_0 requires everything else */
1960          if ((hwcaps & VEX_HWCAPS_PPC64_ISA3_0) != 0) {
1961             if ( !((hwcaps
1962                     & VEX_HWCAPS_PPC64_ISA2_07) == VEX_HWCAPS_PPC64_ISA2_07))
1963                invalid_hwcaps(arch, hwcaps,
1964                           "ISA3_0 requires ISA2_07 capabilities\n");
1965             if ( !has_v_fx_gx)
1966                invalid_hwcaps(arch, hwcaps,
1967                         "ISA3_0 requires VMX and FX and GX capabilities\n");
1968             if ( !(hwcaps & VEX_HWCAPS_PPC64_VX))
1969                invalid_hwcaps(arch, hwcaps,
1970                               "ISA3_0 requires VX capabilities\n");
1971             if ( !(hwcaps & VEX_HWCAPS_PPC64_DFP))
1972                invalid_hwcaps(arch, hwcaps,
1973                               "ISA3_0 requires DFP capabilities\n");
1974          }
1975          return;
1976       }
1977 
1978       case VexArchARM: {
1979          Bool NEON  = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
1980          Bool VFP3  = ((hwcaps & VEX_HWCAPS_ARM_VFP3) != 0);
1981          UInt level = VEX_ARM_ARCHLEVEL(hwcaps);
1982          switch (level) {
1983             case 5:
1984                if (NEON)
1985                   invalid_hwcaps(arch, hwcaps,
1986                           "NEON instructions are not supported for ARMv5.\n");
1987                return;
1988             case 6:
1989                if (NEON)
1990                   invalid_hwcaps(arch, hwcaps,
1991                           "NEON instructions are not supported for ARMv6.\n");
1992                return;
1993             case 7:
1994                return;
1995             case 8:
1996                if (!NEON || !VFP3)
1997                   invalid_hwcaps(arch, hwcaps,
1998                           "NEON and VFP3 are required for ARMv8.\n");
1999                return;
2000             default:
2001                invalid_hwcaps(arch, hwcaps,
2002                               "ARM architecture level is not supported.\n");
2003          }
2004       }
2005 
2006       case VexArchARM64:
2007          if (hwcaps != 0)
2008             invalid_hwcaps(arch, hwcaps,
2009                            "Unsupported hardware capabilities.\n");
2010          return;
2011 
2012       case VexArchS390X:
2013          if (! s390_host_has_ldisp)
2014             invalid_hwcaps(arch, hwcaps,
2015                            "Host does not have long displacement facility.\n");
2016          return;
2017 
2018       case VexArchMIPS32:
2019          switch (VEX_MIPS_COMP_ID(hwcaps)) {
2020             case VEX_PRID_COMP_MIPS:
2021             case VEX_PRID_COMP_CAVIUM:
2022             case VEX_PRID_COMP_INGENIC_E1:
2023             case VEX_PRID_COMP_BROADCOM:
2024             case VEX_PRID_COMP_NETLOGIC:
2025                return;
2026             default:
2027                invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
2028          }
2029 
2030       case VexArchMIPS64:
2031          switch (VEX_MIPS_COMP_ID(hwcaps)) {
2032             case VEX_PRID_COMP_MIPS:
2033             case VEX_PRID_COMP_CAVIUM:
2034             case VEX_PRID_COMP_NETLOGIC:
2035                return;
2036             default:
2037                invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
2038          }
2039 
2040       default:
2041          vpanic("unknown architecture");
2042    }
2043 }
2044 
2045 
2046 /*---------------------------------------------------------------*/
2047 /*--- end                                         main_main.c ---*/
2048 /*---------------------------------------------------------------*/
2049