/* -*- mode: C; c-basic-offset: 3; -*- */

/*---------------------------------------------------------------*/
/*--- Begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2015 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emnote.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_arm64.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"
#include "libvex_guest_mips32.h"
#include "libvex_guest_mips64.h"
#include "libvex_guest_tilegx.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_arm64_defs.h"
#include "host_s390_defs.h"
#include "host_mips_defs.h"
#include "host_tilegx_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_arm64_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"
#include "guest_mips_defs.h"
#include "guest_tilegx_defs.h"

#include "host_generic_simd128.h"
/* For each architecture <arch>, we define two macros:
   <arch>FN, whose argument is a pointer (typically to a function
             or the return value of a function).
   <arch>ST, whose argument is a statement.
   If main_main.c is compiled for <arch>, these macros simply expand
   to their argument.
   Otherwise, they expand to NULL and vassert(0) respectively.
   These macros are used to avoid introducing dependencies on object
   files that are not needed for the (only) architecture we are
   compiling for.

   To still compile the code below for all supported architectures,
   define VEXMULTIARCH.  This is used by the file
   multiarch_main_main.c. */

#if defined(VGA_x86) || defined(VEXMULTIARCH)
#define X86FN(f) f
#define X86ST(f) f
#else
#define X86FN(f) NULL
#define X86ST(f) vassert(0)
#endif

#if defined(VGA_amd64) || defined(VEXMULTIARCH)
#define AMD64FN(f) f
#define AMD64ST(f) f
#else
#define AMD64FN(f) NULL
#define AMD64ST(f) vassert(0)
#endif

#if defined(VGA_ppc32) || defined(VEXMULTIARCH)
#define PPC32FN(f) f
#define PPC32ST(f) f
#else
#define PPC32FN(f) NULL
#define PPC32ST(f) vassert(0)
#endif

#if defined(VGA_ppc64be) || defined(VGA_ppc64le) || defined(VEXMULTIARCH)
#define PPC64FN(f) f
#define PPC64ST(f) f
#else
#define PPC64FN(f) NULL
#define PPC64ST(f) vassert(0)
#endif

#if defined(VGA_s390x) || defined(VEXMULTIARCH)
#define S390FN(f) f
#define S390ST(f) f
#else
#define S390FN(f) NULL
#define S390ST(f) vassert(0)
#endif

#if defined(VGA_arm) || defined(VEXMULTIARCH)
#define ARMFN(f) f
#define ARMST(f) f
#else
#define ARMFN(f) NULL
#define ARMST(f) vassert(0)
#endif

#if defined(VGA_arm64) || defined(VEXMULTIARCH)
#define ARM64FN(f) f
#define ARM64ST(f) f
#else
#define ARM64FN(f) NULL
#define ARM64ST(f) vassert(0)
#endif

#if defined(VGA_mips32) || defined(VEXMULTIARCH)
#define MIPS32FN(f) f
#define MIPS32ST(f) f
#else
#define MIPS32FN(f) NULL
#define MIPS32ST(f) vassert(0)
#endif

#if defined(VGA_mips64) || defined(VEXMULTIARCH)
#define MIPS64FN(f) f
#define MIPS64ST(f) f
#else
#define MIPS64FN(f) NULL
#define MIPS64ST(f) vassert(0)
#endif

#if defined(VGA_tilegx) || defined(VEXMULTIARCH)
#define TILEGXFN(f) f
#define TILEGXST(f) f
#else
#define TILEGXFN(f) NULL
#define TILEGXST(f) vassert(0)
#endif

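/* A minimal worked example of the scheme above, assuming a build in
   which only VGA_x86 is defined and VEXMULTIARCH is not:

      iselSB = X86FN(iselSB_X86);          // -> iselSB = iselSB_X86;
      iselSB = AMD64FN(iselSB_AMD64);      // -> iselSB = NULL;
      AMD64ST(return evCheckSzB_AMD64());  // -> vassert(0);

   so no AMD64 symbol is ever referenced, and the AMD64 object files
   need not be linked in. */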


/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static void  check_hwcaps ( VexArch arch, UInt hwcaps );
static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps );


/* --------- helpers --------- */

__attribute__((noinline))
static UInt udiv32 ( UInt x, UInt y ) { return x/y; }
__attribute__((noinline))
static  Int sdiv32 (  Int x,  Int y ) { return x/y; }


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vex_bzero(vcon, sizeof(*vcon));
   vcon->iropt_verbosity                = 0;
   vcon->iropt_level                    = 2;
   vcon->iropt_register_updates_default = VexRegUpdUnwindregsAtMemAccess;
   vcon->iropt_unroll_thresh            = 120;
   vcon->guest_max_insns                = 60;
   vcon->guest_chase_thresh             = 10;
   vcon->guest_chase_cond               = False;
}


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( const HChar*, SizeT nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Control ... */
   const VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));
   vassert(32 == sizeof(U256));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));
   vassert(sizeof(void*) == sizeof(Addr));
   vassert(sizeof(unsigned long) == sizeof(SizeT));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* These take a lot of space, so make sure we don't have
      any unnoticed size regressions. */
   if (VEX_HOST_WORDSIZE == 4) {
      vassert(sizeof(IRExpr) == 16);
      vassert(sizeof(IRStmt) == 20 /* x86 */
              || sizeof(IRStmt) == 24 /* arm */);
   } else {
      vassert(sizeof(IRExpr) == 32);
      vassert(sizeof(IRStmt) == 32);
   }

   /* Ditto */
   vassert(sizeof(HReg) == 4);
   /* If N_RREGUNIVERSE_REGS ever exceeds 64, the bitset fields in
      RRegSet and HRegUsage will need to be changed to something
      better than ULong. */
   vassert(N_RREGUNIVERSE_REGS == 64);

   /* Check that signed integer division on the host rounds towards
      zero.  If not, h_calc_sdiv32_w_arm_semantics() won't work
      correctly. */
   /* 100.0 / 7.0 == 14.2857 */
   vassert(udiv32(100, 7) == 14);
   vassert(sdiv32(100, 7) == 14);
   vassert(sdiv32(-100, 7) == -14); /* and not -15 */
   vassert(sdiv32(100, -7) == -14); /* ditto */
   vassert(sdiv32(-100, -7) == 14); /* not sure what this proves */
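
   /* Worked note on what the checks above distinguish: -100/7 is
      -14.2857...; truncation towards zero gives -14, whereas flooring
      division would give -15.  C99 and later require truncating
      signed division, so a conforming host compiler should always
      pass these checks. */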

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}
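
/* A minimal client-side sketch of how the two calls above fit
   together.  This is assumed embedder code, not part of this file;
   my_failure_exit and my_log_bytes are hypothetical callbacks
   matching the parameter types of LibVEX_Init:

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      LibVEX_Init(my_failure_exit, my_log_bytes, 0, &vcon);

   After this, vex_initdone is True and LibVEX_Translate may be
   called. */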


/* --------- Make a translation. --------- */
/* KLUDGE: S390 needs to know the hwcaps of the host when generating
   code.  But that info is not passed to emit_S390Instr; only mode64
   is passed.  So, ideally, we want this passed as an argument too.
   Until then, we use a global variable.  This variable is set as a
   side effect of LibVEX_Translate.  It is defined here rather than
   in host_s390_defs.c to avoid having main_main.c drag in S390
   object files in non-VEXMULTIARCH builds. */
UInt s390_host_hwcaps;


/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   Bool         (*isMove)       ( const HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, const HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( const HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( const IRSB*, VexArch, const VexArchInfo*,
                                  const VexAbiInfo*, Int, Int, Bool, Bool,
                                  Addr );
   Int          (*emit)         ( /*MB_MOD*/Bool*,
                                  UChar*, Int, const HInstr*, Bool, VexEndness,
                                  const void*, const void*, const void*,
                                  const void* );
   IRExpr*      (*specHelper)   ( const HChar*, IRExpr**, IRStmt**, Int );
   Bool         (*preciseMemExnsFn) ( Int, Int, VexRegisterUpdates );

   const RRegUniverse* rRegUniv = NULL;

   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   IRSB*           irsb;
   HInstrArray*    vcode;
   HInstrArray*    rcode;
   Int             i, j, k, out_used, guest_sizeB;
   Int             offB_CMSTART, offB_CMLEN, offB_GUEST_IP, szB_GUEST_IP;
   Int             offB_HOST_EvC_COUNTER, offB_HOST_EvC_FAILADDR;
   UChar           insn_bytes[128];
   IRType          guest_word_type;
   IRType          host_word_type;
   Bool            mode64, chainingAllowed;
   Addr            max_ga;

   guest_layout           = NULL;
   isMove                 = NULL;
   getRegUsage            = NULL;
   mapRegs                = NULL;
   genSpill               = NULL;
   genReload              = NULL;
   directReload           = NULL;
   ppInstr                = NULL;
   ppReg                  = NULL;
   iselSB                 = NULL;
   emit                   = NULL;
   specHelper             = NULL;
   preciseMemExnsFn       = NULL;
   disInstrFn             = NULL;
   guest_word_type        = Ity_INVALID;
   host_word_type         = Ity_INVALID;
   offB_CMSTART           = 0;
   offB_CMLEN             = 0;
   offB_GUEST_IP          = 0;
   szB_GUEST_IP           = 0;
   offB_HOST_EvC_COUNTER  = 0;
   offB_HOST_EvC_FAILADDR = 0;
   mode64                 = False;
   chainingAllowed        = False;

   vex_traceflags = vta->traceflags;

   vassert(vex_initdone);
   vassert(vta->needs_self_check  != NULL);
   vassert(vta->disp_cp_xassisted != NULL);
   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP        != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir             != NULL);
      chainingAllowed = True;
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir             == NULL);
   }
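
   /* chainingAllowed == True permits instruction selection to emit
      chainable direct transfers via disp_cp_chain_me_to_slowEP/fastEP;
      LibVEX_Chain and LibVEX_UnChain (below) patch such transfers in
      place later. */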

   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         rRegUniv     = X86FN(getRRegUniverse_X86());
         isMove       = (__typeof__(isMove)) X86FN(isMove_X86Instr);
         getRegUsage
            = (__typeof__(getRegUsage)) X86FN(getRegUsage_X86Instr);
         mapRegs      = (__typeof__(mapRegs)) X86FN(mapRegs_X86Instr);
         genSpill     = (__typeof__(genSpill)) X86FN(genSpill_X86);
         genReload    = (__typeof__(genReload)) X86FN(genReload_X86);
         directReload = (__typeof__(directReload)) X86FN(directReload_X86);
         ppInstr      = (__typeof__(ppInstr)) X86FN(ppX86Instr);
         ppReg        = (__typeof__(ppReg)) X86FN(ppHRegX86);
         iselSB       = X86FN(iselSB_X86);
         emit         = (__typeof__(emit)) X86FN(emit_X86Instr);
         host_word_type = Ity_I32;
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchAMD64:
         mode64       = True;
         rRegUniv     = AMD64FN(getRRegUniverse_AMD64());
         isMove       = (__typeof__(isMove)) AMD64FN(isMove_AMD64Instr);
         getRegUsage
            = (__typeof__(getRegUsage)) AMD64FN(getRegUsage_AMD64Instr);
         mapRegs      = (__typeof__(mapRegs)) AMD64FN(mapRegs_AMD64Instr);
         genSpill     = (__typeof__(genSpill)) AMD64FN(genSpill_AMD64);
         genReload    = (__typeof__(genReload)) AMD64FN(genReload_AMD64);
         ppInstr      = (__typeof__(ppInstr)) AMD64FN(ppAMD64Instr);
         ppReg        = (__typeof__(ppReg)) AMD64FN(ppHRegAMD64);
         iselSB       = AMD64FN(iselSB_AMD64);
         emit         = (__typeof__(emit)) AMD64FN(emit_AMD64Instr);
         host_word_type = Ity_I64;
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchPPC32:
         mode64       = False;
         rRegUniv     = PPC32FN(getRRegUniverse_PPC(mode64));
         isMove       = (__typeof__(isMove)) PPC32FN(isMove_PPCInstr);
         getRegUsage
            = (__typeof__(getRegUsage)) PPC32FN(getRegUsage_PPCInstr);
         mapRegs      = (__typeof__(mapRegs)) PPC32FN(mapRegs_PPCInstr);
         genSpill     = (__typeof__(genSpill)) PPC32FN(genSpill_PPC);
         genReload    = (__typeof__(genReload)) PPC32FN(genReload_PPC);
         ppInstr      = (__typeof__(ppInstr)) PPC32FN(ppPPCInstr);
         ppReg        = (__typeof__(ppReg)) PPC32FN(ppHRegPPC);
         iselSB       = PPC32FN(iselSB_PPC);
         emit         = (__typeof__(emit)) PPC32FN(emit_PPCInstr);
         host_word_type = Ity_I32;
         vassert(vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchPPC64:
         mode64       = True;
         rRegUniv     = PPC64FN(getRRegUniverse_PPC(mode64));
         isMove       = (__typeof__(isMove)) PPC64FN(isMove_PPCInstr);
         getRegUsage
            = (__typeof__(getRegUsage)) PPC64FN(getRegUsage_PPCInstr);
         mapRegs      = (__typeof__(mapRegs)) PPC64FN(mapRegs_PPCInstr);
         genSpill     = (__typeof__(genSpill)) PPC64FN(genSpill_PPC);
         genReload    = (__typeof__(genReload)) PPC64FN(genReload_PPC);
         ppInstr      = (__typeof__(ppInstr)) PPC64FN(ppPPCInstr);
         ppReg        = (__typeof__(ppReg)) PPC64FN(ppHRegPPC);
         iselSB       = PPC64FN(iselSB_PPC);
         emit         = (__typeof__(emit)) PPC64FN(emit_PPCInstr);
         host_word_type = Ity_I64;
         vassert(vta->archinfo_host.endness == VexEndnessBE ||
                 vta->archinfo_host.endness == VexEndnessLE );
         break;

      case VexArchS390X:
         mode64       = True;
         /* KLUDGE: export hwcaps. */
         s390_host_hwcaps = vta->archinfo_host.hwcaps;
         rRegUniv     = S390FN(getRRegUniverse_S390());
         isMove       = (__typeof__(isMove)) S390FN(isMove_S390Instr);
         getRegUsage
            = (__typeof__(getRegUsage)) S390FN(getRegUsage_S390Instr);
         mapRegs      = (__typeof__(mapRegs)) S390FN(mapRegs_S390Instr);
         genSpill     = (__typeof__(genSpill)) S390FN(genSpill_S390);
         genReload    = (__typeof__(genReload)) S390FN(genReload_S390);
         // fixs390: consider implementing directReload_S390
         ppInstr      = (__typeof__(ppInstr)) S390FN(ppS390Instr);
         ppReg        = (__typeof__(ppReg)) S390FN(ppHRegS390);
         iselSB       = S390FN(iselSB_S390);
         emit         = (__typeof__(emit)) S390FN(emit_S390Instr);
         host_word_type = Ity_I64;
         vassert(vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchARM:
         mode64       = False;
         rRegUniv     = ARMFN(getRRegUniverse_ARM());
         isMove       = (__typeof__(isMove)) ARMFN(isMove_ARMInstr);
         getRegUsage
            = (__typeof__(getRegUsage)) ARMFN(getRegUsage_ARMInstr);
         mapRegs      = (__typeof__(mapRegs)) ARMFN(mapRegs_ARMInstr);
         genSpill     = (__typeof__(genSpill)) ARMFN(genSpill_ARM);
         genReload    = (__typeof__(genReload)) ARMFN(genReload_ARM);
         ppInstr      = (__typeof__(ppInstr)) ARMFN(ppARMInstr);
         ppReg        = (__typeof__(ppReg)) ARMFN(ppHRegARM);
         iselSB       = ARMFN(iselSB_ARM);
         emit         = (__typeof__(emit)) ARMFN(emit_ARMInstr);
         host_word_type = Ity_I32;
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchARM64:
         mode64       = True;
         rRegUniv     = ARM64FN(getRRegUniverse_ARM64());
         isMove       = (__typeof__(isMove)) ARM64FN(isMove_ARM64Instr);
         getRegUsage
            = (__typeof__(getRegUsage)) ARM64FN(getRegUsage_ARM64Instr);
         mapRegs      = (__typeof__(mapRegs)) ARM64FN(mapRegs_ARM64Instr);
         genSpill     = (__typeof__(genSpill)) ARM64FN(genSpill_ARM64);
         genReload    = (__typeof__(genReload)) ARM64FN(genReload_ARM64);
         ppInstr      = (__typeof__(ppInstr)) ARM64FN(ppARM64Instr);
         ppReg        = (__typeof__(ppReg)) ARM64FN(ppHRegARM64);
         iselSB       = ARM64FN(iselSB_ARM64);
         emit         = (__typeof__(emit)) ARM64FN(emit_ARM64Instr);
         host_word_type = Ity_I64;
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      case VexArchMIPS32:
         mode64       = False;
         rRegUniv     = MIPS32FN(getRRegUniverse_MIPS(mode64));
         isMove       = (__typeof__(isMove)) MIPS32FN(isMove_MIPSInstr);
         getRegUsage
            = (__typeof__(getRegUsage)) MIPS32FN(getRegUsage_MIPSInstr);
         mapRegs      = (__typeof__(mapRegs)) MIPS32FN(mapRegs_MIPSInstr);
         genSpill     = (__typeof__(genSpill)) MIPS32FN(genSpill_MIPS);
         genReload    = (__typeof__(genReload)) MIPS32FN(genReload_MIPS);
         ppInstr      = (__typeof__(ppInstr)) MIPS32FN(ppMIPSInstr);
         ppReg        = (__typeof__(ppReg)) MIPS32FN(ppHRegMIPS);
         iselSB       = MIPS32FN(iselSB_MIPS);
         emit         = (__typeof__(emit)) MIPS32FN(emit_MIPSInstr);
         host_word_type = Ity_I32;
         vassert(vta->archinfo_host.endness == VexEndnessLE
                 || vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchMIPS64:
         mode64       = True;
         rRegUniv     = MIPS64FN(getRRegUniverse_MIPS(mode64));
         isMove       = (__typeof__(isMove)) MIPS64FN(isMove_MIPSInstr);
         getRegUsage
            = (__typeof__(getRegUsage)) MIPS64FN(getRegUsage_MIPSInstr);
         mapRegs      = (__typeof__(mapRegs)) MIPS64FN(mapRegs_MIPSInstr);
         genSpill     = (__typeof__(genSpill)) MIPS64FN(genSpill_MIPS);
         genReload    = (__typeof__(genReload)) MIPS64FN(genReload_MIPS);
         ppInstr      = (__typeof__(ppInstr)) MIPS64FN(ppMIPSInstr);
         ppReg        = (__typeof__(ppReg)) MIPS64FN(ppHRegMIPS);
         iselSB       = MIPS64FN(iselSB_MIPS);
         emit         = (__typeof__(emit)) MIPS64FN(emit_MIPSInstr);
         host_word_type = Ity_I64;
         vassert(vta->archinfo_host.endness == VexEndnessLE
                 || vta->archinfo_host.endness == VexEndnessBE);
         break;

      case VexArchTILEGX:
         mode64      = True;
         rRegUniv    = TILEGXFN(getRRegUniverse_TILEGX());
         isMove      = (__typeof__(isMove)) TILEGXFN(isMove_TILEGXInstr);
         getRegUsage =
            (__typeof__(getRegUsage)) TILEGXFN(getRegUsage_TILEGXInstr);
         mapRegs     = (__typeof__(mapRegs)) TILEGXFN(mapRegs_TILEGXInstr);
         genSpill    = (__typeof__(genSpill)) TILEGXFN(genSpill_TILEGX);
         genReload   = (__typeof__(genReload)) TILEGXFN(genReload_TILEGX);
         ppInstr     = (__typeof__(ppInstr)) TILEGXFN(ppTILEGXInstr);
         ppReg       = (__typeof__(ppReg)) TILEGXFN(ppHRegTILEGX);
         iselSB      = TILEGXFN(iselSB_TILEGX);
         emit        = (__typeof__(emit)) TILEGXFN(emit_TILEGXInstr);
         host_word_type    = Ity_I64;
         vassert(vta->archinfo_host.endness == VexEndnessLE);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }

   // Check that the host's hardware capabilities are feasible.  The
   // function does not return if the hwcaps are infeasible in some way.
   check_hwcaps(vta->arch_host, vta->archinfo_host.hwcaps);

   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn
            = X86FN(guest_x86_state_requires_precise_mem_exns);
         disInstrFn             = X86FN(disInstr_X86);
         specHelper             = X86FN(guest_x86_spechelper);
         guest_sizeB            = sizeof(VexGuestX86State);
         guest_word_type        = Ity_I32;
         guest_layout           = X86FN(&x86guest_layout);
         offB_CMSTART           = offsetof(VexGuestX86State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestX86State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestX86State,guest_EIP);
         szB_GUEST_IP           = sizeof( ((VexGuestX86State*)0)->guest_EIP );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestX86State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn
            = AMD64FN(guest_amd64_state_requires_precise_mem_exns);
         disInstrFn             = AMD64FN(disInstr_AMD64);
         specHelper             = AMD64FN(guest_amd64_spechelper);
         guest_sizeB            = sizeof(VexGuestAMD64State);
         guest_word_type        = Ity_I64;
         guest_layout           = AMD64FN(&amd64guest_layout);
         offB_CMSTART           = offsetof(VexGuestAMD64State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestAMD64State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestAMD64State,guest_RIP);
         szB_GUEST_IP           = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestAMD64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMSTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_CMLEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn
            = PPC32FN(guest_ppc32_state_requires_precise_mem_exns);
         disInstrFn             = PPC32FN(disInstr_PPC);
         specHelper             = PPC32FN(guest_ppc32_spechelper);
         guest_sizeB            = sizeof(VexGuestPPC32State);
         guest_word_type        = Ity_I32;
         guest_layout           = PPC32FN(&ppc32Guest_layout);
         offB_CMSTART           = offsetof(VexGuestPPC32State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestPPC32State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestPPC32State,guest_CIA);
         szB_GUEST_IP           = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestPPC32State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMSTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_CMLEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn
            = PPC64FN(guest_ppc64_state_requires_precise_mem_exns);
         disInstrFn             = PPC64FN(disInstr_PPC);
         specHelper             = PPC64FN(guest_ppc64_spechelper);
         guest_sizeB            = sizeof(VexGuestPPC64State);
         guest_word_type        = Ity_I64;
         guest_layout           = PPC64FN(&ppc64Guest_layout);
         offB_CMSTART           = offsetof(VexGuestPPC64State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestPPC64State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestPPC64State,guest_CIA);
         szB_GUEST_IP           = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessBE ||
                 vta->archinfo_guest.endness == VexEndnessLE );
         vassert(0 == sizeof(VexGuestPPC64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn
            = S390FN(guest_s390x_state_requires_precise_mem_exns);
         disInstrFn       = S390FN(disInstr_S390);
         specHelper       = S390FN(guest_s390x_spechelper);
         guest_sizeB      = sizeof(VexGuestS390XState);
         guest_word_type  = Ity_I64;
         guest_layout     = S390FN(&s390xGuest_layout);
         offB_CMSTART     = offsetof(VexGuestS390XState,guest_CMSTART);
         offB_CMLEN       = offsetof(VexGuestS390XState,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestS390XState,guest_IA);
         szB_GUEST_IP           = sizeof( ((VexGuestS390XState*)0)->guest_IA);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestS390XState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR     ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn
            = ARMFN(guest_arm_state_requires_precise_mem_exns);
         disInstrFn             = ARMFN(disInstr_ARM);
         specHelper             = ARMFN(guest_arm_spechelper);
         guest_sizeB            = sizeof(VexGuestARMState);
         guest_word_type        = Ity_I32;
         guest_layout           = ARMFN(&armGuest_layout);
         offB_CMSTART           = offsetof(VexGuestARMState,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestARMState,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestARMState,guest_R15T);
         szB_GUEST_IP           = sizeof( ((VexGuestARMState*)0)->guest_R15T );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestARMState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchARM64:
         preciseMemExnsFn
            = ARM64FN(guest_arm64_state_requires_precise_mem_exns);
         disInstrFn           = ARM64FN(disInstr_ARM64);
         specHelper           = ARM64FN(guest_arm64_spechelper);
         guest_sizeB          = sizeof(VexGuestARM64State);
         guest_word_type      = Ity_I64;
         guest_layout         = ARM64FN(&arm64Guest_layout);
         offB_CMSTART         = offsetof(VexGuestARM64State,guest_CMSTART);
         offB_CMLEN           = offsetof(VexGuestARM64State,guest_CMLEN);
         offB_GUEST_IP        = offsetof(VexGuestARM64State,guest_PC);
         szB_GUEST_IP         = sizeof( ((VexGuestARM64State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARM64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARM64State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 == sizeof(VexGuestARM64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestARM64State*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn
            = MIPS32FN(guest_mips32_state_requires_precise_mem_exns);
         disInstrFn             = MIPS32FN(disInstr_MIPS);
         specHelper             = MIPS32FN(guest_mips32_spechelper);
         guest_sizeB            = sizeof(VexGuestMIPS32State);
         guest_word_type        = Ity_I32;
         guest_layout           = MIPS32FN(&mips32Guest_layout);
         offB_CMSTART           = offsetof(VexGuestMIPS32State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestMIPS32State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestMIPS32State,guest_PC);
         szB_GUEST_IP           = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE
                 || vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestMIPS32State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMSTART) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_CMLEN  ) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchMIPS64:
         preciseMemExnsFn
            = MIPS64FN(guest_mips64_state_requires_precise_mem_exns);
         disInstrFn             = MIPS64FN(disInstr_MIPS);
         specHelper             = MIPS64FN(guest_mips64_spechelper);
         guest_sizeB            = sizeof(VexGuestMIPS64State);
         guest_word_type        = Ity_I64;
         guest_layout           = MIPS64FN(&mips64Guest_layout);
         offB_CMSTART           = offsetof(VexGuestMIPS64State,guest_CMSTART);
         offB_CMLEN             = offsetof(VexGuestMIPS64State,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestMIPS64State,guest_PC);
         szB_GUEST_IP           = sizeof( ((VexGuestMIPS64State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS64State,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE
                 || vta->archinfo_guest.endness == VexEndnessBE);
         vassert(0 == sizeof(VexGuestMIPS64State) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMSTART) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_CMLEN  ) == 8);
         vassert(sizeof( ((VexGuestMIPS64State*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchTILEGX:
         preciseMemExnsFn =
            TILEGXFN(guest_tilegx_state_requires_precise_mem_exns);
         disInstrFn       = TILEGXFN(disInstr_TILEGX);
         specHelper       = TILEGXFN(guest_tilegx_spechelper);
         guest_sizeB      = sizeof(VexGuestTILEGXState);
         guest_word_type  = Ity_I64;
         guest_layout     = TILEGXFN(&tilegxGuest_layout);
         offB_CMSTART     = offsetof(VexGuestTILEGXState,guest_CMSTART);
         offB_CMLEN       = offsetof(VexGuestTILEGXState,guest_CMLEN);
         offB_GUEST_IP          = offsetof(VexGuestTILEGXState,guest_pc);
         szB_GUEST_IP           = sizeof( ((VexGuestTILEGXState*)0)->guest_pc );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestTILEGXState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestTILEGXState,host_EvC_FAILADDR);
         vassert(vta->archinfo_guest.endness == VexEndnessLE);
         vassert(0 ==
                 sizeof(VexGuestTILEGXState) % LibVEX_GUEST_STATE_ALIGN);
         vassert(sizeof( ((VexGuestTILEGXState*)0)->guest_CMSTART    ) == 8);
         vassert(sizeof( ((VexGuestTILEGXState*)0)->guest_CMLEN      ) == 8);
         vassert(sizeof( ((VexGuestTILEGXState*)0)->guest_NRADDR     ) == 8);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   // Check that the guest's hardware capabilities are feasible.  The
   // function does not return if the hwcaps are infeasible in some way.
   // FIXME: how can we know the guest's hardware capabilities?
   check_hwcaps(vta->arch_guest, vta->archinfo_guest.hwcaps);

   /* Set up result struct. */
   VexTranslateResult res;
   res.status         = VexTransOK;
   res.n_sc_extents   = 0;
   res.offs_profInc   = -1;
   res.n_guest_instrs = 0;

   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* doesn't necessarily have to be true, but if it isn't it means
         we are simulating one flavour of an architecture on a
         different flavour of the same architecture, which is pretty
         strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
      /* ditto */
      vassert(vta->archinfo_guest.endness == vta->archinfo_host.endness);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   VexRegisterUpdates pxControl = vex_control.iropt_register_updates_default;
   vassert(pxControl >= VexRegUpdSpAtMemAccess
           && pxControl <= VexRegUpdAllregsAtEachInsn);

   irsb = bb_to_IR ( vta->guest_extents,
                     &res.n_sc_extents,
                     &res.n_guest_instrs,
                     &pxControl,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     vta->archinfo_host.endness,
                     vta->sigill_diag,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->needs_self_check,
                     vta->preamble_function,
                     offB_CMSTART,
                     offB_CMLEN,
                     offB_GUEST_IP,
                     szB_GUEST_IP );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      vex_traceflags = 0;
      res.status = VexTransAccessFail; return res;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* bb_to_IR() could have caused pxControl to change. */
   vassert(pxControl >= VexRegUpdSpAtMemAccess
           && pxControl <= VexRegUpdAllregsAtEachInsn);

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         const UChar* p = vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %lx %u ", vta->guest_bytes_addr,
                                          guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn, pxControl,
                              vta->guest_bytes_addr,
                              vta->arch_guest );
   sanityCheckIRSB( irsb, "after initial iropt",
                    True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              &vta->archinfo_host,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   if (vta->instrument1 || vta->instrument2)
      sanityCheckIRSB( irsb, "after instrumentation",
                       True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   max_ga = ado_treebuild_BB( irsb, preciseMemExnsFn, pxControl );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK; return res;
   }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   /* No guest has its IP field at offset zero.  If this fails it
      means some transformation pass somewhere failed to update/copy
      irsb->offsIP properly. */
   vassert(irsb->offsIP >= 16);

   vcode = iselSB ( irsb, vta->arch_host,
                    &vta->archinfo_host,
                    &vta->abiinfo_both,
                    offB_HOST_EvC_COUNTER,
                    offB_HOST_EvC_FAILADDR,
                    chainingAllowed,
                    vta->addProfInc,
                    max_ga );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, rRegUniv,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK; return res;
   }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      HInstr* hi           = rcode->arr[i];
      Bool    hi_isProfInc = False;
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         ppInstr(hi, mode64);
         vex_printf("\n");
      }
      j = emit( &hi_isProfInc,
                insn_bytes, sizeof insn_bytes, hi,
                mode64, vta->archinfo_host.endness,
                vta->disp_cp_chain_me_to_slowEP,
                vta->disp_cp_chain_me_to_fastEP,
                vta->disp_cp_xindir,
                vta->disp_cp_xassisted );
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         for (k = 0; k < j; k++)
            vex_printf("%02x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         res.status = VexTransOutputFull;
         return res;
      }
      if (UNLIKELY(hi_isProfInc)) {
         vassert(vta->addProfInc); /* else where did it come from? */
         vassert(res.offs_profInc == -1); /* there can be only one (tm) */
         vassert(out_used >= 0);
         res.offs_profInc = out_used;
      }
      { UChar* dst = &vta->host_bytes[out_used];
        for (k = 0; k < j; k++) {
           dst[k] = insn_bytes[k];
        }
        out_used += j;
      }
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   if (vex_traceflags) {
      /* Print the expansion ratio for this SB. */
      j = 0; /* total guest bytes */
      for (i = 0; i < vta->guest_extents->n_used; i++) {
         j += vta->guest_extents->len[i];
      }
      if (1) vex_printf("VexExpansionRatio %d %d   %d :10\n\n",
                        j, out_used, (10 * out_used) / (j == 0 ? 1 : j));
   }
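
   /* Worked example of the ratio printed above, with assumed numbers:
      j == 20 guest bytes assembling to out_used == 90 host bytes
      prints "VexExpansionRatio 20 90   45 :10", i.e. a 4.5x
      expansion. */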

   vex_traceflags = 0;
   res.status = VexTransOK;
   return res;
}


/* --------- Chain/Unchain XDirects. --------- */

VexInvalRange LibVEX_Chain ( VexArch     arch_host,
                             VexEndness  endness_host,
                             void*       place_to_chain,
                             const void* disp_cp_chain_me_EXPECTED,
                             const void* place_to_jump_to )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return chainXDirect_X86(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchAMD64:
         AMD64ST(return chainXDirect_AMD64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchARM:
         ARMST(return chainXDirect_ARM(endness_host,
                                       place_to_chain,
                                       disp_cp_chain_me_EXPECTED,
                                       place_to_jump_to));
      case VexArchARM64:
         ARM64ST(return chainXDirect_ARM64(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to));
      case VexArchS390X:
         S390ST(return chainXDirect_S390(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to));
      case VexArchPPC32:
         PPC32ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return chainXDirect_PPC(endness_host,
                                         place_to_chain,
                                         disp_cp_chain_me_EXPECTED,
                                         place_to_jump_to, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return chainXDirect_MIPS(endness_host,
                                           place_to_chain,
                                           disp_cp_chain_me_EXPECTED,
                                           place_to_jump_to, True/*mode64*/));

      case VexArchTILEGX:
         TILEGXST(return chainXDirect_TILEGX(endness_host,
                                             place_to_chain,
                                             disp_cp_chain_me_EXPECTED,
                                             place_to_jump_to, True/*mode64*/));
      default:
         vassert(0);
   }
}

VexInvalRange LibVEX_UnChain ( VexArch     arch_host,
                               VexEndness  endness_host,
                               void*       place_to_unchain,
                               const void* place_to_jump_to_EXPECTED,
                               const void* disp_cp_chain_me )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return unchainXDirect_X86(endness_host,
                                         place_to_unchain,
                                         place_to_jump_to_EXPECTED,
                                         disp_cp_chain_me));
      case VexArchAMD64:
         AMD64ST(return unchainXDirect_AMD64(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me));
      case VexArchARM:
         ARMST(return unchainXDirect_ARM(endness_host,
                                         place_to_unchain,
                                         place_to_jump_to_EXPECTED,
                                         disp_cp_chain_me));
      case VexArchARM64:
         ARM64ST(return unchainXDirect_ARM64(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me));
      case VexArchS390X:
         S390ST(return unchainXDirect_S390(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me));
      case VexArchPPC32:
         PPC32ST(return unchainXDirect_PPC(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return unchainXDirect_PPC(endness_host,
                                           place_to_unchain,
                                           place_to_jump_to_EXPECTED,
                                           disp_cp_chain_me, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return unchainXDirect_MIPS(endness_host,
                                             place_to_unchain,
                                             place_to_jump_to_EXPECTED,
                                             disp_cp_chain_me, False/*!mode64*/));
1245       case VexArchMIPS64:
1246          MIPS64ST(return unchainXDirect_MIPS(endness_host,
1247                                              place_to_unchain,
1248                                              place_to_jump_to_EXPECTED,
1249                                              disp_cp_chain_me, True/*!mode64*/));
1250 
1251       case VexArchTILEGX:
1252          TILEGXST(return unchainXDirect_TILEGX(endness_host,
1253                                       place_to_unchain,
1254                                       place_to_jump_to_EXPECTED,
1255                                                disp_cp_chain_me, True/*!mode64*/));
1256 
1257       default:
1258          vassert(0);
1259    }
1260 }
1261 
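/* Illustrative sketch (not part of the library): undoing a chained
   jump on a little-endian AMD64 host.  Assumes 'place' was
   previously chained to 'dst' by LibVEX_Chain, and that
   'disp_cp_chain_me' is the same stub address used when the
   translation was created.  The caller must flush the returned
   range from the host's instruction cache.

      VexInvalRange vir
         = LibVEX_UnChain(VexArchAMD64, VexEndnessLE,
                          place, dst, disp_cp_chain_me);
      // now invalidate [vir.start, vir.start + vir.len)
*/
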
Int LibVEX_evCheckSzB ( VexArch arch_host )
{
   /* The result depends only on arch_host, which is fixed for the
      life of a (single-architecture) process, so it is safe to
      compute it once and cache it.  DO NOT MAKE NON-STATIC. */
   static Int cached = 0;
   if (UNLIKELY(cached == 0)) {
      switch (arch_host) {
         case VexArchX86:
            X86ST(cached = evCheckSzB_X86()); break;
         case VexArchAMD64:
            AMD64ST(cached = evCheckSzB_AMD64()); break;
         case VexArchARM:
            ARMST(cached = evCheckSzB_ARM()); break;
         case VexArchARM64:
            ARM64ST(cached = evCheckSzB_ARM64()); break;
         case VexArchS390X:
            S390ST(cached = evCheckSzB_S390()); break;
         case VexArchPPC32:
            PPC32ST(cached = evCheckSzB_PPC()); break;
         case VexArchPPC64:
            PPC64ST(cached = evCheckSzB_PPC()); break;
         case VexArchMIPS32:
            MIPS32ST(cached = evCheckSzB_MIPS()); break;
         case VexArchMIPS64:
            MIPS64ST(cached = evCheckSzB_MIPS()); break;
         case VexArchTILEGX:
            TILEGXST(cached = evCheckSzB_TILEGX()); break;
         default:
            vassert(0);
      }
   }
   return cached;
}

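/* Illustrative sketch (not part of the library): the value is a
   per-host-architecture constant, so a client can query it once and
   reuse it, e.g. when accounting for the event-check code emitted
   at the start of translations.

      Int evc_szB = LibVEX_evCheckSzB(arch_host);
      vassert(evc_szB > 0);
*/
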
VexInvalRange LibVEX_PatchProfInc ( VexArch    arch_host,
                                    VexEndness endness_host,
                                    void*      place_to_patch,
                                    const ULong* location_of_counter )
{
   switch (arch_host) {
      case VexArchX86:
         X86ST(return patchProfInc_X86(endness_host, place_to_patch,
                                       location_of_counter));
      case VexArchAMD64:
         AMD64ST(return patchProfInc_AMD64(endness_host, place_to_patch,
                                           location_of_counter));
      case VexArchARM:
         ARMST(return patchProfInc_ARM(endness_host, place_to_patch,
                                       location_of_counter));
      case VexArchARM64:
         ARM64ST(return patchProfInc_ARM64(endness_host, place_to_patch,
                                           location_of_counter));
      case VexArchS390X:
         S390ST(return patchProfInc_S390(endness_host, place_to_patch,
                                         location_of_counter));
      case VexArchPPC32:
         PPC32ST(return patchProfInc_PPC(endness_host, place_to_patch,
                                         location_of_counter, False/*!mode64*/));
      case VexArchPPC64:
         PPC64ST(return patchProfInc_PPC(endness_host, place_to_patch,
                                         location_of_counter, True/*mode64*/));
      case VexArchMIPS32:
         MIPS32ST(return patchProfInc_MIPS(endness_host, place_to_patch,
                                           location_of_counter, False/*!mode64*/));
      case VexArchMIPS64:
         MIPS64ST(return patchProfInc_MIPS(endness_host, place_to_patch,
                                           location_of_counter, True/*mode64*/));
      case VexArchTILEGX:
         TILEGXST(return patchProfInc_TILEGX(endness_host, place_to_patch,
                                             location_of_counter,
                                             True/*mode64*/));
      default:
         vassert(0);
   }
}
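
/* Illustrative sketch (not part of the library): patching in the
   real counter address.  Assumes 'place_to_patch' points at the
   ProfInc sequence that was emitted (with a dummy counter address)
   while translating for a little-endian AMD64 host, and that
   'counter' outlives the translation.

      static ULong counter = 0;
      VexInvalRange vir
         = LibVEX_PatchProfInc(VexArchAMD64, VexEndnessLE,
                               place_to_patch, &counter);
      // now invalidate [vir.start, vir.start + vir.len)
*/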


/* --------- Emulation warnings. --------- */

const HChar* LibVEX_EmNote_string ( VexEmNote ew )
{
   switch (ew) {
     case EmNote_NONE:
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     case EmWarn_S390X_fpext_rounding:
        return "The specified rounding mode cannot be supported. That\n"
               "  feature requires the floating point extension facility\n"
               "  which is not available on this host. Continuing using\n"
               "  the rounding mode from FPC. Results may differ!";
     case EmWarn_S390X_invalid_rounding:
        return "The specified rounding mode is invalid.\n"
               "  Continuing using 'round to nearest'. Results may differ!";
     case EmFail_S390X_stfle:
        return "Instruction stfle is not supported on this host";
     case EmFail_S390X_stckf:
        return "Instruction stckf is not supported on this host";
     case EmFail_S390X_ecag:
        return "Instruction ecag is not supported on this host";
     case EmFail_S390X_pfpo:
        return "Instruction pfpo is not supported on this host";
     case EmFail_S390X_DFP_insn:
        return "DFP instructions are not supported on this host";
     case EmFail_S390X_fpext:
        return "Encountered an instruction that requires the floating "
               "point extension facility.\n"
               "  That facility is not available on this host";
     case EmFail_S390X_invalid_PFPO_rounding_mode:
        return "The rounding mode in GPR 0 for the PFPO instruction"
               " is invalid";
     case EmFail_S390X_invalid_PFPO_function:
        return "The function code in GPR 0 for the PFPO instruction"
               " is invalid";
     default:
        vpanic("LibVEX_EmNote_string: unknown warning");
   }
}

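/* Illustrative sketch (not part of the library): rendering the most
   recent emulation note from a guest state.  Every guest state type
   has a guest_EMNOTE field; 'gst' is a stand-in for the client's
   VexGuestAMD64State (or similar).

      VexEmNote ew = (VexEmNote)gst.guest_EMNOTE;
      if (ew != EmNote_NONE)
         vex_printf("emulation note: %s\n", LibVEX_EmNote_string(ew));
*/
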
/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchARM64:    return "ARM64";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      case VexArchS390X:    return "S390X";
      case VexArchMIPS32:   return "MIPS32";
      case VexArchMIPS64:   return "MIPS64";
      case VexArchTILEGX:   return "TILEGX";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexEndness ( VexEndness endness )
{
   switch (endness) {
      case VexEndness_INVALID: return "INVALID";
      case VexEndnessLE:       return "LittleEndian";
      case VexEndnessBE:       return "BigEndian";
      default:                 return "VexEndness???";
   }
}

/* Return a string with the hardware capabilities to the extent that
   they pertain to the translation process.  No attempt is made to
   detect *all* capabilities an architecture may have. */
const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch, hwcaps);
}
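
/* Illustrative sketch (not part of the library): the three
   pretty-printers above compose naturally when logging a setup.
   With arch = VexArchAMD64, endness = VexEndnessLE and
   hwcaps = VEX_HWCAPS_AMD64_CX16 | VEX_HWCAPS_AMD64_SSE3, this
   prints "AMD64 LittleEndian amd64-cx16-sse3" (see
   show_hwcaps_amd64 below):

      vex_printf("%s %s %s\n",
                 LibVEX_ppVexArch(arch),
                 LibVEX_ppVexEndness(endness),
                 LibVEX_ppVexHwCaps(arch, hwcaps));
*/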


/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vex_bzero(vai, sizeof(*vai));
   vai->hwcaps                  = 0;
   vai->endness                 = VexEndness_INVALID;
   vai->ppc_icache_line_szB     = 0;
   vai->ppc_dcbz_szB            = 0;
   vai->ppc_dcbzl_szB           = 0;
   vai->arm64_dMinLine_lg2_szB  = 0;
   vai->arm64_iMinLine_lg2_szB  = 0;
   vai->hwcache_info.num_levels = 0;
   vai->hwcache_info.num_caches = 0;
   vai->hwcache_info.caches     = NULL;
   vai->hwcache_info.icaches_maintain_coherence = True;  // whatever
}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vex_bzero(vbi, sizeof(*vbi));
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_const = False;
   vbi->guest_amd64_assume_gs_is_const = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->host_ppc_calls_use_fndescrs    = False;
}
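
/* Illustrative sketch (not part of the library): a client takes the
   defaults and then overrides the fields it knows about before
   handing the structs to LibVEX_Translate.  The particular values
   below are examples only.

      VexArchInfo vai;
      VexAbiInfo  vbi;
      LibVEX_default_VexArchInfo(&vai);
      LibVEX_default_VexAbiInfo(&vbi);
      vai.endness = VexEndnessLE;
      vai.hwcaps  = VEX_HWCAPS_AMD64_CX16;
      vbi.guest_stack_redzone_size = 128;  // AMD64 SysV red zone
*/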


/* Convenience macro to be used in show_hwcaps_ARCH functions */
#define NUM_HWCAPS (sizeof hwcaps_list / sizeof hwcaps_list[0])

/* Return a string showing the hwcaps in a nice way.  The string is
   NULL only for an unrecognised architecture; unknown hwcaps bits
   within a recognised architecture are simply not shown. */

static const HChar* show_hwcaps_x86 ( UInt hwcaps )
{
   static const HChar prefix[] = "x86";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[7];
   } hwcaps_list[] = {
      { VEX_HWCAPS_X86_MMXEXT, "mmxext" },
      { VEX_HWCAPS_X86_SSE1,   "sse1"   },
      { VEX_HWCAPS_X86_SSE2,   "sse2"   },
      { VEX_HWCAPS_X86_SSE3,   "sse3"   },
      { VEX_HWCAPS_X86_LZCNT,  "lzcnt"  },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) {
      vex_sprintf(p, "-%s", "sse0");
   } else {
      UInt i;
      for (i = 0 ; i < NUM_HWCAPS; ++i) {
         if (hwcaps & hwcaps_list[i].hwcaps_bit)
            p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
      }
   }
   return buf;
}
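
/* Worked example of the sizing above: for x86, sizeof prefix is 4
   (including the NUL), NUM_HWCAPS is 5 and sizeof
   hwcaps_list[0].name is 7, so buf holds 4 + 5*8 + 1 = 45 bytes.
   The longest possible result, "x86-mmxext-sse1-sse2-sse3-lzcnt",
   needs 32 bytes including the NUL, so the buffer is comfortably
   large enough.  Note the result is constructed once and cached in
   'buf', which assumes the function is only ever asked about one
   hwcaps value per run. */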

static const HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   static const HChar prefix[] = "amd64";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[7];
   } hwcaps_list[] = {
      { VEX_HWCAPS_AMD64_CX16,   "cx16"   },
      { VEX_HWCAPS_AMD64_LZCNT,  "lzcnt"  },
      { VEX_HWCAPS_AMD64_RDTSCP, "rdtscp" },
      { VEX_HWCAPS_AMD64_SSE3,   "sse3"   },
      { VEX_HWCAPS_AMD64_AVX,    "avx"    },
      { VEX_HWCAPS_AMD64_AVX2,   "avx2"   },
      { VEX_HWCAPS_AMD64_BMI,    "bmi"    },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) {
      vex_sprintf(p, "-%s", "sse2");
   } else {
      UInt i;
      for (i = 0 ; i < NUM_HWCAPS; ++i) {
         if (hwcaps & hwcaps_list[i].hwcaps_bit)
            p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
      }
   }
   return buf;
}

static const HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   static const HChar prefix[] = "ppc32-int";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[8];
   } hwcaps_list[] = {
      { VEX_HWCAPS_PPC32_F,       "flt"     },
      { VEX_HWCAPS_PPC32_V,       "vmx"     },
      { VEX_HWCAPS_PPC32_FX,      "FX"      },
      { VEX_HWCAPS_PPC32_GX,      "GX"      },
      { VEX_HWCAPS_PPC32_VX,      "VX"      },
      { VEX_HWCAPS_PPC32_DFP,     "DFP"     },
      { VEX_HWCAPS_PPC32_ISA2_07, "ISA2_07" },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) return buf;

   UInt i;
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}

static const HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   static const HChar prefix[] = "ppc64-int-flt";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[8];
   } hwcaps_list[] = {
      { VEX_HWCAPS_PPC64_FX,      "FX"      },
      { VEX_HWCAPS_PPC64_GX,      "GX"      },
      { VEX_HWCAPS_PPC64_V,       "vmx"     },
      { VEX_HWCAPS_PPC64_DFP,     "DFP"     },
      { VEX_HWCAPS_PPC64_ISA2_07, "ISA2_07" },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p = buf + vex_sprintf(buf, "%s", prefix);

   if (hwcaps == 0) return buf;

   UInt i;
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}

static const HChar* show_hwcaps_arm ( UInt hwcaps )
{
   static const HChar prefix[] = "ARM";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_ARM_NEON, "neon" },
      { VEX_HWCAPS_ARM_VFP | VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3, "vfp" },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix + 12 +    // level
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'
   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p;
   UInt i, level;

   level = VEX_ARM_ARCHLEVEL(hwcaps);

   p = buf + vex_sprintf(buf, "%sv%u", prefix, level);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }
   return buf;
}

static const HChar* show_hwcaps_arm64 ( UInt hwcaps )
{
   /* Since there are no variants, just insist that hwcaps is zero,
      and declare it invalid otherwise. */
   if (hwcaps == 0)
      return "baseline";
   return "Unsupported";
}

static const HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   static const struct {
      UInt  hwcaps_bit;
      HChar name[6];
   } hwcaps_list[] = {
      { VEX_HWCAPS_S390X_LDISP, "ldisp" },
      { VEX_HWCAPS_S390X_EIMM,  "eimm"  },
      { VEX_HWCAPS_S390X_GIE,   "gie"   },
      { VEX_HWCAPS_S390X_DFP,   "dfp"   },
      { VEX_HWCAPS_S390X_FGX,   "fgx"   },
      { VEX_HWCAPS_S390X_STFLE, "stfle" },
      { VEX_HWCAPS_S390X_ETF2,  "etf2"  },
      { VEX_HWCAPS_S390X_ETF3,  "etf3"  },
      { VEX_HWCAPS_S390X_STCKF, "stckf" },
      { VEX_HWCAPS_S390X_FPEXT, "fpext" },
      { VEX_HWCAPS_S390X_LSC,   "lsc"   },
      { VEX_HWCAPS_S390X_PFPO,  "pfpo"  },
   };
   /* Allocate a large enough buffer */
   static HChar buf[sizeof prefix +
                    NUM_HWCAPS * (sizeof hwcaps_list[0].name + 1) + 1]; // '\0'

   if (buf[0] != '\0') return buf;  /* already constructed */

   HChar *p;
   UInt i;

   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   for (i = 0 ; i < NUM_HWCAPS; ++i) {
      if (hwcaps & hwcaps_list[i].hwcaps_bit)
         p = p + vex_sprintf(p, "-%s", hwcaps_list[i].name);
   }

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
      vex_sprintf(p, "-%s", "zarch");

   return buf;
}

static const HChar* show_hwcaps_mips32 ( UInt hwcaps )
{
   /* MIPS baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_MIPS) {
      /* MIPS baseline with dspr2. */
      if (VEX_MIPS_PROC_DSP2(hwcaps)) {
         return "MIPS-baseline-dspr2";
      }
      /* MIPS baseline with dsp. */
      if (VEX_MIPS_PROC_DSP(hwcaps)) {
         return "MIPS-baseline-dsp";
      }
      return "MIPS-baseline";
   }

   /* Broadcom baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_BROADCOM) {
      return "Broadcom-baseline";
   }

   /* Netlogic baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_NETLOGIC) {
      return "Netlogic-baseline";
   }

   /* Cavium baseline. */
   if (VEX_MIPS_COMP_ID(hwcaps) == VEX_PRID_COMP_CAVIUM) {
      return "Cavium-baseline";
   }

   return "Unsupported baseline";
}

static const HChar* show_hwcaps_mips64 ( UInt hwcaps )
{
   return "mips64-baseline";
}

static const HChar* show_hwcaps_tilegx ( UInt hwcaps )
{
   return "tilegx-baseline";
}

#undef NUM_HWCAPS

/* This function must not return NULL for any valid architecture. */

static const HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:    return show_hwcaps_x86(hwcaps);
      case VexArchAMD64:  return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32:  return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64:  return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:    return show_hwcaps_arm(hwcaps);
      case VexArchARM64:  return show_hwcaps_arm64(hwcaps);
      case VexArchS390X:  return show_hwcaps_s390x(hwcaps);
      case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
      case VexArchMIPS64: return show_hwcaps_mips64(hwcaps);
      case VexArchTILEGX: return show_hwcaps_tilegx(hwcaps);
      default: return NULL;
   }
}

/* To be used to complain about hwcaps we cannot handle */
__attribute__((noreturn))
static void invalid_hwcaps ( VexArch arch, UInt hwcaps, const HChar *message )
{
   vfatal("\nVEX: %s"
          "     Found: %s\n", message, show_hwcaps(arch, hwcaps));
}

/* This function does not return if the hwcaps don't pass the test. */
static void check_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86: {
         if (hwcaps == 0) return;    // baseline

         /* Monotonic: SSE3 > SSE2 > SSE1 > MMXEXT > baseline. */
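         /* So, e.g., MMXEXT, MMXEXT|SSE1 and MMXEXT|SSE1|SSE2 are
            acceptable, but MMXEXT|SSE2 (a gap in the chain) is not. */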
         static const UInt extras[] = {
            VEX_HWCAPS_X86_MMXEXT, VEX_HWCAPS_X86_SSE1, VEX_HWCAPS_X86_SSE2,
            VEX_HWCAPS_X86_SSE3
         };

         UInt i, caps = 0;
         for (i = 0; i < sizeof extras / sizeof extras[0]; ++i) {
            caps |= extras[i];
            if (caps == hwcaps) return;
            /* For SSE2 or later LZCNT is optional */
            if ((caps & VEX_HWCAPS_X86_SSE2) != 0) {
               if ((caps | VEX_HWCAPS_X86_LZCNT) == hwcaps) return;
            }
         }
         invalid_hwcaps(arch, hwcaps, "Cannot handle capabilities\n");
      }

      case VexArchAMD64: {
         /* SSE3 and CX16 are orthogonal and > baseline, although we really
            don't expect to come across anything which can do SSE3 but can't
            do CX16.  Still, we can handle that case.  LZCNT is similarly
            orthogonal. */

         /* Throw out obviously stupid cases: */
         Bool have_sse3 = (hwcaps & VEX_HWCAPS_AMD64_SSE3) != 0;
         Bool have_avx  = (hwcaps & VEX_HWCAPS_AMD64_AVX)  != 0;
         Bool have_bmi  = (hwcaps & VEX_HWCAPS_AMD64_BMI)  != 0;
         Bool have_avx2 = (hwcaps & VEX_HWCAPS_AMD64_AVX2) != 0;

         /* AVX without SSE3 */
         if (have_avx && !have_sse3)
            invalid_hwcaps(arch, hwcaps,
                           "Support for AVX requires SSE3 capabilities\n");
         /* AVX2 or BMI without AVX */
         if (have_avx2 && !have_avx)
            invalid_hwcaps(arch, hwcaps,
                           "Support for AVX2 requires AVX capabilities\n");
         if (have_bmi && !have_avx)
            invalid_hwcaps(arch, hwcaps,
                           "Support for BMI requires AVX capabilities\n");
         return;
      }

      case VexArchPPC32: {
         /* Monotonic with complications.  Basically V > F > baseline,
            but once you have F then you can have FX or GX too. */
         if (hwcaps == 0) return;   // baseline

         if ((hwcaps & VEX_HWCAPS_PPC32_F) == 0)
            invalid_hwcaps(arch, hwcaps,
                           "Missing floating point capability\n");
         /* V, FX, and GX can appear in any combination */

         /* DFP requires V and FX and GX */
         UInt v_fx_gx = VEX_HWCAPS_PPC32_V | VEX_HWCAPS_PPC32_FX |
                        VEX_HWCAPS_PPC32_GX;
         Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;

         if ((hwcaps & VEX_HWCAPS_PPC32_DFP) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "DFP requires VMX and FX and GX capabilities\n");

         /* VX requires V and FX and GX */
         if ((hwcaps & VEX_HWCAPS_PPC32_VX) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "VX requires VMX and FX and GX capabilities\n");

         /* ISA2_07 requires everything else */
         if ((hwcaps & VEX_HWCAPS_PPC32_ISA2_07) != 0) {
            if (! has_v_fx_gx)
               invalid_hwcaps(arch, hwcaps,
                          "ISA2_07 requires VMX and FX and GX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC32_VX))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires VX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC32_DFP))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires DFP capabilities\n");
         }
         return;
      }

      case VexArchPPC64: {
         /* Monotonic with complications.  Basically V > baseline(==F),
            but once you have F then you can have FX or GX too. */
         if (hwcaps == 0) return;   // baseline

         /* V, FX, and GX can appear in any combination */

         /* DFP requires V and FX and GX */
         UInt v_fx_gx = VEX_HWCAPS_PPC64_V | VEX_HWCAPS_PPC64_FX |
                        VEX_HWCAPS_PPC64_GX;
         Bool has_v_fx_gx = (hwcaps & v_fx_gx) == v_fx_gx;

         if ((hwcaps & VEX_HWCAPS_PPC64_DFP) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "DFP requires VMX and FX and GX capabilities\n");

         /* VX requires V and FX and GX */
         if ((hwcaps & VEX_HWCAPS_PPC64_VX) && ! has_v_fx_gx)
            invalid_hwcaps(arch, hwcaps,
                           "VX requires VMX and FX and GX capabilities\n");

         /* ISA2_07 requires everything else */
         if ((hwcaps & VEX_HWCAPS_PPC64_ISA2_07) != 0) {
            if (! has_v_fx_gx)
               invalid_hwcaps(arch, hwcaps,
                        "ISA2_07 requires VMX and FX and GX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC64_VX))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires VX capabilities\n");
            if (! (hwcaps & VEX_HWCAPS_PPC64_DFP))
               invalid_hwcaps(arch, hwcaps,
                              "ISA2_07 requires DFP capabilities\n");
         }
         return;
      }

      case VexArchARM: {
         Bool NEON  = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
         UInt level = VEX_ARM_ARCHLEVEL(hwcaps);

         switch (level) {
            case 5:
               if (NEON)
                  invalid_hwcaps(arch, hwcaps,
                          "NEON instructions are not supported for ARMv5.\n");
               return;
            case 6:
               if (NEON)
                  invalid_hwcaps(arch, hwcaps,
                          "NEON instructions are not supported for ARMv6.\n");
               return;
            case 7:
               return;
            default:
               invalid_hwcaps(arch, hwcaps,
                              "ARM architecture level is not supported.\n");
         }
      }

      case VexArchARM64:
         if (hwcaps != 0)
            invalid_hwcaps(arch, hwcaps,
                           "Unsupported hardware capabilities.\n");
         return;

      case VexArchS390X:
         if (! s390_host_has_ldisp)
            invalid_hwcaps(arch, hwcaps,
                           "Host does not have long displacement facility.\n");
         return;

      case VexArchMIPS32:
         switch (VEX_MIPS_COMP_ID(hwcaps)) {
            case VEX_PRID_COMP_MIPS:
            case VEX_PRID_COMP_BROADCOM:
            case VEX_PRID_COMP_NETLOGIC:
               return;
            default:
               invalid_hwcaps(arch, hwcaps, "Unsupported baseline\n");
         }

      case VexArchMIPS64:
         return;

      case VexArchTILEGX:
         return;

      default:
         vpanic("unknown architecture");
   }
}


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/