/*---------------------------------------------------------------*/
/*--- begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2010 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emwarn.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_ppc_defs.h"

#include "host_generic_simd128.h"


/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static Bool   are_valid_hwcaps ( VexArch arch, UInt hwcaps );
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps );


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vcon->iropt_verbosity            = 0;
   vcon->iropt_level                = 2;
   vcon->iropt_precise_memory_exns  = False;
   vcon->iropt_unroll_thresh        = 120;
   vcon->guest_max_insns            = 60;
   vcon->guest_chase_thresh         = 10;
   vcon->guest_chase_cond           = False;
}
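
/* Illustrative sketch, not part of the library: a client that wants
   non-default behaviour can start from these defaults and override
   individual fields before handing the struct to LibVEX_Init below.
   The particular values chosen here are only an example; whatever the
   client picks must still satisfy the range checks asserted in
   LibVEX_Init.

      VexControl vc;
      LibVEX_default_VexControl(&vc);
      vc.iropt_level     = 0;    // lower the IR optimisation level
      vc.guest_max_insns = 30;   // build smaller superblocks
      // ... then pass &vc to LibVEX_Init.
*/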


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Are we supporting valgrind checking? */
   Bool valgrind_support,
   /* Control ... */
   /*READONLY*/VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* Really start up .. */
   vex_debuglevel         = debuglevel;
   vex_valgrind_support   = valgrind_support;
   vex_control            = *vcon;
   vex_initdone           = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}
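
/* Illustrative sketch, not part of the library: a minimal client-side
   start-up sequence.  my_failure_exit and my_log_bytes are hypothetical
   client-supplied callbacks; a real client provides its own.

      __attribute__((noreturn)) static void my_failure_exit ( void )
      {
         exit(1);   // or longjmp back to the client's top level
      }

      static void my_log_bytes ( HChar* bytes, Int nbytes )
      {
         fwrite(bytes, 1, nbytes, stderr);
      }

      VexControl vc;
      LibVEX_default_VexControl(&vc);
      LibVEX_Init(&my_failure_exit, &my_log_bytes,
                  0 /*debuglevel*/, False /*valgrind_support*/, &vc);

   LibVEX_Init must be called exactly once, before the first call to
   LibVEX_Translate; the vassert(!vex_initdone) above and the
   vassert(vex_initdone) in LibVEX_Translate enforce that ordering.
*/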


/* --------- Make a translation. --------- */

/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   HReg* available_real_regs;
   Int   n_available_real_regs;
   Bool         (*isMove)       ( HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*,
                                                  VexAbiInfo* );
   Int          (*emit)         ( UChar*, Int, HInstr*, Bool, void* );
   IRExpr*      (*specHelper)   ( HChar*, IRExpr**, IRStmt**, Int );
   Bool         (*preciseMemExnsFn) ( Int, Int );

   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   Bool            host_is_bigendian = False;
   IRSB*           irsb;
   HInstrArray*    vcode;
   HInstrArray*    rcode;
   Int             i, j, k, out_used, guest_sizeB;
   Int             offB_TISTART, offB_TILEN;
   UChar           insn_bytes[32];
   IRType          guest_word_type;
   IRType          host_word_type;
   Bool            mode64;

   guest_layout           = NULL;
   available_real_regs    = NULL;
   n_available_real_regs  = 0;
   isMove                 = NULL;
   getRegUsage            = NULL;
   mapRegs                = NULL;
   genSpill               = NULL;
   genReload              = NULL;
   directReload           = NULL;
   ppInstr                = NULL;
   ppReg                  = NULL;
   iselSB                 = NULL;
   emit                   = NULL;
   specHelper             = NULL;
   preciseMemExnsFn       = NULL;
   disInstrFn             = NULL;
   guest_word_type        = Ity_INVALID;
   host_word_type         = Ity_INVALID;
   offB_TISTART           = 0;
   offB_TILEN             = 0;
   mode64                 = False;

   vex_traceflags = vta->traceflags;

   vassert(vex_initdone);
   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         getAllocableRegs_X86 ( &n_available_real_regs,
                                &available_real_regs );
         isMove       = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
         getRegUsage  = (void(*)(HRegUsage*,HInstr*, Bool))
                        getRegUsage_X86Instr;
         mapRegs      = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
         genSpill     = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genSpill_X86;
         genReload    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genReload_X86;
         directReload = (HInstr*(*)(HInstr*,HReg,Short)) directReload_X86;
         ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
         ppReg        = (void(*)(HReg)) ppHRegX86;
         iselSB       = iselSB_X86;
         emit         = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_X86Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch != NULL); /* jump-to-dispatcher scheme */
         break;

      case VexArchAMD64:
         mode64      = True;
         getAllocableRegs_AMD64 ( &n_available_real_regs,
                                  &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool))
                       getRegUsage_AMD64Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genSpill_AMD64;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genReload_AMD64;
         ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
         ppReg       = (void(*)(HReg)) ppHRegAMD64;
         iselSB      = iselSB_AMD64;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_AMD64Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch != NULL); /* jump-to-dispatcher scheme */
         break;

      case VexArchPPC32:
         mode64      = False;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      case VexArchPPC64:
         mode64      = True;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      case VexArchARM:
         mode64      = False;
         getAllocableRegs_ARM ( &n_available_real_regs,
                                &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_ARMInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_ARMInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_ARMInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_ARM;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_ARM;
         ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
         ppReg       = (void(*)(HReg)) ppHRegARM;
         iselSB      = iselSB_ARM;
         emit        = (Int(*)(UChar*,Int,HInstr*,Bool,void*)) emit_ARMInstr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
         vassert(vta->dispatch == NULL); /* return-to-dispatcher scheme */
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }


   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_X86;
         specHelper       = guest_x86_spechelper;
         guest_sizeB      = sizeof(VexGuestX86State);
         guest_word_type  = Ity_I32;
         guest_layout     = &x86guest_layout;
         offB_TISTART     = offsetof(VexGuestX86State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestX86State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestX86State) % 16);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TILEN  ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn = guest_amd64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_AMD64;
         specHelper       = guest_amd64_spechelper;
         guest_sizeB      = sizeof(VexGuestAMD64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &amd64guest_layout;
         offB_TISTART     = offsetof(VexGuestAMD64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestAMD64State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestAMD64State) % 16);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TISTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TILEN   ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR  ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc32_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC32State);
         guest_word_type  = Ity_I32;
         guest_layout     = &ppc32Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC32State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC32State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC32State) % 16);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TISTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TILEN   ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR  ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc64_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &ppc64Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC64State,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC64State) % 16);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TISTART    ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TILEN      ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR     ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn = guest_arm_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_ARM;
         specHelper       = guest_arm_spechelper;
         guest_sizeB      = sizeof(VexGuestARMState);
         guest_word_type  = Ity_I32;
         guest_layout     = &armGuest_layout;
         offB_TISTART     = offsetof(VexGuestARMState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestARMState,guest_TILEN);
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestARMState) % 16);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TILEN  ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* doesn't necessarily have to be true, but if it isn't, it means
         we are simulating one flavour of an architecture on a different
         flavour of the same architecture, which is pretty strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                   " Front end "
                   "------------------------\n\n");

   irsb = bb_to_IR ( vta->guest_extents,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     host_is_bigendian,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->do_self_check,
                     vta->preamble_function,
                     offB_TISTART,
                     offB_TILEN );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      vex_traceflags = 0;
      return VexTransAccessFail;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         UChar* p = (UChar*)vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %llx %u ", vta->guest_bytes_addr,
                                           guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf("  %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn,
                              vta->guest_bytes_addr,
                              vta->arch_guest );
   sanityCheckIRSB( irsb, "after initial iropt",
                    True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                   " After pre-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                   " After instrumentation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   if (vta->instrument1 || vta->instrument2)
      sanityCheckIRSB( irsb, "after instrumentation",
                       True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                   " After post-instr IR optimisation "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   ado_treebuild_BB( irsb );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                   "  After tree-building "
                   "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) { *(vta->host_bytes_used) = 0; return VexTransOK; }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                   " Instruction selection "
                   "------------------------\n");

   vcode = iselSB ( irsb, vta->arch_host, &vta->archinfo_host,
                                          &vta->abiinfo_both );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, available_real_regs,
                                  n_available_real_regs,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                   " Register-allocated code "
                   "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d   ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) { *(vta->host_bytes_used) = 0; return VexTransOK; }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                   " Assembly "
                   "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      if (vex_traceflags & VEX_TRACE_ASM) {
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      j = (*emit)( insn_bytes, 32, rcode->arr[i], mode64, vta->dispatch );
      if (vex_traceflags & VEX_TRACE_ASM) {
         for (k = 0; k < j; k++)
            if (insn_bytes[k] < 16)
               vex_printf("0%x ",  (UInt)insn_bytes[k]);
            else
               vex_printf("%x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (out_used + j > vta->host_bytes_size) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         return VexTransOutputFull;
      }
      for (k = 0; k < j; k++) {
         vta->host_bytes[out_used] = insn_bytes[k];
         out_used++;
      }
      vassert(out_used <= vta->host_bytes_size);
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   vex_traceflags = 0;
   return VexTransOK;
}
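
/* Illustrative sketch, not part of the library: the rough shape of a
   call to LibVEX_Translate, showing only fields that this function
   reads directly.  guest_code and guest_addr are hypothetical values
   supplied by the client, and a real client must also set the
   remaining VexTranslateArgs fields (guest_extents, callback_opaque,
   chase_into_ok, dispatch and so on) as required above.

      VexTranslateArgs   vta;
      VexTranslateResult res;
      UChar              hostbuf[1024];
      Int                hostbuf_used = 0;

      vta.arch_guest       = VexArchX86;
      vta.arch_host        = VexArchX86;
      LibVEX_default_VexArchInfo(&vta.archinfo_guest);
      LibVEX_default_VexArchInfo(&vta.archinfo_host);
      LibVEX_default_VexAbiInfo(&vta.abiinfo_both);
      vta.guest_bytes      = guest_code;   // hypothetical code pointer
      vta.guest_bytes_addr = guest_addr;   // hypothetical guest address
      vta.host_bytes       = hostbuf;
      vta.host_bytes_size  = sizeof(hostbuf);
      vta.host_bytes_used  = &hostbuf_used;
      vta.instrument1      = NULL;         // no instrumentation
      vta.instrument2      = NULL;
      vta.finaltidy        = NULL;
      vta.traceflags       = 0;

      res = LibVEX_Translate(&vta);
      // res is VexTransOK, VexTransAccessFail or VexTransOutputFull

   Note that for x86 and amd64 hosts vta.dispatch must be non-NULL
   (jump-to-dispatcher scheme), whereas for ppc32/ppc64/arm hosts it
   must be NULL, as asserted in the host switch above.
*/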


/* --------- Emulation warnings. --------- */

HChar* LibVEX_EmWarn_string ( VexEmWarn ew )
{
   switch (ew) {
     case EmWarn_NONE:
        return "none";
     case EmWarn_X86_x87exns:
        return "Unmasking x87 FP exceptions";
     case EmWarn_X86_x87precision:
        return "Selection of non-80-bit x87 FP precision";
     case EmWarn_X86_sseExns:
        return "Unmasking SSE FP exceptions";
     case EmWarn_X86_fz:
        return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
     case EmWarn_X86_daz:
        return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
     case EmWarn_X86_acFlag:
        return "Setting %eflags.ac (setting noted but ignored)";
     case EmWarn_PPCexns:
        return "Unmasking PPC32/64 FP exceptions";
     case EmWarn_PPC64_redir_overflow:
        return "PPC64 function redirection stack overflow";
     case EmWarn_PPC64_redir_underflow:
        return "PPC64 function redirection stack underflow";
     default:
        vpanic("LibVEX_EmWarn_string: unknown warning");
   }
}
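
/* Illustrative sketch, not part of the library: a client that picks up
   an emulation warning can render it with LibVEX_EmWarn_string.
   my_log is a hypothetical client-side logging helper.

      VexEmWarn ew = ...;   // collected by the client, however it
                            // receives warnings
      if (ew != EmWarn_NONE)
         my_log("vex emulation warning: %s\n", LibVEX_EmWarn_string(ew));

   Unknown codes do not come back as NULL; they vpanic instead, as
   above.
*/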

/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   HChar* str = show_hwcaps(arch,hwcaps);
   return str ? str : "INVALID";
}
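
/* Illustrative sketch, not part of the library: printing an arch and
   hwcaps pair for diagnostics.  Invalid hwcaps combinations come back
   as "INVALID" rather than NULL, so the result is always printable.
   my_log is a hypothetical client-side logging helper.

      UInt caps = VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2;
      my_log("host: %s %s\n",
             LibVEX_ppVexArch(VexArchX86),           // "X86"
             LibVEX_ppVexHwCaps(VexArchX86, caps));  // "x86-sse1-sse2"
*/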


/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vai->hwcaps             = 0;
   vai->ppc_cache_line_szB = 0;
   vai->ppc_dcbz_szB       = 0;
   vai->ppc_dcbzl_szB      = 0;

}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_zero  = False;
   vbi->guest_amd64_assume_gs_is_0x60  = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->guest_ppc_sc_continues_at_LR   = False;
   vbi->host_ppc_calls_use_fndescrs    = False;
   vbi->host_ppc32_regalign_int64_args = False;
}


/* Return a string showing the hwcaps in a nice way.  The string will
   be NULL for invalid combinations of flags, so these functions also
   serve as a way to validate hwcaps values. */

static HChar* show_hwcaps_x86 ( UInt hwcaps )
{
   /* Monotonic, SSE3 > SSE2 > SSE1 > baseline. */
   switch (hwcaps) {
      case 0:
         return "x86-sse0";
      case VEX_HWCAPS_X86_SSE1:
         return "x86-sse1";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2:
         return "x86-sse1-sse2";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-lzcnt";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3:
         return "x86-sse1-sse2-sse3";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3 | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-sse3-lzcnt";
      default:
         return NULL;
   }
}

static HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   /* SSE3 and CX16 are orthogonal and > baseline, although we really
      don't expect to come across anything which can do SSE3 but can't
      do CX16.  Still, we can handle that case.  LZCNT is similarly
      orthogonal. */
   switch (hwcaps) {
      case 0:
         return "amd64-sse2";
      case VEX_HWCAPS_AMD64_SSE3:
         return "amd64-sse3";
      case VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse2-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse3-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-lzcnt";
      case VEX_HWCAPS_AMD64_CX16 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse2-cx16-lzcnt";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16
           | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-cx16-lzcnt";

      default:
         return NULL;
   }
}

static HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > F > baseline,
      but once you have F then you can have FX or GX too. */
   const UInt F  = VEX_HWCAPS_PPC32_F;
   const UInt V  = VEX_HWCAPS_PPC32_V;
   const UInt FX = VEX_HWCAPS_PPC32_FX;
   const UInt GX = VEX_HWCAPS_PPC32_GX;
         UInt c  = hwcaps;
   if (c == 0)           return "ppc32-int";
   if (c == F)           return "ppc32-int-flt";
   if (c == (F|FX))      return "ppc32-int-flt-FX";
   if (c == (F|GX))      return "ppc32-int-flt-GX";
   if (c == (F|FX|GX))   return "ppc32-int-flt-FX-GX";
   if (c == (F|V))       return "ppc32-int-flt-vmx";
   if (c == (F|V|FX))    return "ppc32-int-flt-vmx-FX";
   if (c == (F|V|GX))    return "ppc32-int-flt-vmx-GX";
   if (c == (F|V|FX|GX)) return "ppc32-int-flt-vmx-FX-GX";
   return NULL;
}

static HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > baseline(==F),
      but once you have F then you can have FX or GX too. */
   const UInt V  = VEX_HWCAPS_PPC64_V;
   const UInt FX = VEX_HWCAPS_PPC64_FX;
   const UInt GX = VEX_HWCAPS_PPC64_GX;
         UInt c  = hwcaps;
   if (c == 0)         return "ppc64-int-flt";
   if (c == FX)        return "ppc64-int-flt-FX";
   if (c == GX)        return "ppc64-int-flt-GX";
   if (c == (FX|GX))   return "ppc64-int-flt-FX-GX";
   if (c == V)         return "ppc64-int-flt-vmx";
   if (c == (V|FX))    return "ppc64-int-flt-vmx-FX";
   if (c == (V|GX))    return "ppc64-int-flt-vmx-GX";
   if (c == (V|FX|GX)) return "ppc64-int-flt-vmx-FX-GX";
   return NULL;
}

static HChar* show_hwcaps_arm ( UInt hwcaps )
{
   Bool N = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
   Bool vfp = ((hwcaps & (VEX_HWCAPS_ARM_VFP |
               VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3)) != 0);
   switch (VEX_ARM_ARCHLEVEL(hwcaps)) {
      case 5:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv5-vfp";
         else
            return "ARMv5";
         return NULL;
      case 6:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv6-vfp";
         else
            return "ARMv6";
         return NULL;
      case 7:
         if (vfp) {
            if (N)
               return "ARMv7-vfp-neon";
            else
               return "ARMv7-vfp";
         } else {
            if (N)
               return "ARMv7-neon";
            else
               return "ARMv7";
         }
      default:
         return NULL;
   }
   return NULL;
}

/* ---- */
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:   return show_hwcaps_x86(hwcaps);
      case VexArchAMD64: return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32: return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64: return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:   return show_hwcaps_arm(hwcaps);
      default: return NULL;
   }
}

static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch,hwcaps) != NULL;
}


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/