
/*---------------------------------------------------------------*/
/*--- begin                                       main_main.c ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "libvex.h"
#include "libvex_emwarn.h"
#include "libvex_guest_x86.h"
#include "libvex_guest_amd64.h"
#include "libvex_guest_arm.h"
#include "libvex_guest_ppc32.h"
#include "libvex_guest_ppc64.h"
#include "libvex_guest_s390x.h"
#include "libvex_guest_mips32.h"

#include "main_globals.h"
#include "main_util.h"
#include "host_generic_regs.h"
#include "ir_opt.h"

#include "host_x86_defs.h"
#include "host_amd64_defs.h"
#include "host_ppc_defs.h"
#include "host_arm_defs.h"
#include "host_s390_defs.h"
#include "host_mips_defs.h"

#include "guest_generic_bb_to_IR.h"
#include "guest_x86_defs.h"
#include "guest_amd64_defs.h"
#include "guest_arm_defs.h"
#include "guest_ppc_defs.h"
#include "guest_s390_defs.h"
#include "guest_mips_defs.h"

#include "host_generic_simd128.h"

/* This file contains the top level interface to the library. */

/* --------- fwds ... --------- */

static Bool   are_valid_hwcaps ( VexArch arch, UInt hwcaps );
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps );


/* --------- Initialise the library. --------- */

/* Exported to library client. */

void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon )
{
   vcon->iropt_verbosity        = 0;
   vcon->iropt_level            = 2;
   vcon->iropt_register_updates = VexRegUpdUnwindregsAtMemAccess;
   vcon->iropt_unroll_thresh    = 120;
   vcon->guest_max_insns        = 60;
   vcon->guest_chase_thresh     = 10;
   vcon->guest_chase_cond       = False;
}


/* Exported to library client. */

void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Are we supporting valgrind checking? */
   Bool valgrind_support,
   /* Control ... */
   /*READONLY*/VexControl* vcon
)
{
   /* First off, do enough minimal setup so that the following
      assertions can fail in a sane fashion, if need be. */
   vex_failure_exit = failure_exit;
   vex_log_bytes    = log_bytes;

   /* Now it's safe to check parameters for sanity. */
   vassert(!vex_initdone);
   vassert(failure_exit);
   vassert(log_bytes);
   vassert(debuglevel >= 0);

   vassert(vcon->iropt_verbosity >= 0);
   vassert(vcon->iropt_level >= 0);
   vassert(vcon->iropt_level <= 2);
   vassert(vcon->iropt_unroll_thresh >= 0);
   vassert(vcon->iropt_unroll_thresh <= 400);
   vassert(vcon->guest_max_insns >= 1);
   vassert(vcon->guest_max_insns <= 100);
   vassert(vcon->guest_chase_thresh >= 0);
   vassert(vcon->guest_chase_thresh < vcon->guest_max_insns);
   vassert(vcon->guest_chase_cond == True
           || vcon->guest_chase_cond == False);

   /* Check that Vex has been built with sizes of basic types as
      stated in priv/libvex_basictypes.h.  Failure of any of these is
      a serious configuration error and should be corrected
      immediately.  If any of these assertions fail you can fully
      expect Vex not to work properly, if at all. */

   vassert(1 == sizeof(UChar));
   vassert(1 == sizeof(Char));
   vassert(2 == sizeof(UShort));
   vassert(2 == sizeof(Short));
   vassert(4 == sizeof(UInt));
   vassert(4 == sizeof(Int));
   vassert(8 == sizeof(ULong));
   vassert(8 == sizeof(Long));
   vassert(4 == sizeof(Float));
   vassert(8 == sizeof(Double));
   vassert(1 == sizeof(Bool));
   vassert(4 == sizeof(Addr32));
   vassert(8 == sizeof(Addr64));
   vassert(16 == sizeof(U128));
   vassert(16 == sizeof(V128));
   vassert(32 == sizeof(U256));

   vassert(sizeof(void*) == 4 || sizeof(void*) == 8);
   vassert(sizeof(void*) == sizeof(int*));
   vassert(sizeof(void*) == sizeof(HWord));

   vassert(VEX_HOST_WORDSIZE == sizeof(void*));
   vassert(VEX_HOST_WORDSIZE == sizeof(HWord));

   /* These take a lot of space, so make sure we don't have
      any unnoticed size regressions. */
   if (VEX_HOST_WORDSIZE == 4) {
      vassert(sizeof(IRExpr) == 16);
      vassert(sizeof(IRStmt) == 20 /* x86 */
              || sizeof(IRStmt) == 24 /* arm */);
   } else {
      vassert(sizeof(IRExpr) == 32);
      vassert(sizeof(IRStmt) == 32);
   }

   /* Really start up .. */
   vex_debuglevel       = debuglevel;
   vex_valgrind_support = valgrind_support;
   vex_control          = *vcon;
   vex_initdone         = True;
   vexSetAllocMode ( VexAllocModeTEMP );
}
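
/* Illustrative client start-up sequence (a sketch, not part of the
   library; my_failure_exit and my_log_bytes are hypothetical client
   functions, and the override of guest_max_insns is optional):

      static __attribute__((noreturn)) void my_failure_exit ( void )
      {
         exit(1);   // requires <stdlib.h> on the client side
      }
      static void my_log_bytes ( HChar* bytes, Int nbytes )
      {
         fwrite(bytes, 1, nbytes, stderr);   // requires <stdio.h>
      }

      VexControl vcon;
      LibVEX_default_VexControl ( &vcon );
      vcon.guest_max_insns = 50;   // must stay within 1..100, see above
      LibVEX_Init ( my_failure_exit, my_log_bytes,
                    0,       // debuglevel
                    False,   // valgrind_support
                    &vcon );
*/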


/* --------- Make a translation. --------- */

/* Exported to library client. */

VexTranslateResult LibVEX_Translate ( VexTranslateArgs* vta )
{
   /* This is the bundle of functions we need to do the back-end stuff
      (insn selection, reg-alloc, assembly) whilst being insulated
      from the target instruction set. */
   HReg* available_real_regs;
   Int   n_available_real_regs;
   Bool         (*isMove)       ( HInstr*, HReg*, HReg* );
   void         (*getRegUsage)  ( HRegUsage*, HInstr*, Bool );
   void         (*mapRegs)      ( HRegRemap*, HInstr*, Bool );
   void         (*genSpill)     ( HInstr**, HInstr**, HReg, Int, Bool );
   void         (*genReload)    ( HInstr**, HInstr**, HReg, Int, Bool );
   HInstr*      (*directReload) ( HInstr*, HReg, Short );
   void         (*ppInstr)      ( HInstr*, Bool );
   void         (*ppReg)        ( HReg );
   HInstrArray* (*iselSB)       ( IRSB*, VexArch, VexArchInfo*, VexAbiInfo*,
                                  Int, Int, Bool, Bool, Addr64 );
   Int          (*emit)         ( /*MB_MOD*/Bool*,
                                  UChar*, Int, HInstr*, Bool,
                                  void*, void*, void*, void* );
   IRExpr*      (*specHelper)   ( HChar*, IRExpr**, IRStmt**, Int );
   Bool         (*preciseMemExnsFn) ( Int, Int );

   DisOneInstrFn disInstrFn;

   VexGuestLayout* guest_layout;
   Bool            host_is_bigendian = False;
   IRSB*           irsb;
   HInstrArray*    vcode;
   HInstrArray*    rcode;
   Int             i, j, k, out_used, guest_sizeB;
   Int             offB_TISTART, offB_TILEN, offB_GUEST_IP, szB_GUEST_IP;
   Int             offB_HOST_EvC_COUNTER, offB_HOST_EvC_FAILADDR;
   UChar           insn_bytes[64];
   IRType          guest_word_type;
   IRType          host_word_type;
   Bool            mode64, chainingAllowed;
   Addr64          max_ga;

   guest_layout           = NULL;
   available_real_regs    = NULL;
   n_available_real_regs  = 0;
   isMove                 = NULL;
   getRegUsage            = NULL;
   mapRegs                = NULL;
   genSpill               = NULL;
   genReload              = NULL;
   directReload           = NULL;
   ppInstr                = NULL;
   ppReg                  = NULL;
   iselSB                 = NULL;
   emit                   = NULL;
   specHelper             = NULL;
   preciseMemExnsFn       = NULL;
   disInstrFn             = NULL;
   guest_word_type        = Ity_INVALID;
   host_word_type         = Ity_INVALID;
   offB_TISTART           = 0;
   offB_TILEN             = 0;
   offB_GUEST_IP          = 0;
   szB_GUEST_IP           = 0;
   offB_HOST_EvC_COUNTER  = 0;
   offB_HOST_EvC_FAILADDR = 0;
   mode64                 = False;
   chainingAllowed        = False;

   vex_traceflags = vta->traceflags;

   vassert(vex_initdone);
   vassert(vta->needs_self_check  != NULL);
   vassert(vta->disp_cp_xassisted != NULL);
   /* Both the chainers and the indir are either NULL or non-NULL. */
   if (vta->disp_cp_chain_me_to_slowEP != NULL) {
      vassert(vta->disp_cp_chain_me_to_fastEP != NULL);
      vassert(vta->disp_cp_xindir != NULL);
      chainingAllowed = True;
   } else {
      vassert(vta->disp_cp_chain_me_to_fastEP == NULL);
      vassert(vta->disp_cp_xindir == NULL);
   }

   vexSetAllocModeTEMP_and_clear();
   vexAllocSanityCheck();

   /* First off, check that the guest and host insn sets
      are supported. */

   switch (vta->arch_host) {

      case VexArchX86:
         mode64       = False;
         getAllocableRegs_X86 ( &n_available_real_regs,
                                &available_real_regs );
         isMove       = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_X86Instr;
         getRegUsage  = (void(*)(HRegUsage*,HInstr*, Bool))
                        getRegUsage_X86Instr;
         mapRegs      = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_X86Instr;
         genSpill     = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genSpill_X86;
         genReload    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                        genReload_X86;
         directReload = (HInstr*(*)(HInstr*,HReg,Short)) directReload_X86;
         ppInstr      = (void(*)(HInstr*, Bool)) ppX86Instr;
         ppReg        = (void(*)(HReg)) ppHRegX86;
         iselSB       = iselSB_X86;
         emit         = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                                void*,void*,void*,void*))
                        emit_X86Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_host.hwcaps));
         break;

      case VexArchAMD64:
         mode64      = True;
         getAllocableRegs_AMD64 ( &n_available_real_regs,
                                  &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_AMD64Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool))
                       getRegUsage_AMD64Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_AMD64Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genSpill_AMD64;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool))
                       genReload_AMD64;
         ppInstr     = (void(*)(HInstr*, Bool)) ppAMD64Instr;
         ppReg       = (void(*)(HReg)) ppHRegAMD64;
         iselSB      = iselSB_AMD64;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_AMD64Instr;
         host_is_bigendian = False;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_host.hwcaps));
         break;

      case VexArchPPC32:
         mode64      = False;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*,Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*,Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*,Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_host.hwcaps));
         break;

      case VexArchPPC64:
         mode64      = True;
         getAllocableRegs_PPC ( &n_available_real_regs,
                                &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_PPCInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_PPCInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_PPCInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_PPC;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_PPC;
         ppInstr     = (void(*)(HInstr*, Bool)) ppPPCInstr;
         ppReg       = (void(*)(HReg)) ppHRegPPC;
         iselSB      = iselSB_PPC;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_PPCInstr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_host.hwcaps));
         break;

      case VexArchS390X:
         mode64      = True;
         getAllocableRegs_S390 ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_S390Instr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_S390Instr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_S390Instr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_S390;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_S390;
         ppInstr     = (void(*)(HInstr*, Bool)) ppS390Instr;
         ppReg       = (void(*)(HReg)) ppHRegS390;
         iselSB      = iselSB_S390;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*)) emit_S390Instr;
         host_is_bigendian = True;
         host_word_type    = Ity_I64;
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_host.hwcaps));
         break;

      case VexArchARM:
         mode64      = False;
         getAllocableRegs_ARM ( &n_available_real_regs,
                                &available_real_regs );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_ARMInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_ARMInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_ARMInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_ARM;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_ARM;
         ppInstr     = (void(*)(HInstr*, Bool)) ppARMInstr;
         ppReg       = (void(*)(HReg)) ppHRegARM;
         iselSB      = iselSB_ARM;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_ARMInstr;
         host_is_bigendian = False;
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_host.hwcaps));
         break;

      case VexArchMIPS32:
         mode64      = False;
         getAllocableRegs_MIPS ( &n_available_real_regs,
                                 &available_real_regs, mode64 );
         isMove      = (Bool(*)(HInstr*,HReg*,HReg*)) isMove_MIPSInstr;
         getRegUsage = (void(*)(HRegUsage*,HInstr*, Bool)) getRegUsage_MIPSInstr;
         mapRegs     = (void(*)(HRegRemap*,HInstr*, Bool)) mapRegs_MIPSInstr;
         genSpill    = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genSpill_MIPS;
         genReload   = (void(*)(HInstr**,HInstr**,HReg,Int,Bool)) genReload_MIPS;
         ppInstr     = (void(*)(HInstr*, Bool)) ppMIPSInstr;
         ppReg       = (void(*)(HReg)) ppHRegMIPS;
         iselSB      = iselSB_MIPS;
         emit        = (Int(*)(Bool*,UChar*,Int,HInstr*,Bool,
                               void*,void*,void*,void*))
                       emit_MIPSInstr;
#        if defined(VKI_LITTLE_ENDIAN)
         host_is_bigendian = False;
#        elif defined(VKI_BIG_ENDIAN)
         host_is_bigendian = True;
#        endif
         host_word_type    = Ity_I32;
         vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_host.hwcaps));
         break;

      default:
         vpanic("LibVEX_Translate: unsupported host insn set");
   }


   switch (vta->arch_guest) {

      case VexArchX86:
         preciseMemExnsFn = guest_x86_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_X86;
         specHelper       = guest_x86_spechelper;
         guest_sizeB      = sizeof(VexGuestX86State);
         guest_word_type  = Ity_I32;
         guest_layout     = &x86guest_layout;
         offB_TISTART     = offsetof(VexGuestX86State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestX86State,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestX86State,guest_EIP);
         szB_GUEST_IP     = sizeof( ((VexGuestX86State*)0)->guest_EIP );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestX86State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestX86State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchX86, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestX86State) % 16);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_TILEN ) == 4);
         vassert(sizeof( ((VexGuestX86State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchAMD64:
         preciseMemExnsFn = guest_amd64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_AMD64;
         specHelper       = guest_amd64_spechelper;
         guest_sizeB      = sizeof(VexGuestAMD64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &amd64guest_layout;
         offB_TISTART     = offsetof(VexGuestAMD64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestAMD64State,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestAMD64State,guest_RIP);
         szB_GUEST_IP     = sizeof( ((VexGuestAMD64State*)0)->guest_RIP );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestAMD64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestAMD64State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchAMD64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestAMD64State) % 16);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TISTART ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_TILEN ) == 8);
         vassert(sizeof( ((VexGuestAMD64State*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchPPC32:
         preciseMemExnsFn = guest_ppc32_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc32_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC32State);
         guest_word_type  = Ity_I32;
         guest_layout     = &ppc32Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC32State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC32State,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestPPC32State,guest_CIA);
         szB_GUEST_IP     = sizeof( ((VexGuestPPC32State*)0)->guest_CIA );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC32State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchPPC32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC32State) % 16);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TISTART ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_TILEN ) == 4);
         vassert(sizeof( ((VexGuestPPC32State*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchPPC64:
         preciseMemExnsFn = guest_ppc64_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_PPC;
         specHelper       = guest_ppc64_spechelper;
         guest_sizeB      = sizeof(VexGuestPPC64State);
         guest_word_type  = Ity_I64;
         guest_layout     = &ppc64Guest_layout;
         offB_TISTART     = offsetof(VexGuestPPC64State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestPPC64State,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestPPC64State,guest_CIA);
         szB_GUEST_IP     = sizeof( ((VexGuestPPC64State*)0)->guest_CIA );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestPPC64State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestPPC64State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchPPC64, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestPPC64State) % 16);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TISTART ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_TILEN ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR ) == 8);
         vassert(sizeof( ((VexGuestPPC64State*)0)->guest_NRADDR_GPR2) == 8);
         break;

      case VexArchS390X:
         preciseMemExnsFn = guest_s390x_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_S390;
         specHelper       = guest_s390x_spechelper;
         guest_sizeB      = sizeof(VexGuestS390XState);
         guest_word_type  = Ity_I64;
         guest_layout     = &s390xGuest_layout;
         offB_TISTART     = offsetof(VexGuestS390XState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestS390XState,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestS390XState,guest_IA);
         szB_GUEST_IP     = sizeof( ((VexGuestS390XState*)0)->guest_IA);
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestS390XState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestS390XState,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchS390X, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestS390XState) % 16);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_TISTART ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_TILEN ) == 8);
         vassert(sizeof( ((VexGuestS390XState*)0)->guest_NRADDR ) == 8);
         break;

      case VexArchARM:
         preciseMemExnsFn = guest_arm_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_ARM;
         specHelper       = guest_arm_spechelper;
         guest_sizeB      = sizeof(VexGuestARMState);
         guest_word_type  = Ity_I32;
         guest_layout     = &armGuest_layout;
         offB_TISTART     = offsetof(VexGuestARMState,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestARMState,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestARMState,guest_R15T);
         szB_GUEST_IP     = sizeof( ((VexGuestARMState*)0)->guest_R15T );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestARMState,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestARMState,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchARM, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestARMState) % 16);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_TILEN ) == 4);
         vassert(sizeof( ((VexGuestARMState*)0)->guest_NRADDR ) == 4);
         break;

      case VexArchMIPS32:
         preciseMemExnsFn = guest_mips32_state_requires_precise_mem_exns;
         disInstrFn       = disInstr_MIPS;
         specHelper       = guest_mips32_spechelper;
         guest_sizeB      = sizeof(VexGuestMIPS32State);
         guest_word_type  = Ity_I32;
         guest_layout     = &mips32Guest_layout;
         offB_TISTART     = offsetof(VexGuestMIPS32State,guest_TISTART);
         offB_TILEN       = offsetof(VexGuestMIPS32State,guest_TILEN);
         offB_GUEST_IP    = offsetof(VexGuestMIPS32State,guest_PC);
         szB_GUEST_IP     = sizeof( ((VexGuestMIPS32State*)0)->guest_PC );
         offB_HOST_EvC_COUNTER  = offsetof(VexGuestMIPS32State,host_EvC_COUNTER);
         offB_HOST_EvC_FAILADDR = offsetof(VexGuestMIPS32State,host_EvC_FAILADDR);
         vassert(are_valid_hwcaps(VexArchMIPS32, vta->archinfo_guest.hwcaps));
         vassert(0 == sizeof(VexGuestMIPS32State) % 16);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_TISTART) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_TILEN ) == 4);
         vassert(sizeof( ((VexGuestMIPS32State*)0)->guest_NRADDR ) == 4);
         break;

      default:
         vpanic("LibVEX_Translate: unsupported guest insn set");
   }

   /* Set up result struct. */
   VexTranslateResult res;
   res.status         = VexTransOK;
   res.n_sc_extents   = 0;
   res.offs_profInc   = -1;
   res.n_guest_instrs = 0;

   /* yet more sanity checks ... */
   if (vta->arch_guest == vta->arch_host) {
      /* doesn't necessarily have to be true, but if it isn't it means
         we are simulating one flavour of an architecture on a
         different flavour of the same architecture, which is pretty
         strange. */
      vassert(vta->archinfo_guest.hwcaps == vta->archinfo_host.hwcaps);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_FE)
      vex_printf("\n------------------------"
                 " Front end "
                 "------------------------\n\n");

   irsb = bb_to_IR ( vta->guest_extents,
                     &res.n_sc_extents,
                     &res.n_guest_instrs,
                     vta->callback_opaque,
                     disInstrFn,
                     vta->guest_bytes,
                     vta->guest_bytes_addr,
                     vta->chase_into_ok,
                     host_is_bigendian,
                     vta->arch_guest,
                     &vta->archinfo_guest,
                     &vta->abiinfo_both,
                     guest_word_type,
                     vta->needs_self_check,
                     vta->preamble_function,
                     offB_TISTART,
                     offB_TILEN,
                     offB_GUEST_IP,
                     szB_GUEST_IP );

   vexAllocSanityCheck();

   if (irsb == NULL) {
      /* Access failure. */
      vexSetAllocModeTEMP_and_clear();
      vex_traceflags = 0;
      res.status = VexTransAccessFail;
      return res;
   }

   vassert(vta->guest_extents->n_used >= 1 && vta->guest_extents->n_used <= 3);
   vassert(vta->guest_extents->base[0] == vta->guest_bytes_addr);
   for (i = 0; i < vta->guest_extents->n_used; i++) {
      vassert(vta->guest_extents->len[i] < 10000); /* sanity */
   }

   /* If debugging, show the raw guest bytes for this bb. */
   if (0 || (vex_traceflags & VEX_TRACE_FE)) {
      if (vta->guest_extents->n_used > 1) {
         vex_printf("can't show code due to extents > 1\n");
      } else {
         /* HACK */
         UChar* p = (UChar*)vta->guest_bytes;
         UInt   sum = 0;
         UInt   guest_bytes_read = (UInt)vta->guest_extents->len[0];
         vex_printf("GuestBytes %llx %u ", vta->guest_bytes_addr,
                                           guest_bytes_read );
         for (i = 0; i < guest_bytes_read; i++) {
            UInt b = (UInt)p[i];
            vex_printf(" %02x", b );
            sum = (sum << 1) ^ b;
         }
         vex_printf(" %08x\n\n", sum);
      }
   }

   /* Sanity check the initial IR. */
   sanityCheckIRSB( irsb, "initial IR",
                    False/*can be non-flat*/, guest_word_type );

   vexAllocSanityCheck();

   /* Clean it up, hopefully a lot. */
   irsb = do_iropt_BB ( irsb, specHelper, preciseMemExnsFn,
                        vta->guest_bytes_addr,
                        vta->arch_guest );
   sanityCheckIRSB( irsb, "after initial iropt",
                    True/*must be flat*/, guest_word_type );

   if (vex_traceflags & VEX_TRACE_OPT1) {
      vex_printf("\n------------------------"
                 " After pre-instr IR optimisation "
                 "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   vexAllocSanityCheck();

   /* Get the thing instrumented. */
   if (vta->instrument1)
      irsb = vta->instrument1(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);
   vexAllocSanityCheck();

   if (vta->instrument2)
      irsb = vta->instrument2(vta->callback_opaque,
                              irsb, guest_layout,
                              vta->guest_extents,
                              guest_word_type, host_word_type);

   if (vex_traceflags & VEX_TRACE_INST) {
      vex_printf("\n------------------------"
                 " After instrumentation "
                 "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   if (vta->instrument1 || vta->instrument2)
      sanityCheckIRSB( irsb, "after instrumentation",
                       True/*must be flat*/, guest_word_type );

   /* Do a post-instrumentation cleanup pass. */
   if (vta->instrument1 || vta->instrument2) {
      do_deadcode_BB( irsb );
      irsb = cprop_BB( irsb );
      do_deadcode_BB( irsb );
      sanityCheckIRSB( irsb, "after post-instrumentation cleanup",
                       True/*must be flat*/, guest_word_type );
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_OPT2) {
      vex_printf("\n------------------------"
                 " After post-instr IR optimisation "
                 "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* Turn it into virtual-registerised code.  Build trees -- this
      also throws away any dead bindings. */
   max_ga = ado_treebuild_BB( irsb );

   if (vta->finaltidy) {
      irsb = vta->finaltidy(irsb);
   }

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_TREES) {
      vex_printf("\n------------------------"
                 " After tree-building "
                 "------------------------\n\n");
      ppIRSB ( irsb );
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK;
      return res;
   }
   /* end HACK */

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n------------------------"
                 " Instruction selection "
                 "------------------------\n");

   /* No guest has its IP field at offset zero.  If this fails it
      means some transformation pass somewhere failed to update/copy
      irsb->offsIP properly. */
   vassert(irsb->offsIP >= 16);

   vcode = iselSB ( irsb, vta->arch_host,
                    &vta->archinfo_host,
                    &vta->abiinfo_both,
                    offB_HOST_EvC_COUNTER,
                    offB_HOST_EvC_FAILADDR,
                    chainingAllowed,
                    vta->addProfInc,
                    max_ga );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_VCODE)
      vex_printf("\n");

   if (vex_traceflags & VEX_TRACE_VCODE) {
      for (i = 0; i < vcode->arr_used; i++) {
         vex_printf("%3d ", i);
         ppInstr(vcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* Register allocate. */
   rcode = doRegisterAllocation ( vcode, available_real_regs,
                                  n_available_real_regs,
                                  isMove, getRegUsage, mapRegs,
                                  genSpill, genReload, directReload,
                                  guest_sizeB,
                                  ppInstr, ppReg, mode64 );

   vexAllocSanityCheck();

   if (vex_traceflags & VEX_TRACE_RCODE) {
      vex_printf("\n------------------------"
                 " Register-allocated code "
                 "------------------------\n\n");
      for (i = 0; i < rcode->arr_used; i++) {
         vex_printf("%3d ", i);
         ppInstr(rcode->arr[i], mode64);
         vex_printf("\n");
      }
      vex_printf("\n");
   }

   /* HACK */
   if (0) {
      *(vta->host_bytes_used) = 0;
      res.status = VexTransOK;
      return res;
   }
   /* end HACK */

   /* Assemble */
   if (vex_traceflags & VEX_TRACE_ASM) {
      vex_printf("\n------------------------"
                 " Assembly "
                 "------------------------\n\n");
   }

   out_used = 0; /* tracks along the host_bytes array */
   for (i = 0; i < rcode->arr_used; i++) {
      HInstr* hi           = rcode->arr[i];
      Bool    hi_isProfInc = False;
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         ppInstr(hi, mode64);
         vex_printf("\n");
      }
      j = emit( &hi_isProfInc,
                insn_bytes, sizeof insn_bytes, hi, mode64,
                vta->disp_cp_chain_me_to_slowEP,
                vta->disp_cp_chain_me_to_fastEP,
                vta->disp_cp_xindir,
                vta->disp_cp_xassisted );
      if (UNLIKELY(vex_traceflags & VEX_TRACE_ASM)) {
         for (k = 0; k < j; k++)
            if (insn_bytes[k] < 16)
               vex_printf("0%x ", (UInt)insn_bytes[k]);
            else
               vex_printf("%x ", (UInt)insn_bytes[k]);
         vex_printf("\n\n");
      }
      if (UNLIKELY(out_used + j > vta->host_bytes_size)) {
         vexSetAllocModeTEMP_and_clear();
         vex_traceflags = 0;
         res.status = VexTransOutputFull;
         return res;
      }
      if (UNLIKELY(hi_isProfInc)) {
         vassert(vta->addProfInc); /* else where did it come from? */
         vassert(res.offs_profInc == -1); /* there can be only one (tm) */
         vassert(out_used >= 0);
         res.offs_profInc = out_used;
      }
      { UChar* dst = &vta->host_bytes[out_used];
        for (k = 0; k < j; k++) {
           dst[k] = insn_bytes[k];
        }
        out_used += j;
      }
      vassert(out_used <= vta->host_bytes_size);
   }
   *(vta->host_bytes_used) = out_used;

   vexAllocSanityCheck();

   vexSetAllocModeTEMP_and_clear();

   vex_traceflags = 0;
   res.status = VexTransOK;
   return res;
}
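
/* Illustrative call sequence (a sketch; assumes LibVEX_Init has
   already run, and that guest_code, host_code and host_code_used are
   hypothetical client-owned buffers/variables).  The assertions above
   require needs_self_check and disp_cp_xassisted to be non-NULL, and
   the three chaining pointers to be all NULL or all non-NULL:

      VexTranslateArgs vta;
      vta.arch_guest       = VexArchAMD64;
      LibVEX_default_VexArchInfo ( &vta.archinfo_guest );
      vta.arch_host        = VexArchAMD64;
      LibVEX_default_VexArchInfo ( &vta.archinfo_host );
      LibVEX_default_VexAbiInfo  ( &vta.abiinfo_both );
      vta.guest_bytes      = guest_code;
      vta.guest_bytes_addr = (Addr64)(HWord)guest_code;
      vta.host_bytes       = host_code;
      vta.host_bytes_size  = sizeof host_code;
      vta.host_bytes_used  = &host_code_used;
      ... also set callback_opaque, chase_into_ok, guest_extents,
          instrument1/2, finaltidy, needs_self_check,
          preamble_function, traceflags, addProfInc and the
          disp_cp_* pointers as required ...
      VexTranslateResult tres = LibVEX_Translate ( &vta );
      vassert(tres.status == VexTransOK);
*/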


/* --------- Chain/Unchain XDirects. --------- */

VexInvalRange LibVEX_Chain ( VexArch arch_host,
                             void*   place_to_chain,
                             void*   disp_cp_chain_me_EXPECTED,
                             void*   place_to_jump_to )
{
   VexInvalRange (*chainXDirect)(void*, void*, void*) = NULL;
   switch (arch_host) {
      case VexArchX86:
         chainXDirect = chainXDirect_X86; break;
      case VexArchAMD64:
         chainXDirect = chainXDirect_AMD64; break;
      case VexArchARM:
         chainXDirect = chainXDirect_ARM; break;
      case VexArchS390X:
         chainXDirect = chainXDirect_S390; break;
      case VexArchPPC32:
         return chainXDirect_PPC(place_to_chain,
                                 disp_cp_chain_me_EXPECTED,
                                 place_to_jump_to, False/*!mode64*/);
      case VexArchPPC64:
         return chainXDirect_PPC(place_to_chain,
                                 disp_cp_chain_me_EXPECTED,
                                 place_to_jump_to, True/*mode64*/);
      case VexArchMIPS32:
         return chainXDirect_MIPS(place_to_chain,
                                  disp_cp_chain_me_EXPECTED,
                                  place_to_jump_to, False/*!mode64*/);
      default:
         vassert(0);
   }
   vassert(chainXDirect);
   VexInvalRange vir
      = chainXDirect(place_to_chain, disp_cp_chain_me_EXPECTED,
                     place_to_jump_to);
   return vir;
}

VexInvalRange LibVEX_UnChain ( VexArch arch_host,
                               void*   place_to_unchain,
                               void*   place_to_jump_to_EXPECTED,
                               void*   disp_cp_chain_me )
{
   VexInvalRange (*unchainXDirect)(void*, void*, void*) = NULL;
   switch (arch_host) {
      case VexArchX86:
         unchainXDirect = unchainXDirect_X86; break;
      case VexArchAMD64:
         unchainXDirect = unchainXDirect_AMD64; break;
      case VexArchARM:
         unchainXDirect = unchainXDirect_ARM; break;
      case VexArchS390X:
         unchainXDirect = unchainXDirect_S390; break;
      case VexArchPPC32:
         return unchainXDirect_PPC(place_to_unchain,
                                   place_to_jump_to_EXPECTED,
                                   disp_cp_chain_me, False/*!mode64*/);
      case VexArchPPC64:
         return unchainXDirect_PPC(place_to_unchain,
                                   place_to_jump_to_EXPECTED,
                                   disp_cp_chain_me, True/*mode64*/);
      case VexArchMIPS32:
         return unchainXDirect_MIPS(place_to_unchain,
                                    place_to_jump_to_EXPECTED,
                                    disp_cp_chain_me, False/*!mode64*/);
      default:
         vassert(0);
   }
   vassert(unchainXDirect);
   VexInvalRange vir
      = unchainXDirect(place_to_unchain, place_to_jump_to_EXPECTED,
                       disp_cp_chain_me);
   return vir;
}
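
/* Note the symmetry: LibVEX_Chain patches a chain-me call site into a
   direct jump, and LibVEX_UnChain undoes exactly that edit.  An
   illustrative round trip (a sketch; p, chain_me and target are
   hypothetical client pointers):

      VexInvalRange vir1
         = LibVEX_Chain   ( VexArchAMD64, p, chain_me, target );
      ...
      VexInvalRange vir2
         = LibVEX_UnChain ( VexArchAMD64, p, target, chain_me );

   Each returned VexInvalRange describes the code bytes that were
   modified, so the client knows what to invalidate in the host's
   instruction cache. */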

Int LibVEX_evCheckSzB ( VexArch arch_host )
{
   static Int cached = 0; /* DO NOT MAKE NON-STATIC */
   if (UNLIKELY(cached == 0)) {
      switch (arch_host) {
         case VexArchX86:
            cached = evCheckSzB_X86(); break;
         case VexArchAMD64:
            cached = evCheckSzB_AMD64(); break;
         case VexArchARM:
            cached = evCheckSzB_ARM(); break;
         case VexArchS390X:
            cached = evCheckSzB_S390(); break;
         case VexArchPPC32:
         case VexArchPPC64:
            cached = evCheckSzB_PPC(); break;
         case VexArchMIPS32:
            cached = evCheckSzB_MIPS(); break;
         default:
            vassert(0);
      }
   }
   return cached;
}
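
/* Note the cache above assumes arch_host is the same on every call.
   Each translation begins with an event check of this fixed size, so
   (as a sketch of the intended use) a client computes a translation's
   "fast" entry point as its "slow" entry point advanced by
   LibVEX_evCheckSzB(arch_host) bytes. */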

VexInvalRange LibVEX_PatchProfInc ( VexArch arch_host,
                                    void*   place_to_patch,
                                    ULong*  location_of_counter )
{
   VexInvalRange (*patchProfInc)(void*,ULong*) = NULL;
   switch (arch_host) {
      case VexArchX86:
         patchProfInc = patchProfInc_X86; break;
      case VexArchAMD64:
         patchProfInc = patchProfInc_AMD64; break;
      case VexArchARM:
         patchProfInc = patchProfInc_ARM; break;
      case VexArchS390X:
         patchProfInc = patchProfInc_S390; break;
      case VexArchPPC32:
         return patchProfInc_PPC(place_to_patch,
                                 location_of_counter, False/*!mode64*/);
      case VexArchPPC64:
         return patchProfInc_PPC(place_to_patch,
                                 location_of_counter, True/*mode64*/);
      case VexArchMIPS32:
         return patchProfInc_MIPS(place_to_patch,
                                  location_of_counter, False/*!mode64*/);
      default:
         vassert(0);
   }
   vassert(patchProfInc);
   VexInvalRange vir
      = patchProfInc(place_to_patch, location_of_counter);
   return vir;
}


/* --------- Emulation warnings. --------- */

HChar* LibVEX_EmWarn_string ( VexEmWarn ew )
{
   switch (ew) {
      case EmWarn_NONE:
         return "none";
      case EmWarn_X86_x87exns:
         return "Unmasking x87 FP exceptions";
      case EmWarn_X86_x87precision:
         return "Selection of non-80-bit x87 FP precision";
      case EmWarn_X86_sseExns:
         return "Unmasking SSE FP exceptions";
      case EmWarn_X86_fz:
         return "Setting %mxcsr.fz (SSE flush-underflows-to-zero mode)";
      case EmWarn_X86_daz:
         return "Setting %mxcsr.daz (SSE treat-denormals-as-zero mode)";
      case EmWarn_X86_acFlag:
         return "Setting %eflags.ac (setting noted but ignored)";
      case EmWarn_PPCexns:
         return "Unmasking PPC32/64 FP exceptions";
      case EmWarn_PPC64_redir_overflow:
         return "PPC64 function redirection stack overflow";
      case EmWarn_PPC64_redir_underflow:
         return "PPC64 function redirection stack underflow";
      default:
         vpanic("LibVEX_EmWarn_string: unknown warning");
   }
}

/* ------------------ Arch/HwCaps stuff. ------------------ */

const HChar* LibVEX_ppVexArch ( VexArch arch )
{
   switch (arch) {
      case VexArch_INVALID: return "INVALID";
      case VexArchX86:      return "X86";
      case VexArchAMD64:    return "AMD64";
      case VexArchARM:      return "ARM";
      case VexArchPPC32:    return "PPC32";
      case VexArchPPC64:    return "PPC64";
      case VexArchS390X:    return "S390X";
      case VexArchMIPS32:   return "MIPS32";
      default:              return "VexArch???";
   }
}

const HChar* LibVEX_ppVexHwCaps ( VexArch arch, UInt hwcaps )
{
   HChar* str = show_hwcaps(arch,hwcaps);
   return str ? str : "INVALID";
}


/* Write default settings into *vai. */
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai )
{
   vai->hwcaps             = 0;
   vai->ppc_cache_line_szB = 0;
   vai->ppc_dcbz_szB       = 0;
   vai->ppc_dcbzl_szB      = 0;
}

/* Write default settings into *vbi. */
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi )
{
   vbi->guest_stack_redzone_size       = 0;
   vbi->guest_amd64_assume_fs_is_zero  = False;
   vbi->guest_amd64_assume_gs_is_0x60  = False;
   vbi->guest_ppc_zap_RZ_at_blr        = False;
   vbi->guest_ppc_zap_RZ_at_bl         = NULL;
   vbi->guest_ppc_sc_continues_at_LR   = False;
   vbi->host_ppc_calls_use_fndescrs    = False;
   vbi->host_ppc32_regalign_int64_args = False;
}


/* Return a string showing the hwcaps in a nice way.  The string will
   be NULL for invalid combinations of flags, so these functions also
   serve as a way to validate hwcaps values. */

static HChar* show_hwcaps_x86 ( UInt hwcaps )
{
   /* Monotonic, SSE3 > SSE2 > SSE1 > baseline. */
   switch (hwcaps) {
      case 0:
         return "x86-sse0";
      case VEX_HWCAPS_X86_SSE1:
         return "x86-sse1";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2:
         return "x86-sse1-sse2";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-lzcnt";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3:
         return "x86-sse1-sse2-sse3";
      case VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2
           | VEX_HWCAPS_X86_SSE3 | VEX_HWCAPS_X86_LZCNT:
         return "x86-sse1-sse2-sse3-lzcnt";
      default:
         return NULL;
   }
}

static HChar* show_hwcaps_amd64 ( UInt hwcaps )
{
   /* SSE3 and CX16 are orthogonal and > baseline, although we really
      don't expect to come across anything which can do SSE3 but can't
      do CX16.  Still, we can handle that case.  LZCNT is similarly
      orthogonal.  AVX is technically orthogonal, but just add the
      cases we actually come across.  (This scheme for printing is
      very stupid.  We should add strings independently based on
      feature bits, but then it would be hard to return a string that
      didn't need deallocating by the caller.) */
   /* FIXME: show_hwcaps_s390x is a much better way to do this. */
   switch (hwcaps) {
      case 0:
         return "amd64-sse2";
      case VEX_HWCAPS_AMD64_SSE3:
         return "amd64-sse3";
      case VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse2-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16:
         return "amd64-sse3-cx16";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-lzcnt";
      case VEX_HWCAPS_AMD64_CX16 | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse2-cx16-lzcnt";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16
           | VEX_HWCAPS_AMD64_LZCNT:
         return "amd64-sse3-cx16-lzcnt";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16
           | VEX_HWCAPS_AMD64_AVX:
         return "amd64-sse3-cx16-avx";
      case VEX_HWCAPS_AMD64_SSE3 | VEX_HWCAPS_AMD64_CX16
           | VEX_HWCAPS_AMD64_LZCNT | VEX_HWCAPS_AMD64_AVX:
         return "amd64-sse3-cx16-lzcnt-avx";
      default:
         return NULL;
   }
}
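
/* The FIXME above refers to the append-per-feature-bit scheme used by
   show_hwcaps_s390x below.  A minimal sketch of that approach for
   amd64 (illustrative only, not used here; it trades the exhaustive
   case list for a static buffer, and the exact strings would differ
   slightly from the table above):

      static HChar buf[64];
      HChar* p = buf + vex_sprintf(buf, "amd64-sse2");
      if (hwcaps & VEX_HWCAPS_AMD64_SSE3)
         p += vex_sprintf(p, "-sse3");
      if (hwcaps & VEX_HWCAPS_AMD64_CX16)
         p += vex_sprintf(p, "-cx16");
      if (hwcaps & VEX_HWCAPS_AMD64_LZCNT)
         p += vex_sprintf(p, "-lzcnt");
      if (hwcaps & VEX_HWCAPS_AMD64_AVX)
         p += vex_sprintf(p, "-avx");
      return buf;
*/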

static HChar* show_hwcaps_ppc32 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > F > baseline,
      but once you have F then you can have FX or GX too. */
   const UInt F   = VEX_HWCAPS_PPC32_F;
   const UInt V   = VEX_HWCAPS_PPC32_V;
   const UInt FX  = VEX_HWCAPS_PPC32_FX;
   const UInt GX  = VEX_HWCAPS_PPC32_GX;
   const UInt VX  = VEX_HWCAPS_PPC32_VX;
   const UInt DFP = VEX_HWCAPS_PPC32_DFP;
   UInt c = hwcaps;
   if (c == 0)                  return "ppc32-int";
   if (c == F)                  return "ppc32-int-flt";
   if (c == (F|FX))             return "ppc32-int-flt-FX";
   if (c == (F|GX))             return "ppc32-int-flt-GX";
   if (c == (F|FX|GX))          return "ppc32-int-flt-FX-GX";
   if (c == (F|V))              return "ppc32-int-flt-vmx";
   if (c == (F|V|FX))           return "ppc32-int-flt-vmx-FX";
   if (c == (F|V|GX))           return "ppc32-int-flt-vmx-GX";
   if (c == (F|V|FX|GX))        return "ppc32-int-flt-vmx-FX-GX";
   if (c == (F|V|FX|GX|DFP))    return "ppc32-int-flt-vmx-FX-GX-DFP";
   if (c == (F|V|FX|GX|VX|DFP)) return "ppc32-int-flt-vmx-FX-GX-VX-DFP";
   return NULL;
}

static HChar* show_hwcaps_ppc64 ( UInt hwcaps )
{
   /* Monotonic with complications.  Basically V > baseline(==F),
      but once you have F then you can have FX or GX too. */
   const UInt V   = VEX_HWCAPS_PPC64_V;
   const UInt FX  = VEX_HWCAPS_PPC64_FX;
   const UInt GX  = VEX_HWCAPS_PPC64_GX;
   const UInt VX  = VEX_HWCAPS_PPC64_VX;
   const UInt DFP = VEX_HWCAPS_PPC64_DFP;
   UInt c = hwcaps;
   if (c == 0)                return "ppc64-int-flt";
   if (c == FX)               return "ppc64-int-flt-FX";
   if (c == GX)               return "ppc64-int-flt-GX";
   if (c == (FX|GX))          return "ppc64-int-flt-FX-GX";
   if (c == V)                return "ppc64-int-flt-vmx";
   if (c == (V|FX))           return "ppc64-int-flt-vmx-FX";
   if (c == (V|GX))           return "ppc64-int-flt-vmx-GX";
   if (c == (V|FX|GX))        return "ppc64-int-flt-vmx-FX-GX";
   if (c == (V|FX|GX|DFP))    return "ppc64-int-flt-vmx-FX-GX-DFP";
   if (c == (V|FX|GX|VX|DFP)) return "ppc64-int-flt-vmx-FX-GX-VX-DFP";
   return NULL;
}

static HChar* show_hwcaps_arm ( UInt hwcaps )
{
   Bool N   = ((hwcaps & VEX_HWCAPS_ARM_NEON) != 0);
   Bool vfp = ((hwcaps & (VEX_HWCAPS_ARM_VFP |
                          VEX_HWCAPS_ARM_VFP2 | VEX_HWCAPS_ARM_VFP3)) != 0);
   switch (VEX_ARM_ARCHLEVEL(hwcaps)) {
      case 5:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv5-vfp";
         else
            return "ARMv5";
      case 6:
         if (N)
            return NULL;
         if (vfp)
            return "ARMv6-vfp";
         else
            return "ARMv6";
      case 7:
         if (vfp) {
            if (N)
               return "ARMv7-vfp-neon";
            else
               return "ARMv7-vfp";
         } else {
            if (N)
               return "ARMv7-neon";
            else
               return "ARMv7";
         }
      default:
         return NULL;
   }
}

static HChar* show_hwcaps_s390x ( UInt hwcaps )
{
   static const HChar prefix[] = "s390x";
   static const HChar facilities[][6] = {
     { "ldisp" },
     { "eimm" },
     { "gie" },
     { "dfp" },
     { "fgx" },
     { "stfle" },
     { "etf2" },
     { "etf3" },
   };
   static HChar buf[sizeof facilities + sizeof prefix + 1];
   static HChar *p;

   if (buf[0] != '\0') return buf;  /* already constructed */

   hwcaps = VEX_HWCAPS_S390X(hwcaps);

   p = buf + vex_sprintf(buf, "%s", prefix);
   if (hwcaps & VEX_HWCAPS_S390X_LDISP)
      p = p + vex_sprintf(p, "-%s", facilities[0]);
   if (hwcaps & VEX_HWCAPS_S390X_EIMM)
      p = p + vex_sprintf(p, "-%s", facilities[1]);
   if (hwcaps & VEX_HWCAPS_S390X_GIE)
      p = p + vex_sprintf(p, "-%s", facilities[2]);
   if (hwcaps & VEX_HWCAPS_S390X_DFP)
      p = p + vex_sprintf(p, "-%s", facilities[3]);
   if (hwcaps & VEX_HWCAPS_S390X_FGX)
      p = p + vex_sprintf(p, "-%s", facilities[4]);
   if (hwcaps & VEX_HWCAPS_S390X_STFLE)
      p = p + vex_sprintf(p, "-%s", facilities[5]);
   if (hwcaps & VEX_HWCAPS_S390X_ETF2)
      p = p + vex_sprintf(p, "-%s", facilities[6]);
   if (hwcaps & VEX_HWCAPS_S390X_ETF3)
      p = p + vex_sprintf(p, "-%s", facilities[7]);

   /* If there are no facilities, add "zarch" */
   if (hwcaps == 0)
      vex_sprintf(p, "-%s", "zarch");

   return buf;
}

static HChar* show_hwcaps_mips32 ( UInt hwcaps )
{
   if (hwcaps == 0x00010000) return "MIPS-baseline";
   if (hwcaps == 0x00020000) return "Broadcom-baseline";
   return NULL;
}

/* ---- */
static HChar* show_hwcaps ( VexArch arch, UInt hwcaps )
{
   switch (arch) {
      case VexArchX86:    return show_hwcaps_x86(hwcaps);
      case VexArchAMD64:  return show_hwcaps_amd64(hwcaps);
      case VexArchPPC32:  return show_hwcaps_ppc32(hwcaps);
      case VexArchPPC64:  return show_hwcaps_ppc64(hwcaps);
      case VexArchARM:    return show_hwcaps_arm(hwcaps);
      case VexArchS390X:  return show_hwcaps_s390x(hwcaps);
      case VexArchMIPS32: return show_hwcaps_mips32(hwcaps);
      default:            return NULL;
   }
}

static Bool are_valid_hwcaps ( VexArch arch, UInt hwcaps )
{
   return show_hwcaps(arch,hwcaps) != NULL;
}


/*---------------------------------------------------------------*/
/*--- end                                         main_main.c ---*/
/*---------------------------------------------------------------*/