
/*---------------------------------------------------------------*/
/*--- begin                                          libvex.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2011 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __LIBVEX_H
#define __LIBVEX_H


#include "libvex_basictypes.h"
#include "libvex_ir.h"


/*---------------------------------------------------------------*/
/*--- This file defines the top-level interface to LibVEX.    ---*/
/*---------------------------------------------------------------*/

/*-------------------------------------------------------*/
/*--- Architectures, variants, and other arch info    ---*/
/*-------------------------------------------------------*/

typedef
   enum {
      VexArch_INVALID,
      VexArchX86,
      VexArchAMD64,
      VexArchARM,
      VexArchPPC32,
      VexArchPPC64,
      VexArchS390X
   }
   VexArch;


/* For a given architecture, these specify extra capabilities beyond
   the minimum supported (baseline) capabilities.  They may be OR'd
   together, although some combinations don't make sense.  (eg, SSE2
   but not SSE1).  LibVEX_Translate will check for nonsensical
   combinations. */

/* x86: baseline capability is Pentium-1 (FPU, MMX, but no SSE), with
   cmpxchg8b. */
#define VEX_HWCAPS_X86_SSE1    (1<<1)  /* SSE1 support (Pentium III) */
#define VEX_HWCAPS_X86_SSE2    (1<<2)  /* SSE2 support (Pentium 4) */
#define VEX_HWCAPS_X86_SSE3    (1<<3)  /* SSE3 support (>= Prescott) */
#define VEX_HWCAPS_X86_LZCNT   (1<<4)  /* SSE4a LZCNT insn */

/* amd64: baseline capability is SSE2, with cmpxchg8b but not
   cmpxchg16b. */
#define VEX_HWCAPS_AMD64_SSE3  (1<<5)  /* SSE3 support */
#define VEX_HWCAPS_AMD64_CX16  (1<<6)  /* cmpxchg16b support */
#define VEX_HWCAPS_AMD64_LZCNT (1<<7)  /* SSE4a LZCNT insn */

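/* Illustrative sketch only, not part of the API: a hwcaps word is
   built by OR'ing the flags above onto the baseline.  The particular
   combination shown is just an example. */
#if 0
static UInt example_x86_hwcaps ( void )
{
   /* An x86 guest with SSE1 and SSE2 (roughly, a Pentium 4).  SSE2
      without SSE1 would be a nonsensical combination, and
      LibVEX_Translate rejects such requests. */
   return VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2;
}
#endif
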
/* ppc32: baseline capability is integer only */
#define VEX_HWCAPS_PPC32_F     (1<<8)  /* basic (non-optional) FP */
#define VEX_HWCAPS_PPC32_V     (1<<9)  /* Altivec (VMX) */
#define VEX_HWCAPS_PPC32_FX    (1<<10) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC32_GX    (1<<11) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */
#define VEX_HWCAPS_PPC32_VX    (1<<12) /* Vector-scalar floating-point (VSX); implies ISA 2.06 or higher  */

/* ppc64: baseline capability is integer and basic FP insns */
#define VEX_HWCAPS_PPC64_V     (1<<13) /* Altivec (VMX) */
#define VEX_HWCAPS_PPC64_FX    (1<<14) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC64_GX    (1<<15) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */
#define VEX_HWCAPS_PPC64_VX    (1<<16) /* Vector-scalar floating-point (VSX); implies ISA 2.06 or higher  */

/* s390x: Hardware capability encoding

   Bits    Information
   [26:31] Machine model
   [25]    Long displacement facility
   [24]    Extended-immediate facility
   [23]    General-instruction-extension facility
   [22]    Decimal floating point facility
   [21]    FPR-GR transfer facility
   [0:20]  Currently unused; reserved for future use
*/

/* Model numbers must be assigned in chronological order.
   They are used as an array index. */
#define VEX_S390X_MODEL_Z900     0
#define VEX_S390X_MODEL_Z800     1
#define VEX_S390X_MODEL_Z990     2
#define VEX_S390X_MODEL_Z890     3
#define VEX_S390X_MODEL_Z9_EC    4
#define VEX_S390X_MODEL_Z9_BC    5
#define VEX_S390X_MODEL_Z10_EC   6
#define VEX_S390X_MODEL_Z10_BC   7
#define VEX_S390X_MODEL_Z196     8
#define VEX_S390X_MODEL_Z114     9
#define VEX_S390X_MODEL_INVALID  10
#define VEX_S390X_MODEL_MASK     0x3F

#define VEX_HWCAPS_S390X_LDISP (1<<6)   /* Long-displacement facility */
#define VEX_HWCAPS_S390X_EIMM  (1<<7)   /* Extended-immediate facility */
#define VEX_HWCAPS_S390X_GIE   (1<<8)   /* General-instruction-extension facility */
#define VEX_HWCAPS_S390X_DFP   (1<<9)   /* Decimal floating point facility */
#define VEX_HWCAPS_S390X_FGX   (1<<10)  /* FPR-GR transfer facility */

/* Special value representing all available s390x hwcaps */
#define VEX_HWCAPS_S390X_ALL   (VEX_HWCAPS_S390X_LDISP | \
                                VEX_HWCAPS_S390X_EIMM  | \
                                VEX_HWCAPS_S390X_GIE   | \
                                VEX_HWCAPS_S390X_DFP   | \
                                VEX_HWCAPS_S390X_FGX)

#define VEX_HWCAPS_S390X(x)  ((x) & ~VEX_S390X_MODEL_MASK)
#define VEX_S390X_MODEL(x)   ((x) &  VEX_S390X_MODEL_MASK)

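/* Illustrative sketch only, not part of the API: a complete s390x
   hwcaps word carries a model number in the low 6 bits plus facility
   bits, and is taken apart again with the two macros above.  The
   model and facilities chosen here are just an example. */
#if 0
static UInt example_s390x_hwcaps ( void )
{
   /* A z10 EC with long-displacement and extended-immediate
      facilities. */
   return VEX_S390X_MODEL_Z10_EC
          | VEX_HWCAPS_S390X_LDISP
          | VEX_HWCAPS_S390X_EIMM;
}

static void example_s390x_decode ( UInt hwcaps )
{
   UInt model      = VEX_S390X_MODEL(hwcaps);   /* low 6 bits */
   UInt facilities = VEX_HWCAPS_S390X(hwcaps);  /* facility bits */
   /* ... dispatch on model/facilities ... */
}
#endif
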
/* arm: baseline capability is ARMv4 */
/* Bits 5:0 - architecture level (e.g. 5 for v5, 6 for v6 etc) */
#define VEX_HWCAPS_ARM_VFP    (1<<6)  /* VFP extension */
#define VEX_HWCAPS_ARM_VFP2   (1<<7)  /* VFPv2 */
#define VEX_HWCAPS_ARM_VFP3   (1<<8)  /* VFPv3 */
/* Bits 15:10 reserved for (possible) future VFP revisions */
#define VEX_HWCAPS_ARM_NEON   (1<<16) /* Advanced SIMD also known as NEON */

/* Get an ARM architecture level from HWCAPS */
#define VEX_ARM_ARCHLEVEL(x) ((x) & 0x3f)

/* These return statically allocated strings. */

extern const HChar* LibVEX_ppVexArch    ( VexArch );
extern const HChar* LibVEX_ppVexHwCaps  ( VexArch, UInt );


/* This struct is a bit of a hack, but is needed to carry misc
   important bits of info about an arch.  Fields which are meaningless
   or ignored for the platform in question should be set to zero. */

typedef
   struct {
      /* This is the only mandatory field. */
      UInt hwcaps;
      /* PPC32/PPC64 only: size of cache line */
      Int ppc_cache_line_szB;
      /* PPC32/PPC64 only: sizes zeroed by the dcbz/dcbzl instructions
       * (bug#135264) */
      UInt ppc_dcbz_szB;
      UInt ppc_dcbzl_szB; /* 0 means unsupported (SIGILL) */
   }
   VexArchInfo;

/* Write default settings into *vai. */
extern
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai );

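/* Illustrative sketch only, not part of the API: typical use is to
   take the defaults and then fill in hwcaps (and, for PPC, the cache
   line sizes).  The values shown are examples. */
#if 0
static void example_setup_archinfo ( /*OUT*/VexArchInfo* vai )
{
   LibVEX_default_VexArchInfo(vai);
   /* Describe an x86 with SSE1 and SSE2. */
   vai->hwcaps = VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2;
}
#endif
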

/* This struct carries guest and host ABI variant information that may
   be needed.  Fields which are meaningless or ignored for the
   platform in question should be set to zero.

   Settings which are believed to be correct are:

   guest_stack_redzone_size
      guest is ppc32-linux                ==> 0
      guest is ppc64-linux                ==> 288
      guest is ppc32-aix5                 ==> 220
      guest is ppc64-aix5                 ==> unknown
      guest is amd64-linux                ==> 128
      guest is other                      ==> inapplicable

   guest_amd64_assume_fs_is_zero
      guest is amd64-linux                ==> True
      guest is amd64-darwin               ==> False
      guest is other                      ==> inapplicable

   guest_amd64_assume_gs_is_0x60
      guest is amd64-darwin               ==> True
      guest is amd64-linux                ==> False
      guest is other                      ==> inapplicable

   guest_ppc_zap_RZ_at_blr
      guest is ppc64-linux                ==> True
      guest is ppc32-linux                ==> False
      guest is ppc64-aix5                 ==> unknown
      guest is ppc32-aix5                 ==> False
      guest is other                      ==> inapplicable

   guest_ppc_zap_RZ_at_bl
      guest is ppc64-linux                ==> const True
      guest is ppc32-linux                ==> const False
      guest is ppc64-aix5                 ==> unknown
      guest is ppc32-aix5                 ==> True except for calls to
                                              millicode, $SAVEFn, $RESTFn
      guest is other                      ==> inapplicable

   guest_ppc_sc_continues_at_LR:
      guest is ppc32-aix5  or ppc64-aix5  ==> True
      guest is ppc32-linux or ppc64-linux ==> False
      guest is other                      ==> inapplicable

   host_ppc_calls_use_fndescrs:
      host is ppc32-linux                 ==> False
      host is ppc64-linux                 ==> True
      host is ppc32-aix5 or ppc64-aix5    ==> True
      host is other                       ==> inapplicable

   host_ppc32_regalign_int64_args:
      host is ppc32-linux                 ==> True
      host is ppc32-aix5                  ==> False
      host is other                       ==> inapplicable
*/

typedef
   struct {
      /* PPC and AMD64 GUESTS only: how many bytes below the
         stack pointer are validly addressable? */
      Int guest_stack_redzone_size;

      /* AMD64 GUESTS only: should we translate %fs-prefixed
         instructions using the assumption that %fs always contains
         zero? */
      Bool guest_amd64_assume_fs_is_zero;

      /* AMD64 GUESTS only: should we translate %gs-prefixed
         instructions using the assumption that %gs always contains
         0x60? */
      Bool guest_amd64_assume_gs_is_0x60;

      /* PPC GUESTS only: should we zap the stack red zone at a 'blr'
         (function return) ? */
      Bool guest_ppc_zap_RZ_at_blr;

      /* PPC GUESTS only: should we zap the stack red zone at a 'bl'
         (function call) ?  Is supplied with the guest address of the
         target of the call since that may be significant.  If NULL,
         is assumed equivalent to a fn which always returns False. */
      Bool (*guest_ppc_zap_RZ_at_bl)(Addr64);

      /* PPC32/PPC64 GUESTS only: where does the kernel resume after
         'sc'?  False => Linux style, at the next insn.  True => AIX
         style, at the address stated in the link register. */
      Bool guest_ppc_sc_continues_at_LR;

      /* PPC32/PPC64 HOSTS only: does '&f' give us a pointer to a
         function descriptor on the host, or to the function code
         itself?  True => descriptor, False => code. */
      Bool host_ppc_calls_use_fndescrs;

      /* PPC32 HOSTS only: when generating code to pass a 64-bit value
         (actual parameter) in a pair of regs, should we skip an arg
         reg if it is even-numbered?  True => yes, False => no. */
      Bool host_ppc32_regalign_int64_args;
   }
   VexAbiInfo;

/* Write default settings into *vbi. */
extern
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi );

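/* Illustrative sketch only, not part of the API: as with VexArchInfo,
   start from the defaults and set only the fields relevant to the
   guest/host pair; the amd64-linux values follow the table above. */
#if 0
static void example_setup_abiinfo_amd64_linux ( /*OUT*/VexAbiInfo* vbi )
{
   LibVEX_default_VexAbiInfo(vbi);
   vbi->guest_stack_redzone_size      = 128;
   vbi->guest_amd64_assume_fs_is_zero = True;
}
#endif
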

/*-------------------------------------------------------*/
/*--- Control of Vex's optimiser (iropt).             ---*/
/*-------------------------------------------------------*/

/* Control of Vex's optimiser. */

typedef
   struct {
      /* Controls verbosity of iropt.  0 = no output. */
      Int iropt_verbosity;
      /* Control aggressiveness of iropt.  0 = no opt, 1 = simple
         opts, 2 (default) = max optimisation. */
      Int iropt_level;
      /* Ensure all integer registers are up to date at potential
         memory exception points?  True(default)=yes, False=no, only
         the guest's stack pointer. */
      Bool iropt_precise_memory_exns;
      /* How aggressive should iropt be in unrolling loops?  Higher
         numbers make it more enthusiastic about loop unrolling.
         Default=120.  A setting of zero disables unrolling.  */
      Int iropt_unroll_thresh;
      /* What's the maximum basic block length the front end(s) allow?
         BBs longer than this are split up.  Default=50 (guest
         insns). */
      Int guest_max_insns;
      /* How aggressive should front ends be in following
         unconditional branches to known destinations?  Default=10,
         meaning that if a block contains less than 10 guest insns so
         far, the front end(s) will attempt to chase into its
         successor. A setting of zero disables chasing.  */
      Int guest_chase_thresh;
      /* EXPERIMENTAL: chase across conditional branches?  Not all
         front ends honour this.  Default: NO. */
      Bool guest_chase_cond;
   }
   VexControl;


/* Write the default settings into *vcon. */

extern
void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon );

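/* Illustrative sketch only, not part of the API: start from the
   defaults and override selectively, for example to reduce the
   optimisation level and disable chasing while debugging an
   instrumentation pass. */
#if 0
static void example_setup_control ( /*OUT*/VexControl* vcon )
{
   LibVEX_default_VexControl(vcon);
   vcon->iropt_level        = 1;  /* simple opts only */
   vcon->guest_chase_thresh = 0;  /* never chase into successors */
}
#endif
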

/*-------------------------------------------------------*/
/*--- Storage management control                      ---*/
/*-------------------------------------------------------*/

/* Allocate in Vex's temporary allocation area.  Be careful with this.
   You can only call it inside an instrumentation or optimisation
   callback that you have previously specified in a call to
   LibVEX_Translate.  The storage allocated will only stay alive until
   translation of the current basic block is complete.
 */
extern HChar* private_LibVEX_alloc_first;
extern HChar* private_LibVEX_alloc_curr;
extern HChar* private_LibVEX_alloc_last;
extern void   private_LibVEX_alloc_OOM(void) __attribute__((noreturn));

static inline void* LibVEX_Alloc ( Int nbytes )
{
#if 0
  /* Nasty debugging hack, do not use. */
  return malloc(nbytes);
#else
   HChar* curr;
   HChar* next;
   Int    ALIGN;
   ALIGN  = sizeof(void*)-1;
   nbytes = (nbytes + ALIGN) & ~ALIGN;
   curr   = private_LibVEX_alloc_curr;
   next   = curr + nbytes;
   if (next >= private_LibVEX_alloc_last)
      private_LibVEX_alloc_OOM();
   private_LibVEX_alloc_curr = next;
   return curr;
#endif
}

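/* Illustrative sketch only, not part of the API: LibVEX_Alloc is
   intended for use inside an instrumentation or optimisation callback
   registered via LibVEX_Translate; nothing allocated with it may be
   retained once translation of the current block has finished. */
#if 0
static IRSB* example_pass ( IRSB* sbIn )
{
   /* Scratch space, valid only until this translation completes. */
   Int* scratch = (Int*)LibVEX_Alloc( (Int)(sbIn->stmts_used
                                            * sizeof(Int)) );
   /* ... use scratch while rewriting sbIn ... */
   return sbIn;
}
#endif
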
/* Show Vex allocation statistics. */
extern void LibVEX_ShowAllocStats ( void );


/*-------------------------------------------------------*/
/*--- Describing guest state layout                   ---*/
/*-------------------------------------------------------*/

/* Describe the guest state enough that the instrumentation
   functions can work. */

/* The max number of guest state chunks which we can describe as
   always defined (for the benefit of Memcheck). */
#define VEXGLO_N_ALWAYSDEFD  24

typedef
   struct {
      /* Total size of the guest state, in bytes.  Must be
         8-aligned. */
      Int total_sizeB;
      /* Whereabouts is the stack pointer? */
      Int offset_SP;
      Int sizeof_SP; /* 4 or 8 */
      /* Whereabouts is the frame pointer? */
      Int offset_FP;
      Int sizeof_FP; /* 4 or 8 */
      /* Whereabouts is the instruction pointer? */
      Int offset_IP;
      Int sizeof_IP; /* 4 or 8 */
      /* Describe parts of the guest state regarded as 'always
         defined'. */
      Int n_alwaysDefd;
      struct {
         Int offset;
         Int size;
      } alwaysDefd[VEXGLO_N_ALWAYSDEFD];
   }
   VexGuestLayout;

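/* Illustrative sketch only, not part of the API: a client describing
   an amd64 guest might fill in the struct roughly as below.  This
   assumes libvex_guest_amd64.h and <stddef.h> (for offsetof) are also
   included; the choice of always-defined ranges is the client's and
   is shown only schematically. */
#if 0
#include <stddef.h>
#include "libvex_guest_amd64.h"

static void example_describe_amd64 ( /*OUT*/VexGuestLayout* layout )
{
   layout->total_sizeB = sizeof(VexGuestAMD64State);
   layout->offset_SP   = offsetof(VexGuestAMD64State, guest_RSP);
   layout->sizeof_SP   = 8;
   layout->offset_FP   = offsetof(VexGuestAMD64State, guest_RBP);
   layout->sizeof_FP   = 8;
   layout->offset_IP   = offsetof(VexGuestAMD64State, guest_RIP);
   layout->sizeof_IP   = 8;
   /* Mark, for example, just the instruction pointer as always
      defined. */
   layout->n_alwaysDefd         = 1;
   layout->alwaysDefd[0].offset = layout->offset_IP;
   layout->alwaysDefd[0].size   = 8;
}
#endif
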
/* A note about guest state layout.

   LibVEX defines the layout for the guest state, in the file
   pub/libvex_guest_<arch>.h.  The struct will have a 16-aligned
   size.  Each translated bb is assumed to be entered with a specified
   register pointing at such a struct.  Beyond that are two copies of
   the shadow state area with the same size as the struct.  Beyond
   that is a spill area that LibVEX may spill into.  It must have size
   LibVEX_N_SPILL_BYTES, and this must be a 16-aligned number.

   On entry, the baseblock pointer register must be 16-aligned.

   There must be no holes in between the primary guest state, its two
   copies, and the spill area.  In short, all 4 areas must have a
   16-aligned size and be 16-aligned, and placed back-to-back.
*/

#define LibVEX_N_SPILL_BYTES 4096

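/* Illustrative arithmetic only, following the note above: the block
   the baseblock register points at is the guest state, two shadow
   copies, and the spill area, back to back.  The guest state size
   used here (1024 bytes) is purely an example. */
#if 0
enum { EXAMPLE_GUEST_STATE_SIZE = 1024 };  /* must be 16-aligned */
enum { EXAMPLE_BASEBLOCK_SIZE
          = 3 * EXAMPLE_GUEST_STATE_SIZE + LibVEX_N_SPILL_BYTES };
#endif
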

/*-------------------------------------------------------*/
/*--- Initialisation of the library                   ---*/
/*-------------------------------------------------------*/

/* Initialise the library.  You must call this first. */

extern void LibVEX_Init (

   /* failure exit function */
#  if __cplusplus == 1 && __GNUC__ && __GNUC__ <= 3
   /* g++ 3.x doesn't understand attributes on function parameters.
      See #265762. */
#  else
   __attribute__ ((noreturn))
#  endif
   void (*failure_exit) ( void ),

   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),

   /* debug paranoia level */
   Int debuglevel,

   /* Are we supporting valgrind checking? */
   Bool valgrind_support,

   /* Control ... */
   /*READONLY*/VexControl* vcon
);

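/* Illustrative sketch only, not part of the API: a minimal
   initialisation sequence.  'vex_exit' and 'vex_log' are hypothetical
   placeholders; a real client supplies its own exit and logging
   routines. */
#if 0
__attribute__((noreturn))
static void vex_exit ( void ) { for (;;) ; }

static void vex_log ( HChar* bytes, Int nbytes )
{
   /* forward the nbytes at 'bytes' to the client's logging sink */
}

static void example_init ( void )
{
   VexControl vcon;
   LibVEX_default_VexControl(&vcon);
   LibVEX_Init(vex_exit, vex_log, /*debuglevel*/0,
               /*valgrind_support*/False, &vcon);
}
#endif
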

/*-------------------------------------------------------*/
/*--- Make a translation                              ---*/
/*-------------------------------------------------------*/

/* Describes the outcome of a translation attempt. */
typedef
   struct {
      /* overall status */
      enum { VexTransOK,
             VexTransAccessFail, VexTransOutputFull } status;
      /* The number of extents that have a self-check (0 to 3) */
      UInt n_sc_extents;
   }
   VexTranslateResult;


/* Describes precisely the pieces of guest code that a translation
   covers.  Now that Vex can chase across BB boundaries, the old
   scheme of describing a chunk of guest code merely by its start
   address and length is inadequate.

   Hopefully this struct is only 32 bytes long.  Space is important as
   clients will have to store one of these for each translation made.
*/
typedef
   struct {
      Addr64 base[3];
      UShort len[3];
      UShort n_used;
   }
   VexGuestExtents;


/* A structure to carry arguments for LibVEX_Translate.  There are so
   many of them, it seems better to have a structure. */
typedef
   struct {
      /* IN: The instruction sets we are translating from and to.  And
         guest/host misc info. */
      VexArch      arch_guest;
      VexArchInfo  archinfo_guest;
      VexArch      arch_host;
      VexArchInfo  archinfo_host;
      VexAbiInfo   abiinfo_both;

      /* IN: an opaque value which is passed as the first arg to all
         callback functions supplied in this struct.  Vex has no idea
         what's at the other end of this pointer. */
      void*   callback_opaque;

      /* IN: the block to translate, and its guest address. */
      /* where are the actual bytes in the host's address space? */
      UChar*  guest_bytes;
      /* where do the bytes really come from in the guest's aspace?
         This is the post-redirection guest address.  Not that Vex
         understands anything about redirection; that is all done on
         the Valgrind side. */
      Addr64  guest_bytes_addr;

      /* Is it OK to chase into this guest address?  May not be
         NULL. */
      Bool    (*chase_into_ok) ( /*callback_opaque*/void*, Addr64 );

      /* OUT: which bits of guest code actually got translated */
      VexGuestExtents* guest_extents;

      /* IN: a place to put the resulting code, and its size */
      UChar*  host_bytes;
      Int     host_bytes_size;
      /* OUT: how much of the output area is used. */
      Int*    host_bytes_used;

      /* IN: optionally, two instrumentation functions.  May be
         NULL. */
      IRSB*   (*instrument1) ( /*callback_opaque*/void*,
                               IRSB*,
                               VexGuestLayout*,
                               VexGuestExtents*,
                               IRType gWordTy, IRType hWordTy );
      IRSB*   (*instrument2) ( /*callback_opaque*/void*,
                               IRSB*,
                               VexGuestLayout*,
                               VexGuestExtents*,
                               IRType gWordTy, IRType hWordTy );

      IRSB* (*finaltidy) ( IRSB* );

      /* IN: a callback used to ask the caller which of the extents,
         if any, a self check is required for.  Must not be NULL.
         The returned value is a bitmask with a 1 in position i indicating
         that the i'th extent needs a check.  Since there can be at most
         3 extents, the returned values must be between 0 and 7. */
      UInt (*needs_self_check)( /*callback_opaque*/void*,
                                VexGuestExtents* );

      /* IN: optionally, a callback which allows the caller to add its
         own IR preamble following the self-check and any other
         VEX-generated preamble, if any.  May be NULL.  If non-NULL,
         the IRSB under construction is handed to this function, which
         presumably adds IR statements to it.  The callback may
         optionally complete the block and direct bb_to_IR not to
         disassemble any instructions into it; this is indicated by
         the callback returning True.
      */
      Bool    (*preamble_function)(/*callback_opaque*/void*, IRSB*);

      /* IN: debug: trace vex activity at various points */
      Int     traceflags;

      /* IN: address of the dispatcher entry points.  Describes the
         places where generated code should jump to at the end of each
         bb.

         At the end of each translation, the next guest address is
         placed in the host's standard return register (x86: %eax,
         amd64: %rax, ppc32: %r3, ppc64: %r3).  Optionally, the guest
         state pointer register (on host x86: %ebp; amd64: %rbp;
         ppc32/64: r31) may be set to a VEX_TRC_ value to indicate any
         special action required before the next block is run.

         Control is then passed back to the dispatcher (beyond Vex's
         control; caller supplies this) in the following way:

         - On host archs which lack a link register (x86, amd64), by a
           jump to the host address specified in
           'dispatch_assisted', if the guest state pointer has been
           changed so as to request some action before the next block
           is run, or 'dispatch_unassisted' (the fast path), in
           which it is assumed that the guest state pointer is
           unchanged and we wish to continue directly with the next
           translation.  Both of these must be non-NULL.

         - On host archs which have a link register (ppc32, ppc64), by
           a branch to the link register (which is guaranteed to be
           unchanged from whatever it was at entry to the
           translation).  'dispatch_assisted' and
           'dispatch_unassisted' must be NULL.

         The aim is to get back and forth between translations and the
         dispatcher without creating memory traffic to store return
         addresses.
      */
      void* dispatch_unassisted;
      void* dispatch_assisted;
   }
   VexTranslateArgs;


extern
VexTranslateResult LibVEX_Translate ( VexTranslateArgs* );

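/* Illustrative sketch only, not part of the API: the minimum a client
   has to fill in before calling LibVEX_Translate.  Everything named
   example_* is a hypothetical placeholder, and the amd64-to-amd64
   setup with baseline hwcaps is just one possible configuration. */
#if 0
static Bool example_chase_ok ( void* opaque, Addr64 addr )
{
   return False;  /* never chase */
}

static UInt example_self_check ( void* opaque, VexGuestExtents* vge )
{
   return 0;      /* no extent needs a self-check */
}

static void example_translate ( UChar* guest_code, Addr64 guest_addr,
                                UChar* out_buf,    Int    out_size,
                                void*  disp_unassisted,
                                void*  disp_assisted )
{
   VexTranslateArgs   vta;
   VexTranslateResult res;
   VexGuestExtents    vge;
   Int                out_used;

   LibVEX_default_VexArchInfo(&vta.archinfo_guest);
   LibVEX_default_VexArchInfo(&vta.archinfo_host);
   LibVEX_default_VexAbiInfo (&vta.abiinfo_both);
   vta.arch_guest          = VexArchAMD64;
   vta.arch_host           = VexArchAMD64;
   vta.callback_opaque     = NULL;
   vta.guest_bytes         = guest_code;
   vta.guest_bytes_addr    = guest_addr;
   vta.chase_into_ok       = example_chase_ok;
   vta.guest_extents       = &vge;
   vta.host_bytes          = out_buf;
   vta.host_bytes_size     = out_size;
   vta.host_bytes_used     = &out_used;
   vta.instrument1         = NULL;
   vta.instrument2         = NULL;
   vta.finaltidy           = NULL;
   vta.needs_self_check    = example_self_check;
   vta.preamble_function   = NULL;
   vta.traceflags          = 0;
   /* On x86/amd64 hosts both dispatcher entry points must be
      non-NULL; on ppc32/ppc64 hosts both must be NULL. */
   vta.dispatch_unassisted = disp_unassisted;
   vta.dispatch_assisted   = disp_assisted;

   res = LibVEX_Translate(&vta);
   if (res.status != VexTransOK) {
      /* handle VexTransAccessFail / VexTransOutputFull */
   }
}
#endif
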
/* A subtlety re interaction between self-checking translations and
   bb-chasing.  The supplied chase_into_ok function should say NO
   (False) when presented with any address for which you might want to
   make a self-checking translation.

   If it doesn't do that, you may end up with Vex chasing from BB #1
   to BB #2 (fine); but if you wanted checking for #2 and not #1, that
   would not be the result.  Therefore chase_into_ok should disallow
   following into #2.  That will force the caller to eventually
   request a new translation starting at #2, at which point Vex will
   correctly observe the make-a-self-check flag.  */

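/* Illustrative sketch only, not part of the API: a chase_into_ok
   callback honouring the rule above.  'client_wants_self_check_at' is
   a hypothetical predicate that the client would implement. */
#if 0
extern Bool client_wants_self_check_at ( Addr64 addr );  /* hypothetical */

static Bool example_chase_into_ok ( void* opaque, Addr64 addr )
{
   /* Never chase into an address we may later want to translate with
      a self-check, so that the flag is observed when a translation
      starting there is eventually requested. */
   if (client_wants_self_check_at(addr))
      return False;
   return True;
}
#endif
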

/*-------------------------------------------------------*/
/*--- Show accumulated statistics                     ---*/
/*-------------------------------------------------------*/

extern void LibVEX_ShowStats ( void );


/*-------------------------------------------------------*/
/*--- Notes                                           ---*/
/*-------------------------------------------------------*/

/* Code generation conventions that need to be recorded somewhere.
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   x86
   ~~~
   Generated code should be entered using a JMP instruction.  On
   entry, %ebp should point to the guest state, and %esp should be a
   valid stack pointer.  The generated code may change %eax, %ebx,
   %ecx, %edx, %esi, %edi, all the FP registers and control state, and
   all the XMM registers.

   On entry, the FPU control word should be set to 0x027F, and the SSE
   control word (%mxcsr) should be set to 0x1F80.  On exit, they
   should still have those values (after masking off the lowest 6 bits
   of %mxcsr).  If they don't, there is a bug in VEX-generated code.

   Generated code returns to the scheduler using a JMP instruction, to
   the address specified in the dispatch_unassisted/dispatch_assisted
   fields of VexTranslateArgs.
   %eax (or %eax:%edx, if simulating a 64-bit target) will contain the
   guest address of the next block to execute.  %ebp may be changed
   to a VEX_TRC_ value, otherwise it should be as it was at entry.

   CRITICAL ISSUES in x86 code generation.  The only known critical
   issue is that the host FPU and SSE state is not properly saved
   across calls to helper functions.  If any helper references any
   such state, it is likely (1) to misbehave itself, since the FP
   stack tags will not be as expected, and (2) after returning to
   generated code, the generated code is likely to go wrong.  This
   really should be fixed.

   amd64
   ~~~~~
   Analogous to x86.

   ppc32
   ~~~~~
   On entry, the guest state pointer is r31.  dispatch_unassisted and
   dispatch_assisted must be NULL.
   Control is returned with a branch to the link register.  Generated
   code will not change lr.  At return, r3 holds the next guest addr
   (or r3:r4 ?).  r31 may be changed to a VEX_TRC_ value,
   otherwise it should be as it was at entry.

   ppc64
   ~~~~~
   Same as ppc32.

   ALL GUEST ARCHITECTURES
   ~~~~~~~~~~~~~~~~~~~~~~~
   The guest state must contain two pseudo-registers, guest_TISTART
   and guest_TILEN.  These are used to pass the address of areas of
   guest code, translations of which are to be invalidated, back to
   the dispatcher.  Both pseudo-regs must have size equal to the guest
   word size.

   The architecture must provide a third pseudo-register, guest_NRADDR, also
   guest-word-sized.  This is used to record the unredirected guest
   address at the start of a translation whose start has been
   redirected.  By reading this pseudo-register shortly afterwards,
   the translation can find out what the corresponding no-redirection
   address was.  Note, this is only set for wrap-style redirects, not
   for replace-style ones.
*/
#endif /* ndef __LIBVEX_H */

/*---------------------------------------------------------------*/
/*---                                                libvex.h ---*/
/*---------------------------------------------------------------*/