
/*---------------------------------------------------------------*/
/*--- begin                                          libvex.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2010 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __LIBVEX_H
#define __LIBVEX_H


#include "libvex_basictypes.h"
#include "libvex_ir.h"


/*---------------------------------------------------------------*/
/*--- This file defines the top-level interface to LibVEX.    ---*/
/*---------------------------------------------------------------*/

/*-------------------------------------------------------*/
/*--- Architectures, variants, and other arch info    ---*/
/*-------------------------------------------------------*/

typedef
   enum {
      VexArch_INVALID,
      VexArchX86,
      VexArchAMD64,
      VexArchARM,
      VexArchPPC32,
      VexArchPPC64
   }
   VexArch;


/* For a given architecture, these specify extra capabilities beyond
   the minimum supported (baseline) capabilities.  They may be OR'd
   together, although some combinations don't make sense.  (eg, SSE2
   but not SSE1).  LibVEX_Translate will check for nonsensical
   combinations. */

/* x86: baseline capability is Pentium-1 (FPU, MMX, but no SSE), with
   cmpxchg8b. */
#define VEX_HWCAPS_X86_SSE1    (1<<1)  /* SSE1 support (Pentium III) */
#define VEX_HWCAPS_X86_SSE2    (1<<2)  /* SSE2 support (Pentium 4) */
#define VEX_HWCAPS_X86_SSE3    (1<<3)  /* SSE3 support (>= Prescott) */
#define VEX_HWCAPS_X86_LZCNT   (1<<4)  /* SSE4a LZCNT insn */

/* amd64: baseline capability is SSE2, with cmpxchg8b but not
   cmpxchg16b. */
#define VEX_HWCAPS_AMD64_SSE3  (1<<5)  /* SSE3 support */
#define VEX_HWCAPS_AMD64_CX16  (1<<6)  /* cmpxchg16b support */
#define VEX_HWCAPS_AMD64_LZCNT (1<<7)  /* SSE4a LZCNT insn */

/* ppc32: baseline capability is integer only */
#define VEX_HWCAPS_PPC32_F     (1<<8)  /* basic (non-optional) FP */
#define VEX_HWCAPS_PPC32_V     (1<<9)  /* Altivec (VMX) */
#define VEX_HWCAPS_PPC32_FX    (1<<10) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC32_GX    (1<<11) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */

/* ppc64: baseline capability is integer and basic FP insns */
#define VEX_HWCAPS_PPC64_V     (1<<12) /* Altivec (VMX) */
#define VEX_HWCAPS_PPC64_FX    (1<<13) /* FP extns (fsqrt, fsqrts) */
#define VEX_HWCAPS_PPC64_GX    (1<<14) /* Graphics extns
                                          (fres,frsqrte,fsel,stfiwx) */

/* arm: baseline capability is ARMv4 */
/* Bits 5:0 - architecture level (e.g. 5 for v5, 6 for v6 etc) */
#define VEX_HWCAPS_ARM_VFP    (1<<6)  /* VFP extension */
#define VEX_HWCAPS_ARM_VFP2   (1<<7)  /* VFPv2 */
#define VEX_HWCAPS_ARM_VFP3   (1<<8)  /* VFPv3 */
/* Bits 15:10 reserved for (possible) future VFP revisions */
#define VEX_HWCAPS_ARM_NEON   (1<<16) /* Advanced SIMD also known as NEON */

/* Get an ARM architecture level from HWCAPS */
#define VEX_ARM_ARCHLEVEL(x) ((x) & 0x3f)

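/* Illustrative sketch (not part of the API): a hwcaps word is built by
   OR-ing the flags above onto the baseline.  The right values depend on
   the machine being modelled; these are just examples.

      // x86 guest with SSE1 and SSE2 (roughly Pentium 4 class)
      UInt hwcaps_x86 = VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2;

      // ARMv7 guest with VFPv3 and NEON; bits 5:0 carry the arch level
      UInt hwcaps_arm = 7 | VEX_HWCAPS_ARM_VFP3 | VEX_HWCAPS_ARM_NEON;
      // VEX_ARM_ARCHLEVEL(hwcaps_arm) == 7
*/
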
/* These return statically allocated strings. */

extern const HChar* LibVEX_ppVexArch    ( VexArch );
extern const HChar* LibVEX_ppVexHwCaps  ( VexArch, UInt );


/* This struct is a bit of a hack, but is needed to carry misc
   important bits of info about an arch.  Fields which are meaningless
   or ignored for the platform in question should be set to zero. */

typedef
   struct {
      /* This is the only mandatory field. */
      UInt hwcaps;
      /* PPC32/PPC64 only: size of cache line */
      Int ppc_cache_line_szB;
      /* PPC32/PPC64 only: sizes zeroed by the dcbz/dcbzl instructions
       * (bug#135264) */
      UInt ppc_dcbz_szB;
      UInt ppc_dcbzl_szB; /* 0 means unsupported (SIGILL) */
   }
   VexArchInfo;

/* Write default settings into *vai. */
extern
void LibVEX_default_VexArchInfo ( /*OUT*/VexArchInfo* vai );

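/* Usage sketch (illustrative only): start from the defaults, then
   override the fields that matter for the machine in question.  The
   hwcaps value shown here is just an example.

      VexArchInfo vai;
      LibVEX_default_VexArchInfo(&vai);
      vai.hwcaps = VEX_HWCAPS_X86_SSE1 | VEX_HWCAPS_X86_SSE2;
*/
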

/* This struct carries guest and host ABI variant information that may
   be needed.  Fields which are meaningless or ignored for the
   platform in question should be set to zero.

   Settings which are believed to be correct are:

   guest_stack_redzone_size
      guest is ppc32-linux                ==> 0
      guest is ppc64-linux                ==> 288
      guest is ppc32-aix5                 ==> 220
      guest is ppc64-aix5                 ==> unknown
      guest is amd64-linux                ==> 128
      guest is other                      ==> inapplicable

   guest_amd64_assume_fs_is_zero
      guest is amd64-linux                ==> True
      guest is amd64-darwin               ==> False
      guest is other                      ==> inapplicable

   guest_amd64_assume_gs_is_0x60
      guest is amd64-darwin               ==> True
      guest is amd64-linux                ==> False
      guest is other                      ==> inapplicable

   guest_ppc_zap_RZ_at_blr
      guest is ppc64-linux                ==> True
      guest is ppc32-linux                ==> False
      guest is ppc64-aix5                 ==> unknown
      guest is ppc32-aix5                 ==> False
      guest is other                      ==> inapplicable

   guest_ppc_zap_RZ_at_bl
      guest is ppc64-linux                ==> const True
      guest is ppc32-linux                ==> const False
      guest is ppc64-aix5                 ==> unknown
      guest is ppc32-aix5                 ==> True except for calls to
                                              millicode, $SAVEFn, $RESTFn
      guest is other                      ==> inapplicable

   guest_ppc_sc_continues_at_LR:
      guest is ppc32-aix5  or ppc64-aix5  ==> True
      guest is ppc32-linux or ppc64-linux ==> False
      guest is other                      ==> inapplicable

   host_ppc_calls_use_fndescrs:
      host is ppc32-linux                 ==> False
      host is ppc64-linux                 ==> True
      host is ppc32-aix5 or ppc64-aix5    ==> True
      host is other                       ==> inapplicable

   host_ppc32_regalign_int64_args:
      host is ppc32-linux                 ==> True
      host is ppc32-aix5                  ==> False
      host is other                       ==> inapplicable
*/

typedef
   struct {
      /* PPC and AMD64 GUESTS only: how many bytes below the
         stack pointer are validly addressable? */
      Int guest_stack_redzone_size;

      /* AMD64 GUESTS only: should we translate %fs-prefixed
         instructions using the assumption that %fs always contains
         zero? */
      Bool guest_amd64_assume_fs_is_zero;

      /* AMD64 GUESTS only: should we translate %gs-prefixed
         instructions using the assumption that %gs always contains
         0x60? */
      Bool guest_amd64_assume_gs_is_0x60;

      /* PPC GUESTS only: should we zap the stack red zone at a 'blr'
         (function return) ? */
      Bool guest_ppc_zap_RZ_at_blr;

      /* PPC GUESTS only: should we zap the stack red zone at a 'bl'
         (function call) ?  Is supplied with the guest address of the
         target of the call since that may be significant.  If NULL,
         is assumed equivalent to a fn which always returns False. */
      Bool (*guest_ppc_zap_RZ_at_bl)(Addr64);

      /* PPC32/PPC64 GUESTS only: where does the kernel resume after
         'sc'?  False => Linux style, at the next insn.  True => AIX
         style, at the address stated in the link register. */
      Bool guest_ppc_sc_continues_at_LR;

      /* PPC32/PPC64 HOSTS only: does '&f' give us a pointer to a
         function descriptor on the host, or to the function code
         itself?  True => descriptor, False => code. */
      Bool host_ppc_calls_use_fndescrs;

      /* PPC32 HOSTS only: when generating code to pass a 64-bit value
         (actual parameter) in a pair of regs, should we skip an arg
         reg if it is even-numbered?  True => yes, False => no. */
      Bool host_ppc32_regalign_int64_args;
   }
   VexAbiInfo;

/* Write default settings into *vbi. */
extern
void LibVEX_default_VexAbiInfo ( /*OUT*/VexAbiInfo* vbi );

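/* Usage sketch (illustrative only): as with VexArchInfo, take the
   defaults and then set the fields the table above prescribes for the
   guest/host pair in use, e.g. for an amd64-linux guest:

      VexAbiInfo vbi;
      LibVEX_default_VexAbiInfo(&vbi);
      vbi.guest_stack_redzone_size      = 128;
      vbi.guest_amd64_assume_fs_is_zero = True;
*/
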

/*-------------------------------------------------------*/
/*--- Control of Vex's optimiser (iropt).             ---*/
/*-------------------------------------------------------*/

/* Control of Vex's optimiser. */

typedef
   struct {
      /* Controls verbosity of iropt.  0 = no output. */
      Int iropt_verbosity;
      /* Control aggressiveness of iropt.  0 = no opt, 1 = simple
         opts, 2 (default) = max optimisation. */
      Int iropt_level;
      /* Ensure all integer registers are up to date at potential
         memory exception points?  True(default)=yes, False=no, only
         the guest's stack pointer. */
      Bool iropt_precise_memory_exns;
      /* How aggressive should iropt be in unrolling loops?  Higher
         numbers make it more enthusiastic about loop unrolling.
         Default=120.  A setting of zero disables unrolling. */
      Int iropt_unroll_thresh;
      /* What's the maximum basic block length the front end(s) allow?
         BBs longer than this are split up.  Default=50 (guest
         insns). */
      Int guest_max_insns;
      /* How aggressive should front ends be in following
         unconditional branches to known destinations?  Default=10,
         meaning that if a block contains less than 10 guest insns so
         far, the front end(s) will attempt to chase into its
         successor.  A setting of zero disables chasing. */
      Int guest_chase_thresh;
      /* EXPERIMENTAL: chase across conditional branches?  Not all
         front ends honour this.  Default: NO. */
      Bool guest_chase_cond;
   }
   VexControl;


/* Write the default settings into *vcon. */

extern
void LibVEX_default_VexControl ( /*OUT*/ VexControl* vcon );

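/* Usage sketch (illustrative only): the usual pattern is to take the
   defaults and tweak individual knobs before handing the struct to
   LibVEX_Init.

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      vcon.iropt_level        = 2;   // full optimisation
      vcon.guest_chase_thresh = 0;   // disable branch chasing
*/
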

/*-------------------------------------------------------*/
/*--- Storage management control                      ---*/
/*-------------------------------------------------------*/

/* Allocate in Vex's temporary allocation area.  Be careful with this.
   You can only call it inside an instrumentation or optimisation
   callback that you have previously specified in a call to
   LibVEX_Translate.  The storage allocated will only stay alive until
   translation of the current basic block is complete.
 */
extern HChar* private_LibVEX_alloc_first;
extern HChar* private_LibVEX_alloc_curr;
extern HChar* private_LibVEX_alloc_last;
extern void   private_LibVEX_alloc_OOM(void) __attribute__((noreturn));

static inline void* LibVEX_Alloc ( Int nbytes )
{
#if 0
  /* Nasty debugging hack, do not use. */
  return malloc(nbytes);
#else
   HChar* curr;
   HChar* next;
   Int    ALIGN;
   /* Round the request up to a multiple of the pointer size. */
   ALIGN  = sizeof(void*)-1;
   nbytes = (nbytes + ALIGN) & ~ALIGN;
   /* Bump-allocate from the temporary area; give up if exhausted. */
   curr   = private_LibVEX_alloc_curr;
   next   = curr + nbytes;
   if (next >= private_LibVEX_alloc_last)
      private_LibVEX_alloc_OOM();
   private_LibVEX_alloc_curr = next;
   return curr;
#endif
}

/* Show Vex allocation statistics. */
extern void LibVEX_ShowAllocStats ( void );

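/* Usage sketch (illustrative only; the callback name and the exact IR
   being built are hypothetical): LibVEX_Alloc is only legal inside an
   instrumentation or optimisation callback, where it can be used for
   scratch storage that lives until the current block is translated.

      static IRSB* my_instrument ( void* opaque, IRSB* sbIn,
                                   VexGuestLayout* layout,
                                   VexGuestExtents* vge,
                                   IRType gWordTy, IRType hWordTy )
      {
         IRExpr** scratch = LibVEX_Alloc(3 * sizeof(IRExpr*));
         // ... fill in scratch, add statements to sbIn ...
         return sbIn;
      }
*/
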

/*-------------------------------------------------------*/
/*--- Describing guest state layout                   ---*/
/*-------------------------------------------------------*/

/* Describe the guest state enough that the instrumentation
   functions can work. */

/* The max number of guest state chunks which we can describe as
   always defined (for the benefit of Memcheck). */
#define VEXGLO_N_ALWAYSDEFD  24

typedef
   struct {
      /* Total size of the guest state, in bytes.  Must be
         8-aligned. */
      Int total_sizeB;
      /* Whereabouts is the stack pointer? */
      Int offset_SP;
      Int sizeof_SP; /* 4 or 8 */
      /* Whereabouts is the frame pointer? */
      Int offset_FP;
      Int sizeof_FP; /* 4 or 8 */
      /* Whereabouts is the instruction pointer? */
      Int offset_IP;
      Int sizeof_IP; /* 4 or 8 */
      /* Describe parts of the guest state regarded as 'always
         defined'. */
      Int n_alwaysDefd;
      struct {
         Int offset;
         Int size;
      } alwaysDefd[VEXGLO_N_ALWAYSDEFD];
   }
   VexGuestLayout;

/* A note about guest state layout.

   LibVEX defines the layout for the guest state, in the file
   pub/libvex_guest_<arch>.h.  The struct will have a 16-aligned
   size.  Each translated bb is assumed to be entered with a specified
   register pointing at such a struct.  Beyond that are two copies of
   the shadow state area with the same size as the struct.  Beyond
   that is a spill area that LibVEX may spill into.  It must have size
   LibVEX_N_SPILL_BYTES, and this must be a 16-aligned number.

   On entry, the baseblock pointer register must be 16-aligned.

   There must be no holes in between the primary guest state, its two
   copies, and the spill area.  In short, all 4 areas must have a
   16-aligned size and be 16-aligned, and placed back-to-back.
*/

#define LibVEX_N_SPILL_BYTES 4096

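/* Layout sketch (derived from the note above): if the guest state
   struct for the chosen guest has size S (already 16-aligned), the
   baseblock occupies

      [0,     S)                primary guest state
      [S,     2*S)              shadow state, copy 1
      [2*S,   3*S)              shadow state, copy 2
      [3*S,   3*S + 4096)       spill area (LibVEX_N_SPILL_BYTES)

   i.e. 3*S + LibVEX_N_SPILL_BYTES contiguous, 16-aligned bytes, with
   the baseblock pointer register aimed at offset 0.
*/
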

/*-------------------------------------------------------*/
/*--- Initialisation of the library                   ---*/
/*-------------------------------------------------------*/

/* Initialise the library.  You must call this first. */

extern void LibVEX_Init (
   /* failure exit function */
   __attribute__ ((noreturn))
   void (*failure_exit) ( void ),
   /* logging output function */
   void (*log_bytes) ( HChar*, Int nbytes ),
   /* debug paranoia level */
   Int debuglevel,
   /* Are we supporting valgrind checking? */
   Bool valgrind_support,
   /* Control ... */
   /*READONLY*/VexControl* vcon
);

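/* Usage sketch (illustrative only; the callback names are hypothetical
   and assume <stdio.h>/<stdlib.h> are available):

      static __attribute__((noreturn)) void my_failure_exit ( void )
         { exit(1); }
      static void my_log_bytes ( HChar* buf, Int nbytes )
         { fwrite(buf, 1, nbytes, stderr); }

      VexControl vcon;
      LibVEX_default_VexControl(&vcon);
      LibVEX_Init( &my_failure_exit, &my_log_bytes,
                   /*debuglevel*/0, /*valgrind_support*/False, &vcon );
*/
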

/*-------------------------------------------------------*/
/*--- Make a translation                              ---*/
/*-------------------------------------------------------*/

/* Describes the outcome of a translation attempt. */
typedef
   enum {
      VexTransOK,
      VexTransAccessFail,
      VexTransOutputFull
   }
   VexTranslateResult;


/* Describes precisely the pieces of guest code that a translation
   covers.  Now that Vex can chase across BB boundaries, the old
   scheme of describing a chunk of guest code merely by its start
   address and length is inadequate.

   Hopefully this struct is only 32 bytes long.  Space is important as
   clients will have to store one of these for each translation made.
*/
typedef
   struct {
      Addr64 base[3];
      UShort len[3];
      UShort n_used;
   }
   VexGuestExtents;


/* A structure to carry arguments for LibVEX_Translate.  There are so
   many of them, it seems better to have a structure. */
typedef
   struct {
      /* IN: The instruction sets we are translating from and to.  And
         guest/host misc info. */
      VexArch      arch_guest;
      VexArchInfo  archinfo_guest;
      VexArch      arch_host;
      VexArchInfo  archinfo_host;
      VexAbiInfo   abiinfo_both;

      /* IN: an opaque value which is passed as the first arg to all
         callback functions supplied in this struct.  Vex has no idea
         what's at the other end of this pointer. */
      void*   callback_opaque;

      /* IN: the block to translate, and its guest address. */
      /* where are the actual bytes in the host's address space? */
      UChar*  guest_bytes;
      /* where do the bytes really come from in the guest's aspace?
         This is the post-redirection guest address.  Not that Vex
         understands anything about redirection; that is all done on
         the Valgrind side. */
      Addr64  guest_bytes_addr;

      /* Is it OK to chase into this guest address?  May not be
         NULL. */
      Bool    (*chase_into_ok) ( /*callback_opaque*/void*, Addr64 );

      /* OUT: which bits of guest code actually got translated */
      VexGuestExtents* guest_extents;

      /* IN: a place to put the resulting code, and its size */
      UChar*  host_bytes;
      Int     host_bytes_size;
      /* OUT: how much of the output area is used. */
      Int*    host_bytes_used;

      /* IN: optionally, two instrumentation functions.  May be
         NULL. */
      IRSB*   (*instrument1) ( /*callback_opaque*/void*,
                               IRSB*,
                               VexGuestLayout*,
                               VexGuestExtents*,
                               IRType gWordTy, IRType hWordTy );
      IRSB*   (*instrument2) ( /*callback_opaque*/void*,
                               IRSB*,
                               VexGuestLayout*,
                               VexGuestExtents*,
                               IRType gWordTy, IRType hWordTy );

      IRSB* (*finaltidy) ( IRSB* );

      /* IN: should this translation be self-checking?  default: False */
      Bool    do_self_check;

      /* IN: optionally, a callback which allows the caller to add its
         own IR preamble following the self-check and any other
         VEX-generated preamble, if any.  May be NULL.  If non-NULL,
         the IRSB under construction is handed to this function, which
         presumably adds IR statements to it.  The callback may
         optionally complete the block and direct bb_to_IR not to
         disassemble any instructions into it; this is indicated by
         the callback returning True.
      */
      Bool    (*preamble_function)(/*callback_opaque*/void*, IRSB*);

      /* IN: debug: trace vex activity at various points */
      Int     traceflags;

      /* IN: address of the dispatcher entry point.  Describes the
         place where generated code should jump to at the end of each
         bb.

         At the end of each translation, the next guest address is
         placed in the host's standard return register (x86: %eax,
         amd64: %rax, ppc32: %r3, ppc64: %r3).  Optionally, the guest
         state pointer register (on host x86: %ebp; amd64: %rbp;
         ppc32/64: r31) may be set to a VEX_TRC_ value to indicate any
         special action required before the next block is run.

         Control is then passed back to the dispatcher (beyond Vex's
         control; caller supplies this) in the following way:

         - On host archs which lack a link register (x86, amd64), by a
           jump to the host address specified in 'dispatch', which
           must be non-NULL.

         - On host archs which have a link register (ppc32, ppc64), by
           a branch to the link register (which is guaranteed to be
           unchanged from whatever it was at entry to the
           translation).  'dispatch' must be NULL.

         The aim is to get back and forth between translations and the
         dispatcher without creating memory traffic to store return
         addresses.
      */
      void* dispatch;
   }
   VexTranslateArgs;


extern
VexTranslateResult LibVEX_Translate ( VexTranslateArgs* );

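/* Usage sketch (illustrative only; the helper names, buffer size and
   the x86-to-x86 setup are hypothetical):

      VexTranslateArgs vta;
      VexGuestExtents  vge;
      UChar            hbuf[8192];
      Int              hbuf_used;

      LibVEX_default_VexArchInfo(&vta.archinfo_guest);
      LibVEX_default_VexArchInfo(&vta.archinfo_host);
      LibVEX_default_VexAbiInfo(&vta.abiinfo_both);
      vta.arch_guest        = VexArchX86;
      vta.arch_host         = VexArchX86;
      vta.callback_opaque   = NULL;
      vta.guest_bytes       = code_ptr;        // host pointer to guest insns
      vta.guest_bytes_addr  = code_guest_addr; // guest address of those insns
      vta.chase_into_ok     = my_chase_into_ok;
      vta.guest_extents     = &vge;
      vta.host_bytes        = hbuf;
      vta.host_bytes_size   = sizeof(hbuf);
      vta.host_bytes_used   = &hbuf_used;
      vta.instrument1       = my_instrument;   // or NULL
      vta.instrument2       = NULL;
      vta.finaltidy         = NULL;
      vta.do_self_check     = False;
      vta.preamble_function = NULL;
      vta.traceflags        = 0;
      vta.dispatch          = dispatcher_addr; // non-NULL on x86 hosts

      VexTranslateResult res = LibVEX_Translate(&vta);
      // res == VexTransOK on success; hbuf[0 .. hbuf_used-1] holds the
      // generated code and vge records which guest code it covers.
*/
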
/* A subtlety re interaction between self-checking translations and
   bb-chasing.  The supplied chase_into_ok function should say NO
   (False) when presented with any address for which you might want to
   make a self-checking translation.

   If it doesn't do that, you may end up with Vex chasing from BB #1
   to BB #2 (fine); but if you wanted checking for #2 and not #1, that
   would not be the result.  Therefore chase_into_ok should disallow
   following into #2.  That will force the caller to eventually
   request a new translation starting at #2, at which point Vex will
   correctly observe the make-a-self-check flag.  */


/*-------------------------------------------------------*/
/*--- Show accumulated statistics                     ---*/
/*-------------------------------------------------------*/

extern void LibVEX_ShowStats ( void );


/*-------------------------------------------------------*/
/*--- Notes                                           ---*/
/*-------------------------------------------------------*/

/* Code generation conventions that need to be recorded somewhere.
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   x86
   ~~~
   Generated code should be entered using a JMP instruction.  On
   entry, %ebp should point to the guest state, and %esp should be a
   valid stack pointer.  The generated code may change %eax, %ebx,
   %ecx, %edx, %esi, %edi, all the FP registers and control state, and
   all the XMM registers.

   On entry, the FPU control word should be set to 0x027F, and the SSE
   control word (%mxcsr) should be set to 0x1F80.  On exit, they
   should still have those values (after masking off the lowest 6 bits
   of %mxcsr).  If they don't, there is a bug in VEX-generated code.

   Generated code returns to the scheduler using a JMP instruction, to
   the address specified in the .dispatch field of VexTranslateArgs.
   %eax (or %eax:%edx, if simulating a 64-bit target) will contain the
   guest address of the next block to execute.  %ebp may be changed
   to a VEX_TRC_ value, otherwise it should be as it was at entry.

   CRITICAL ISSUES in x86 code generation.  The only known critical
   issue is that the host FPU and SSE state is not properly saved
   across calls to helper functions.  If any helper references any
   such state, it is likely (1) to misbehave itself, since the FP
   stack tags will not be as expected, and (2) after returning to
   generated code, the generated code is likely to go wrong.  This
   really should be fixed.

   amd64
   ~~~~~
   Analogous to x86.

   ppc32
   ~~~~~
   On entry, guest state pointer is r31.  .dispatch must be NULL.
   Control is returned with a branch to the link register.  Generated
   code will not change lr.  At return, r3 holds the next guest addr
   (or r3:r4 ?).  r31 may be changed to a VEX_TRC_ value,
   otherwise it should be as it was at entry.

   ppc64
   ~~~~~
   Same as ppc32.

   ALL GUEST ARCHITECTURES
   ~~~~~~~~~~~~~~~~~~~~~~~
   The guest state must contain two pseudo-registers, guest_TISTART
   and guest_TILEN.  These are used to pass the address of areas of
   guest code, translations of which are to be invalidated, back to
   the dispatcher.  Both pseudo-regs must have size equal to the guest
   word size.

   The architecture must provide a third pseudo-register, guest_NRADDR,
   also guest-word-sized.  This is used to record the unredirected
   guest address at the start of a translation whose start has been
   redirected.  By reading this pseudo-register shortly afterwards,
   the translation can find out what the corresponding no-redirection
   address was.  Note, this is only set for wrap-style redirects, not
   for replace-style ones.
*/
#endif /* ndef __LIBVEX_H */

/*---------------------------------------------------------------*/
/*---                                                libvex.h ---*/
/*---------------------------------------------------------------*/