• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 
2 /*---------------------------------------------------------------*/
3 /*--- begin                                       libvex_ir.h ---*/
4 /*---------------------------------------------------------------*/
5 
6 /*
7    This file is part of Valgrind, a dynamic binary instrumentation
8    framework.
9 
10    Copyright (C) 2004-2017 OpenWorks LLP
11       info@open-works.net
12 
13    This program is free software; you can redistribute it and/or
14    modify it under the terms of the GNU General Public License as
15    published by the Free Software Foundation; either version 2 of the
16    License, or (at your option) any later version.
17 
18    This program is distributed in the hope that it will be useful, but
19    WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21    General Public License for more details.
22 
23    You should have received a copy of the GNU General Public License
24    along with this program; if not, write to the Free Software
25    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26    02110-1301, USA.
27 
28    The GNU General Public License is contained in the file COPYING.
29 
30    Neither the names of the U.S. Department of Energy nor the
31    University of California nor the names of its contributors may be
32    used to endorse or promote products derived from this software
33    without prior written permission.
34 */
35 
36 #ifndef __LIBVEX_IR_H
37 #define __LIBVEX_IR_H
38 
39 #include "libvex_basictypes.h"
40 
41 
42 /*---------------------------------------------------------------*/
43 /*--- High-level IR description                               ---*/
44 /*---------------------------------------------------------------*/
45 
46 /* Vex IR is an architecture-neutral intermediate representation.
47    Unlike some IRs in systems similar to Vex, it is not like assembly
48    language (ie. a list of instructions).  Rather, it is more like the
49    IR that might be used in a compiler.
50 
51    Code blocks
52    ~~~~~~~~~~~
53    The code is broken into small code blocks ("superblocks", type:
54    'IRSB').  Each code block typically represents from 1 to perhaps 50
55    instructions.  IRSBs are single-entry, multiple-exit code blocks.
56    Each IRSB contains three things:
57    - a type environment, which indicates the type of each temporary
58      value present in the IRSB
59    - a list of statements, which represent code
60    - a jump that exits from the end the IRSB
61    Because the blocks are multiple-exit, there can be additional
62    conditional exit statements that cause control to leave the IRSB
63    before the final exit.  Also because of this, IRSBs can cover
64    multiple non-consecutive sequences of code (up to 3).  These are
65    recorded in the type VexGuestExtents (see libvex.h).
66 
67    Statements and expressions
68    ~~~~~~~~~~~~~~~~~~~~~~~~~~
69    Statements (type 'IRStmt') represent operations with side-effects,
70    eg.  guest register writes, stores, and assignments to temporaries.
71    Expressions (type 'IRExpr') represent operations without
72    side-effects, eg. arithmetic operations, loads, constants.
73    Expressions can contain sub-expressions, forming expression trees,
74    eg. (3 + (4 * load(addr1)).
75 
76    Storage of guest state
77    ~~~~~~~~~~~~~~~~~~~~~~
78    The "guest state" contains the guest registers of the guest machine
79    (ie.  the machine that we are simulating).  It is stored by default
80    in a block of memory supplied by the user of the VEX library,
81    generally referred to as the guest state (area).  To operate on
82    these registers, one must first read ("Get") them from the guest
83    state into a temporary value.  Afterwards, one can write ("Put")
84    them back into the guest state.
85 
86    Get and Put are characterised by a byte offset into the guest
87    state, a small integer which effectively gives the identity of the
88    referenced guest register, and a type, which indicates the size of
89    the value to be transferred.
90 
91    The basic "Get" and "Put" operations are sufficient to model normal
92    fixed registers on the guest.  Selected areas of the guest state
93    can be treated as a circular array of registers (type:
94    'IRRegArray'), which can be indexed at run-time.  This is done with
95    the "GetI" and "PutI" primitives.  This is necessary to describe
96    rotating register files, for example the x87 FPU stack, SPARC
97    register windows, and the Itanium register files.
98 
99    Examples, and flattened vs. unflattened code
100    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
101    For example, consider this x86 instruction:
102 
103      addl %eax, %ebx
104 
105    One Vex IR translation for this code would be this:
106 
107      ------ IMark(0x24F275, 7, 0) ------
108      t3 = GET:I32(0)             # get %eax, a 32-bit integer
109      t2 = GET:I32(12)            # get %ebx, a 32-bit integer
110      t1 = Add32(t3,t2)           # addl
111      PUT(0) = t1                 # put %eax
112 
113    (For simplicity, this ignores the effects on the condition codes, and
114    the update of the instruction pointer.)
115 
116    The "IMark" is an IR statement that doesn't represent actual code.
117    Instead it indicates the address and length of the original
118    instruction.  The numbers 0 and 12 are offsets into the guest state
119    for %eax and %ebx.  The full list of offsets for an architecture
120    <ARCH> can be found in the type VexGuest<ARCH>State in the file
121    VEX/pub/libvex_guest_<ARCH>.h.
122 
123    The five statements in this example are:
124    - the IMark
125    - three assignments to temporaries
126    - one register write (put)
127 
128    The six expressions in this example are:
129    - two register reads (gets)
130    - one arithmetic (add) operation
131    - three temporaries (two nested within the Add32, one in the PUT)
132 
133    The above IR is "flattened", ie. all sub-expressions are "atoms",
134    either constants or temporaries.  An equivalent, unflattened version
135    would be:
136 
137      PUT(0) = Add32(GET:I32(0), GET:I32(12))
138 
139    IR is guaranteed to be flattened at instrumentation-time.  This makes
140    instrumentation easier.  Equivalent flattened and unflattened IR
141    typically results in the same generated code.
142 
143    Another example, this one showing loads and stores:
144 
145      addl %edx,4(%eax)
146 
147    This becomes (again ignoring condition code and instruction pointer
148    updates):
149 
150      ------ IMark(0x4000ABA, 3, 0) ------
151      t3 = Add32(GET:I32(0),0x4:I32)
152      t2 = LDle:I32(t3)
153      t1 = GET:I32(8)
154      t0 = Add32(t2,t1)
155      STle(t3) = t0
156 
157    The "le" in "LDle" and "STle" is short for "little-endian".
158 
159    No need for deallocations
160    ~~~~~~~~~~~~~~~~~~~~~~~~~
161    Although there are allocation functions for various data structures
162    in this file, there are no deallocation functions.  This is because
163    Vex uses a memory allocation scheme that automatically reclaims the
164    memory used by allocated structures once translation is completed.
165    This makes things easier for tools that instruments/transforms code
166    blocks.
167 
168    SSAness and typing
169    ~~~~~~~~~~~~~~~~~~
170    The IR is fully typed.  For every IRSB (IR block) it is possible to
171    say unambiguously whether or not it is correctly typed.
172    Incorrectly typed IR has no meaning and the VEX will refuse to
173    process it.  At various points during processing VEX typechecks the
174    IR and aborts if any violations are found.  This seems overkill but
175    makes it a great deal easier to build a reliable JIT.
176 
177    IR also has the SSA property.  SSA stands for Static Single
178    Assignment, and what it means is that each IR temporary may be
179    assigned to only once.  This idea became widely used in compiler
180    construction in the mid to late 90s.  It makes many IR-level
181    transformations/code improvements easier, simpler and faster.
182    Whenever it typechecks an IR block, VEX also checks the SSA
183    property holds, and will abort if not so.  So SSAness is
184    mechanically and rigidly enforced.
185 */
186 
187 /*---------------------------------------------------------------*/
188 /*--- Type definitions for the IR                             ---*/
189 /*---------------------------------------------------------------*/
190 
191 /* General comments about naming schemes:
192 
193    All publically visible functions contain the name of the primary
194    type on which they operate (IRFoo, IRBar, etc).  Hence you should
195    be able to identify these functions by grepping for "IR[A-Z]".
196 
197    For some type 'IRFoo':
198 
199    - ppIRFoo is the printing method for IRFoo, printing it to the
200      output channel specified in the LibVEX_Initialise call.
201 
202    - eqIRFoo is a structural equality predicate for IRFoos.
203 
204    - deepCopyIRFoo is a deep copy constructor for IRFoos.
205      It recursively traverses the entire argument tree and
206      produces a complete new tree.  All types have a deep copy
207      constructor.
208 
209    - shallowCopyIRFoo is the shallow copy constructor for IRFoos.
210      It creates a new top-level copy of the supplied object,
211      but does not copy any sub-objects.  Only some types have a
212      shallow copy constructor.
213 */
214 
215 /* ------------------ Types ------------------ */
216 
217 /* A type indicates the size of a value, and whether it's an integer, a
218    float, or a vector (SIMD) value. */
219 typedef
220    enum {
221       Ity_INVALID=0x1100,
222       Ity_I1,
223       Ity_I8,
224       Ity_I16,
225       Ity_I32,
226       Ity_I64,
227       Ity_I128,  /* 128-bit scalar */
228       Ity_F16,   /* 16 bit float */
229       Ity_F32,   /* IEEE 754 float */
230       Ity_F64,   /* IEEE 754 double */
231       Ity_D32,   /* 32-bit Decimal floating point */
232       Ity_D64,   /* 64-bit Decimal floating point */
233       Ity_D128,  /* 128-bit Decimal floating point */
234       Ity_F128,  /* 128-bit floating point; implementation defined */
235       Ity_V128,  /* 128-bit SIMD */
236       Ity_V256   /* 256-bit SIMD */
237    }
238    IRType;
239 
240 /* Pretty-print an IRType */
241 extern void ppIRType ( IRType );
242 
243 /* Get the size (in bytes) of an IRType */
244 extern Int sizeofIRType ( IRType );
245 
246 /* Translate 1/2/4/8 into Ity_I{8,16,32,64} respectively.  Asserts on
247    any other input. */
248 extern IRType integerIRTypeOfSize ( Int szB );
249 
250 
251 /* ------------------ Endianness ------------------ */
252 
253 /* IREndness is used in load IRExprs and store IRStmts. */
254 typedef
255    enum {
256       Iend_LE=0x1200, /* little endian */
257       Iend_BE          /* big endian */
258    }
259    IREndness;
260 
261 
262 /* ------------------ Constants ------------------ */
263 
264 /* IRConsts are used within 'Const' and 'Exit' IRExprs. */
265 
266 /* The various kinds of constant. */
267 typedef
268    enum {
269       Ico_U1=0x1300,
270       Ico_U8,
271       Ico_U16,
272       Ico_U32,
273       Ico_U64,
274       Ico_F32,   /* 32-bit IEEE754 floating */
275       Ico_F32i,  /* 32-bit unsigned int to be interpreted literally
276                     as a IEEE754 single value. */
277       Ico_F64,   /* 64-bit IEEE754 floating */
278       Ico_F64i,  /* 64-bit unsigned int to be interpreted literally
279                     as a IEEE754 double value. */
280       Ico_V128,  /* 128-bit restricted vector constant, with 1 bit
281                     (repeated 8 times) for each of the 16 x 1-byte lanes */
282       Ico_V256   /* 256-bit restricted vector constant, with 1 bit
283                     (repeated 8 times) for each of the 32 x 1-byte lanes */
284    }
285    IRConstTag;
286 
287 /* A constant.  Stored as a tagged union.  'tag' indicates what kind of
288    constant this is.  'Ico' is the union that holds the fields.  If an
289    IRConst 'c' has c.tag equal to Ico_U32, then it's a 32-bit constant,
290    and its value can be accessed with 'c.Ico.U32'. */
291 typedef
292    struct _IRConst {
293       IRConstTag tag;
294       union {
295          Bool   U1;
296          UChar  U8;
297          UShort U16;
298          UInt   U32;
299          ULong  U64;
300          Float  F32;
301          UInt   F32i;
302          Double F64;
303          ULong  F64i;
304          UShort V128;   /* 16-bit value; see Ico_V128 comment above */
305          UInt   V256;   /* 32-bit value; see Ico_V256 comment above */
306       } Ico;
307    }
308    IRConst;
309 
310 /* IRConst constructors */
311 extern IRConst* IRConst_U1   ( Bool );
312 extern IRConst* IRConst_U8   ( UChar );
313 extern IRConst* IRConst_U16  ( UShort );
314 extern IRConst* IRConst_U32  ( UInt );
315 extern IRConst* IRConst_U64  ( ULong );
316 extern IRConst* IRConst_F32  ( Float );
317 extern IRConst* IRConst_F32i ( UInt );
318 extern IRConst* IRConst_F64  ( Double );
319 extern IRConst* IRConst_F64i ( ULong );
320 extern IRConst* IRConst_V128 ( UShort );
321 extern IRConst* IRConst_V256 ( UInt );
322 
323 /* Deep-copy an IRConst */
324 extern IRConst* deepCopyIRConst ( const IRConst* );
325 
326 /* Pretty-print an IRConst */
327 extern void ppIRConst ( const IRConst* );
328 
329 /* Compare two IRConsts for equality */
330 extern Bool eqIRConst ( const IRConst*, const IRConst* );
331 
332 
333 /* ------------------ Call targets ------------------ */
334 
335 /* Describes a helper function to call.  The name part is purely for
336    pretty printing and not actually used.  regparms=n tells the back
337    end that the callee has been declared
338    "__attribute__((regparm(n)))", although indirectly using the
339    VEX_REGPARM(n) macro.  On some targets (x86) the back end will need
340    to construct a non-standard sequence to call a function declared
341    like this.
342 
343    mcx_mask is a sop to Memcheck.  It indicates which args should be
344    considered 'always defined' when lazily computing definedness of
345    the result.  Bit 0 of mcx_mask corresponds to args[0], bit 1 to
346    args[1], etc.  If a bit is set, the corresponding arg is excluded
347    (hence "x" in "mcx") from definedness checking.
348 */
349 
350 typedef
351    struct {
352       Int          regparms;
353       const HChar* name;
354       void*        addr;
355       UInt         mcx_mask;
356    }
357    IRCallee;
358 
359 /* Create an IRCallee. */
360 extern IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr );
361 
362 /* Deep-copy an IRCallee. */
363 extern IRCallee* deepCopyIRCallee ( const IRCallee* );
364 
365 /* Pretty-print an IRCallee. */
366 extern void ppIRCallee ( const IRCallee* );
367 
368 
369 /* ------------------ Guest state arrays ------------------ */
370 
371 /* This describes a section of the guest state that we want to
372    be able to index at run time, so as to be able to describe
373    indexed or rotating register files on the guest. */
374 typedef
375    struct {
376       Int    base;   /* guest state offset of start of indexed area */
377       IRType elemTy; /* type of each element in the indexed area */
378       Int    nElems; /* number of elements in the indexed area */
379    }
380    IRRegArray;
381 
382 extern IRRegArray* mkIRRegArray ( Int, IRType, Int );
383 
384 extern IRRegArray* deepCopyIRRegArray ( const IRRegArray* );
385 
386 extern void ppIRRegArray ( const IRRegArray* );
387 extern Bool eqIRRegArray ( const IRRegArray*, const IRRegArray* );
388 
389 
390 /* ------------------ Temporaries ------------------ */
391 
392 /* This represents a temporary, eg. t1.  The IR optimiser relies on the
393    fact that IRTemps are 32-bit ints.  Do not change them to be ints of
394    any other size. */
395 typedef UInt IRTemp;
396 
397 /* Pretty-print an IRTemp. */
398 extern void ppIRTemp ( IRTemp );
399 
400 #define IRTemp_INVALID ((IRTemp)0xFFFFFFFF)
401 
402 
403 /* --------------- Primops (arity 1,2,3 and 4) --------------- */
404 
405 /* Primitive operations that are used in Unop, Binop, Triop and Qop
406    IRExprs.  Once we take into account integer, floating point and SIMD
407    operations of all the different sizes, there are quite a lot of them.
408    Most instructions supported by the architectures that Vex supports
409    (x86, PPC, etc) are represented.  Some more obscure ones (eg. cpuid)
410    are not;  they are instead handled with dirty helpers that emulate
411    their functionality.  Such obscure ones are thus not directly visible
412    in the IR, but their effects on guest state (memory and registers)
413    are made visible via the annotations in IRDirty structures.
414 */
415 typedef
416    enum {
417       /* -- Do not change this ordering.  The IR generators rely on
418             (eg) Iop_Add64 == IopAdd8 + 3. -- */
419 
420       Iop_INVALID=0x1400,
421       Iop_Add8,  Iop_Add16,  Iop_Add32,  Iop_Add64,
422       Iop_Sub8,  Iop_Sub16,  Iop_Sub32,  Iop_Sub64,
423       /* Signless mul.  MullS/MullU is elsewhere. */
424       Iop_Mul8,  Iop_Mul16,  Iop_Mul32,  Iop_Mul64,
425       Iop_Or8,   Iop_Or16,   Iop_Or32,   Iop_Or64,
426       Iop_And8,  Iop_And16,  Iop_And32,  Iop_And64,
427       Iop_Xor8,  Iop_Xor16,  Iop_Xor32,  Iop_Xor64,
428       Iop_Shl8,  Iop_Shl16,  Iop_Shl32,  Iop_Shl64,
429       Iop_Shr8,  Iop_Shr16,  Iop_Shr32,  Iop_Shr64,
430       Iop_Sar8,  Iop_Sar16,  Iop_Sar32,  Iop_Sar64,
431       /* Integer comparisons. */
432       Iop_CmpEQ8,  Iop_CmpEQ16,  Iop_CmpEQ32,  Iop_CmpEQ64,
433       Iop_CmpNE8,  Iop_CmpNE16,  Iop_CmpNE32,  Iop_CmpNE64,
434       /* Tags for unary ops */
435       Iop_Not8,  Iop_Not16,  Iop_Not32,  Iop_Not64,
436 
437       /* Exactly like CmpEQ8/16/32/64, but carrying the additional
438          hint that these compute the success/failure of a CAS
439          operation, and hence are almost certainly applied to two
440          copies of the same value, which in turn has implications for
441          Memcheck's instrumentation. */
442       Iop_CasCmpEQ8, Iop_CasCmpEQ16, Iop_CasCmpEQ32, Iop_CasCmpEQ64,
443       Iop_CasCmpNE8, Iop_CasCmpNE16, Iop_CasCmpNE32, Iop_CasCmpNE64,
444 
445       /* Exactly like CmpNE8/16/32/64, but carrying the additional
446          hint that these needs expensive definedness tracking. */
447       Iop_ExpCmpNE8, Iop_ExpCmpNE16, Iop_ExpCmpNE32, Iop_ExpCmpNE64,
448 
449       /* -- Ordering not important after here. -- */
450 
451       /* Widening multiplies */
452       Iop_MullS8, Iop_MullS16, Iop_MullS32, Iop_MullS64,
453       Iop_MullU8, Iop_MullU16, Iop_MullU32, Iop_MullU64,
454 
455       /* Wierdo integer stuff */
456       Iop_Clz64, Iop_Clz32,   /* count leading zeroes */
457       Iop_Ctz64, Iop_Ctz32,   /* count trailing zeros */
458       /* Ctz64/Ctz32/Clz64/Clz32 are UNDEFINED when given arguments of
459          zero.  You must ensure they are never given a zero argument.
460       */
461 
462       /* Standard integer comparisons */
463       Iop_CmpLT32S, Iop_CmpLT64S,
464       Iop_CmpLE32S, Iop_CmpLE64S,
465       Iop_CmpLT32U, Iop_CmpLT64U,
466       Iop_CmpLE32U, Iop_CmpLE64U,
467 
468       /* As a sop to Valgrind-Memcheck, the following are useful. */
469       Iop_CmpNEZ8, Iop_CmpNEZ16,  Iop_CmpNEZ32,  Iop_CmpNEZ64,
470       Iop_CmpwNEZ32, Iop_CmpwNEZ64, /* all-0s -> all-Os; other -> all-1s */
471       Iop_Left8, Iop_Left16, Iop_Left32, Iop_Left64, /*  \x -> x | -x */
472       Iop_Max32U, /* unsigned max */
473 
474       /* PowerPC-style 3-way integer comparisons.  Without them it is
475          difficult to simulate PPC efficiently.
476          op(x,y) | x < y  = 0x8 else
477                  | x > y  = 0x4 else
478                  | x == y = 0x2
479       */
480       Iop_CmpORD32U, Iop_CmpORD64U,
481       Iop_CmpORD32S, Iop_CmpORD64S,
482 
483       /* Division */
484       /* TODO: clarify semantics wrt rounding, negative values, whatever */
485       Iop_DivU32,   // :: I32,I32 -> I32 (simple div, no mod)
486       Iop_DivS32,   // ditto, signed
487       Iop_DivU64,   // :: I64,I64 -> I64 (simple div, no mod)
488       Iop_DivS64,   // ditto, signed
489       Iop_DivU64E,  // :: I64,I64 -> I64 (dividend is 64-bit arg (hi)
490                     //                    concat with 64 0's (low))
491       Iop_DivS64E,  // ditto, signed
492       Iop_DivU32E,  // :: I32,I32 -> I32 (dividend is 32-bit arg (hi)
493                     // concat with 32 0's (low))
494       Iop_DivS32E,  // ditto, signed
495 
496       Iop_DivModU64to32, // :: I64,I32 -> I64
497                          // of which lo half is div and hi half is mod
498       Iop_DivModS64to32, // ditto, signed
499 
500       Iop_DivModU128to64, // :: V128,I64 -> V128
501                           // of which lo half is div and hi half is mod
502       Iop_DivModS128to64, // ditto, signed
503 
504       Iop_DivModS64to64, // :: I64,I64 -> I128
505                          // of which lo half is div and hi half is mod
506 
507       /* Integer conversions.  Some of these are redundant (eg
508          Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but
509          having a complete set reduces the typical dynamic size of IR
510          and makes the instruction selectors easier to write. */
511 
512       /* Widening conversions */
513       Iop_8Uto16, Iop_8Uto32,  Iop_8Uto64,
514                   Iop_16Uto32, Iop_16Uto64,
515                                Iop_32Uto64,
516       Iop_8Sto16, Iop_8Sto32,  Iop_8Sto64,
517                   Iop_16Sto32, Iop_16Sto64,
518                                Iop_32Sto64,
519 
520       /* Narrowing conversions */
521       Iop_64to8, Iop_32to8, Iop_64to16,
522       /* 8 <-> 16 bit conversions */
523       Iop_16to8,      // :: I16 -> I8, low half
524       Iop_16HIto8,    // :: I16 -> I8, high half
525       Iop_8HLto16,    // :: (I8,I8) -> I16
526       /* 16 <-> 32 bit conversions */
527       Iop_32to16,     // :: I32 -> I16, low half
528       Iop_32HIto16,   // :: I32 -> I16, high half
529       Iop_16HLto32,   // :: (I16,I16) -> I32
530       /* 32 <-> 64 bit conversions */
531       Iop_64to32,     // :: I64 -> I32, low half
532       Iop_64HIto32,   // :: I64 -> I32, high half
533       Iop_32HLto64,   // :: (I32,I32) -> I64
534       /* 64 <-> 128 bit conversions */
535       Iop_128to64,    // :: I128 -> I64, low half
536       Iop_128HIto64,  // :: I128 -> I64, high half
537       Iop_64HLto128,  // :: (I64,I64) -> I128
538       /* 1-bit stuff */
539       Iop_Not1,   /* :: Ity_Bit -> Ity_Bit */
540       Iop_32to1,  /* :: Ity_I32 -> Ity_Bit, just select bit[0] */
541       Iop_64to1,  /* :: Ity_I64 -> Ity_Bit, just select bit[0] */
542       Iop_1Uto8,  /* :: Ity_Bit -> Ity_I8,  unsigned widen */
543       Iop_1Uto32, /* :: Ity_Bit -> Ity_I32, unsigned widen */
544       Iop_1Uto64, /* :: Ity_Bit -> Ity_I64, unsigned widen */
545       Iop_1Sto8,  /* :: Ity_Bit -> Ity_I8,  signed widen */
546       Iop_1Sto16, /* :: Ity_Bit -> Ity_I16, signed widen */
547       Iop_1Sto32, /* :: Ity_Bit -> Ity_I32, signed widen */
548       Iop_1Sto64, /* :: Ity_Bit -> Ity_I64, signed widen */
549 
550       /* ------ Floating point.  We try to be IEEE754 compliant. ------ */
551 
552       /* --- Simple stuff as mandated by 754. --- */
553 
554       /* Binary operations, with rounding. */
555       /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */
556       Iop_AddF64, Iop_SubF64, Iop_MulF64, Iop_DivF64,
557 
558       /* :: IRRoundingMode(I32) x F32 x F32 -> F32 */
559       Iop_AddF32, Iop_SubF32, Iop_MulF32, Iop_DivF32,
560 
561       /* Variants of the above which produce a 64-bit result but which
562          round their result to a IEEE float range first. */
563       /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */
564       Iop_AddF64r32, Iop_SubF64r32, Iop_MulF64r32, Iop_DivF64r32,
565 
566       /* Unary operations, without rounding. */
567       /* :: F64 -> F64 */
568       Iop_NegF64, Iop_AbsF64,
569 
570       /* :: F32 -> F32 */
571       Iop_NegF32, Iop_AbsF32,
572 
573       /* Unary operations, with rounding. */
574       /* :: IRRoundingMode(I32) x F64 -> F64 */
575       Iop_SqrtF64,
576 
577       /* :: IRRoundingMode(I32) x F32 -> F32 */
578       Iop_SqrtF32,
579 
580       /* Comparison, yielding GT/LT/EQ/UN(ordered), as per the following:
581             0x45 Unordered
582             0x01 LT
583             0x00 GT
584             0x40 EQ
585          This just happens to be the Intel encoding.  The values
586          are recorded in the type IRCmpF64Result.
587       */
588       /* :: F64 x F64 -> IRCmpF64Result(I32) */
589       Iop_CmpF64,
590       Iop_CmpF32,
591       Iop_CmpF128,
592 
593       /* --- Int to/from FP conversions. --- */
594 
595       /* For the most part, these take a first argument :: Ity_I32 (as
596          IRRoundingMode) which is an indication of the rounding mode
597          to use, as per the following encoding ("the standard
598          encoding"):
599             00b  to nearest (the default)
600             01b  to -infinity
601             10b  to +infinity
602             11b  to zero
603          This just happens to be the Intel encoding.  For reference only,
604          the PPC encoding is:
605             00b  to nearest (the default)
606             01b  to zero
607             10b  to +infinity
608             11b  to -infinity
609          Any PPC -> IR front end will have to translate these PPC
610          encodings, as encoded in the guest state, to the standard
611          encodings, to pass to the primops.
612          For reference only, the ARM VFP encoding is:
613             00b  to nearest
614             01b  to +infinity
615             10b  to -infinity
616             11b  to zero
617          Again, this will have to be converted to the standard encoding
618          to pass to primops.
619 
620          If one of these conversions gets an out-of-range condition,
621          or a NaN, as an argument, the result is host-defined.  On x86
622          the "integer indefinite" value 0x80..00 is produced.  On PPC
623          it is either 0x80..00 or 0x7F..FF depending on the sign of
624          the argument.
625 
626          On ARMvfp, when converting to a signed integer result, the
627          overflow result is 0x80..00 for negative args and 0x7F..FF
628          for positive args.  For unsigned integer results it is
629          0x00..00 and 0xFF..FF respectively.
630 
631          Rounding is required whenever the destination type cannot
632          represent exactly all values of the source type.
633       */
634       Iop_F64toI16S, /* IRRoundingMode(I32) x F64 -> signed I16 */
635       Iop_F64toI32S, /* IRRoundingMode(I32) x F64 -> signed I32 */
636       Iop_F64toI64S, /* IRRoundingMode(I32) x F64 -> signed I64 */
637       Iop_F64toI64U, /* IRRoundingMode(I32) x F64 -> unsigned I64 */
638 
639       Iop_F64toI32U, /* IRRoundingMode(I32) x F64 -> unsigned I32 */
640 
641       Iop_I32StoF64, /*                       signed I32 -> F64 */
642       Iop_I64StoF64, /* IRRoundingMode(I32) x signed I64 -> F64 */
643       Iop_I64UtoF64, /* IRRoundingMode(I32) x unsigned I64 -> F64 */
644       Iop_I64UtoF32, /* IRRoundingMode(I32) x unsigned I64 -> F32 */
645 
646       Iop_I32UtoF32, /* IRRoundingMode(I32) x unsigned I32 -> F32 */
647       Iop_I32UtoF64, /*                       unsigned I32 -> F64 */
648 
649       Iop_F32toI32S, /* IRRoundingMode(I32) x F32 -> signed I32 */
650       Iop_F32toI64S, /* IRRoundingMode(I32) x F32 -> signed I64 */
651       Iop_F32toI32U, /* IRRoundingMode(I32) x F32 -> unsigned I32 */
652       Iop_F32toI64U, /* IRRoundingMode(I32) x F32 -> unsigned I64 */
653 
654       Iop_I32StoF32, /* IRRoundingMode(I32) x signed I32 -> F32 */
655       Iop_I64StoF32, /* IRRoundingMode(I32) x signed I64 -> F32 */
656 
657       /* Conversion between floating point formats */
658       Iop_F32toF64,  /*                       F32 -> F64 */
659       Iop_F64toF32,  /* IRRoundingMode(I32) x F64 -> F32 */
660 
661       /* Reinterpretation.  Take an F64 and produce an I64 with
662          the same bit pattern, or vice versa. */
663       Iop_ReinterpF64asI64, Iop_ReinterpI64asF64,
664       Iop_ReinterpF32asI32, Iop_ReinterpI32asF32,
665 
666       /* Support for 128-bit floating point */
667       Iop_F64HLtoF128,/* (high half of F128,low half of F128) -> F128 */
668       Iop_F128HItoF64,/* F128 -> high half of F128 into a F64 register */
669       Iop_F128LOtoF64,/* F128 -> low  half of F128 into a F64 register */
670 
671       /* :: IRRoundingMode(I32) x F128 x F128 -> F128 */
672       Iop_AddF128, Iop_SubF128, Iop_MulF128, Iop_DivF128,
673       Iop_MAddF128,    // (A * B) + C
674       Iop_MSubF128,    // (A * B) - C
675       Iop_NegMAddF128, // -((A * B) + C)
676       Iop_NegMSubF128, // -((A * B) - C)
677 
678       /* :: F128 -> F128 */
679       Iop_NegF128, Iop_AbsF128,
680 
681       /* :: IRRoundingMode(I32) x F128 -> F128 */
682       Iop_SqrtF128,
683 
684       Iop_I32StoF128, /*                signed I32  -> F128 */
685       Iop_I64StoF128, /*                signed I64  -> F128 */
686       Iop_I32UtoF128, /*              unsigned I32  -> F128 */
687       Iop_I64UtoF128, /*              unsigned I64  -> F128 */
688       Iop_F32toF128,  /*                       F32  -> F128 */
689       Iop_F64toF128,  /*                       F64  -> F128 */
690 
691       Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32  */
692       Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64  */
693       Iop_F128toI32U, /* IRRoundingMode(I32) x F128 -> unsigned I32  */
694       Iop_F128toI64U, /* IRRoundingMode(I32) x F128 -> unsigned I64  */
695       Iop_F128toI128S,/* IRRoundingMode(I32) x F128 -> signed I128 */
696       Iop_F128toF64,  /* IRRoundingMode(I32) x F128 -> F64         */
697       Iop_F128toF32,  /* IRRoundingMode(I32) x F128 -> F32         */
698       Iop_RndF128,    /* IRRoundingMode(I32) x F128 -> F128         */
699 
700       /* Truncate to the specified value, source and result
701        * are stroed in a F128 register.
702        */
703       Iop_TruncF128toI32S,  /* truncate F128 -> I32         */
704       Iop_TruncF128toI32U,  /* truncate F128 -> I32         */
705       Iop_TruncF128toI64U,  /* truncate F128 -> I64         */
706       Iop_TruncF128toI64S,  /* truncate F128 -> I64         */
707 
708       /* --- guest x86/amd64 specifics, not mandated by 754. --- */
709 
710       /* Binary ops, with rounding. */
711       /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */
712       Iop_AtanF64,       /* FPATAN,  arctan(arg1/arg2)       */
713       Iop_Yl2xF64,       /* FYL2X,   arg1 * log2(arg2)       */
714       Iop_Yl2xp1F64,     /* FYL2XP1, arg1 * log2(arg2+1.0)   */
715       Iop_PRemF64,       /* FPREM,   non-IEEE remainder(arg1/arg2)    */
716       Iop_PRemC3210F64,  /* C3210 flags resulting from FPREM, :: I32 */
717       Iop_PRem1F64,      /* FPREM1,  IEEE remainder(arg1/arg2)    */
718       Iop_PRem1C3210F64, /* C3210 flags resulting from FPREM1, :: I32 */
719       Iop_ScaleF64,      /* FSCALE,  arg1 * (2^RoundTowardsZero(arg2)) */
720       /* Note that on x86 guest, PRem1{C3210} has the same behaviour
721          as the IEEE mandated RemF64, except it is limited in the
722          range of its operand.  Hence the partialness. */
723 
724       /* Unary ops, with rounding. */
725       /* :: IRRoundingMode(I32) x F64 -> F64 */
726       Iop_SinF64,    /* FSIN */
727       Iop_CosF64,    /* FCOS */
728       Iop_TanF64,    /* FTAN */
729       Iop_2xm1F64,   /* (2^arg - 1.0) */
730       Iop_RoundF128toInt, /* F128 value to nearest integral value (still
731                              as F128) */
732       Iop_RoundF64toInt, /* F64 value to nearest integral value (still
733                             as F64) */
734       Iop_RoundF32toInt, /* F32 value to nearest integral value (still
735                             as F32) */
736 
737       /* --- guest s390 specifics, not mandated by 754. --- */
738 
739       /* Fused multiply-add/sub */
740       /* :: IRRoundingMode(I32) x F32 x F32 x F32 -> F32
741             (computes arg2 * arg3 +/- arg4) */
742       Iop_MAddF32, Iop_MSubF32,
743 
744       /* --- guest ppc32/64 specifics, not mandated by 754. --- */
745 
746       /* Ternary operations, with rounding. */
747       /* Fused multiply-add/sub, with 112-bit intermediate
748          precision for ppc.
749          Also used to implement fused multiply-add/sub for s390. */
750       /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64
751             (computes arg2 * arg3 +/- arg4) */
752       Iop_MAddF64, Iop_MSubF64,
753 
754       /* Variants of the above which produce a 64-bit result but which
755          round their result to a IEEE float range first. */
756       /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 */
757       Iop_MAddF64r32, Iop_MSubF64r32,
758 
759       /* :: F64 -> F64 */
760       Iop_RSqrtEst5GoodF64, /* reciprocal square root estimate, 5 good bits */
761       Iop_RoundF64toF64_NEAREST, /* frin */
762       Iop_RoundF64toF64_NegINF,  /* frim */
763       Iop_RoundF64toF64_PosINF,  /* frip */
764       Iop_RoundF64toF64_ZERO,    /* friz */
765 
766       /* :: F64 -> F32 */
767       Iop_TruncF64asF32, /* do F64->F32 truncation as per 'fsts' */
768 
769       /* :: IRRoundingMode(I32) x F64 -> F64 */
770       Iop_RoundF64toF32, /* round F64 to nearest F32 value (still as F64) */
771       /* NB: pretty much the same as Iop_F64toF32, except no change
772          of type. */
773 
774       /* --- guest arm64 specifics, not mandated by 754. --- */
775 
776       Iop_RecpExpF64,  /* FRECPX d  :: IRRoundingMode(I32) x F64 -> F64 */
777       Iop_RecpExpF32,  /* FRECPX s  :: IRRoundingMode(I32) x F32 -> F32 */
778 
779       /* --------- Possibly required by IEEE 754-2008. --------- */
780 
781       Iop_MaxNumF64,  /* max, F64, numerical operand if other is a qNaN */
782       Iop_MinNumF64,  /* min, F64, ditto */
783       Iop_MaxNumF32,  /* max, F32, ditto */
784       Iop_MinNumF32,  /* min, F32, ditto */
785 
786       /* ------------------ 16-bit scalar FP ------------------ */
787 
788       Iop_F16toF64,  /*                       F16 -> F64 */
789       Iop_F64toF16,  /* IRRoundingMode(I32) x F64 -> F16 */
790 
791       Iop_F16toF32,  /*                       F16 -> F32 */
792       Iop_F32toF16,  /* IRRoundingMode(I32) x F32 -> F16 */
793 
794       /* ------------------ 32-bit SIMD Integer ------------------ */
795 
796       /* 32x1 saturating add/sub (ok, well, not really SIMD :) */
797       Iop_QAdd32S,
798       Iop_QSub32S,
799 
800       /* 16x2 add/sub, also signed/unsigned saturating variants */
801       Iop_Add16x2, Iop_Sub16x2,
802       Iop_QAdd16Sx2, Iop_QAdd16Ux2,
803       Iop_QSub16Sx2, Iop_QSub16Ux2,
804 
805       /* 16x2 signed/unsigned halving add/sub.  For each lane, these
806          compute bits 16:1 of (eg) sx(argL) + sx(argR),
807          or zx(argL) - zx(argR) etc. */
808       Iop_HAdd16Ux2, Iop_HAdd16Sx2,
809       Iop_HSub16Ux2, Iop_HSub16Sx2,
810 
811       /* 8x4 add/sub, also signed/unsigned saturating variants */
812       Iop_Add8x4, Iop_Sub8x4,
813       Iop_QAdd8Sx4, Iop_QAdd8Ux4,
814       Iop_QSub8Sx4, Iop_QSub8Ux4,
815 
816       /* 8x4 signed/unsigned halving add/sub.  For each lane, these
817          compute bits 8:1 of (eg) sx(argL) + sx(argR),
818          or zx(argL) - zx(argR) etc. */
819       Iop_HAdd8Ux4, Iop_HAdd8Sx4,
820       Iop_HSub8Ux4, Iop_HSub8Sx4,
821 
822       /* 8x4 sum of absolute unsigned differences. */
823       Iop_Sad8Ux4,
824 
825       /* MISC (vector integer cmp != 0) */
826       Iop_CmpNEZ16x2, Iop_CmpNEZ8x4,
827 
828       /* ------------------ 64-bit SIMD FP ------------------------ */
829 
830       /* Convertion to/from int */
831       Iop_I32UtoFx2,  Iop_I32StoFx2,    /* I32x4 -> F32x4 */
832       Iop_FtoI32Ux2_RZ,  Iop_FtoI32Sx2_RZ,    /* F32x4 -> I32x4 */
833       /* Fixed32 format is floating-point number with fixed number of fraction
834          bits. The number of fraction bits is passed as a second argument of
835          type I8. */
836       Iop_F32ToFixed32Ux2_RZ, Iop_F32ToFixed32Sx2_RZ, /* fp -> fixed-point */
837       Iop_Fixed32UToF32x2_RN, Iop_Fixed32SToF32x2_RN, /* fixed-point -> fp */
838 
839       /* Binary operations */
840       Iop_Max32Fx2,      Iop_Min32Fx2,
841       /* Pairwise Min and Max. See integer pairwise operations for more
842          details. */
843       Iop_PwMax32Fx2,    Iop_PwMin32Fx2,
844       /* Note: For the following compares, the arm front-end assumes a
845          nan in a lane of either argument returns zero for that lane. */
846       Iop_CmpEQ32Fx2, Iop_CmpGT32Fx2, Iop_CmpGE32Fx2,
847 
848       /* Vector Reciprocal Estimate finds an approximate reciprocal of each
849       element in the operand vector, and places the results in the destination
850       vector.  */
851       Iop_RecipEst32Fx2,
852 
853       /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
854          Note, that if one of the arguments is zero and another one is infinity
855          of arbitrary sign the result of the operation is 2.0. */
856       Iop_RecipStep32Fx2,
857 
858       /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
859          square root of each element in the operand vector. */
860       Iop_RSqrtEst32Fx2,
861 
862       /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
863          Note, that of one of the arguments is zero and another one is infiinty
864          of arbitrary sign the result of the operation is 1.5. */
865       Iop_RSqrtStep32Fx2,
866 
867       /* Unary */
868       Iop_Neg32Fx2, Iop_Abs32Fx2,
869 
870       /* ------------------ 64-bit SIMD Integer. ------------------ */
871 
872       /* MISC (vector integer cmp != 0) */
873       Iop_CmpNEZ8x8, Iop_CmpNEZ16x4, Iop_CmpNEZ32x2,
874 
875       /* ADDITION (normal / unsigned sat / signed sat) */
876       Iop_Add8x8,   Iop_Add16x4,   Iop_Add32x2,
877       Iop_QAdd8Ux8, Iop_QAdd16Ux4, Iop_QAdd32Ux2, Iop_QAdd64Ux1,
878       Iop_QAdd8Sx8, Iop_QAdd16Sx4, Iop_QAdd32Sx2, Iop_QAdd64Sx1,
879 
880       /* PAIRWISE operations */
881       /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
882             [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
883       Iop_PwAdd8x8,  Iop_PwAdd16x4,  Iop_PwAdd32x2,
884       Iop_PwMax8Sx8, Iop_PwMax16Sx4, Iop_PwMax32Sx2,
885       Iop_PwMax8Ux8, Iop_PwMax16Ux4, Iop_PwMax32Ux2,
886       Iop_PwMin8Sx8, Iop_PwMin16Sx4, Iop_PwMin32Sx2,
887       Iop_PwMin8Ux8, Iop_PwMin16Ux4, Iop_PwMin32Ux2,
888       /* Longening variant is unary. The resulting vector contains two times
889          less elements than operand, but they are two times wider.
890          Example:
891             Iop_PAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
892                where a+b and c+d are unsigned 32-bit values. */
893       Iop_PwAddL8Ux8, Iop_PwAddL16Ux4, Iop_PwAddL32Ux2,
894       Iop_PwAddL8Sx8, Iop_PwAddL16Sx4, Iop_PwAddL32Sx2,
895 
896       /* SUBTRACTION (normal / unsigned sat / signed sat) */
897       Iop_Sub8x8,   Iop_Sub16x4,   Iop_Sub32x2,
898       Iop_QSub8Ux8, Iop_QSub16Ux4, Iop_QSub32Ux2, Iop_QSub64Ux1,
899       Iop_QSub8Sx8, Iop_QSub16Sx4, Iop_QSub32Sx2, Iop_QSub64Sx1,
900 
901       /* ABSOLUTE VALUE */
902       Iop_Abs8x8, Iop_Abs16x4, Iop_Abs32x2,
903 
904       /* MULTIPLICATION (normal / high half of signed/unsigned / plynomial ) */
905       Iop_Mul8x8, Iop_Mul16x4, Iop_Mul32x2,
906       Iop_Mul32Fx2,
907       Iop_MulHi16Ux4,
908       Iop_MulHi16Sx4,
909       /* Plynomial multiplication treats it's arguments as coefficients of
910          polynoms over {0, 1}. */
911       Iop_PolynomialMul8x8,
912 
913       /* Vector Saturating Doubling Multiply Returning High Half and
914          Vector Saturating Rounding Doubling Multiply Returning High Half */
915       /* These IROp's multiply corresponding elements in two vectors, double
916          the results, and place the most significant half of the final results
917          in the destination vector. The results are truncated or rounded. If
918          any of the results overflow, they are saturated. */
919       Iop_QDMulHi16Sx4, Iop_QDMulHi32Sx2,
920       Iop_QRDMulHi16Sx4, Iop_QRDMulHi32Sx2,
921 
922       /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
923       Iop_Avg8Ux8,
924       Iop_Avg16Ux4,
925 
926       /* MIN/MAX */
927       Iop_Max8Sx8, Iop_Max16Sx4, Iop_Max32Sx2,
928       Iop_Max8Ux8, Iop_Max16Ux4, Iop_Max32Ux2,
929       Iop_Min8Sx8, Iop_Min16Sx4, Iop_Min32Sx2,
930       Iop_Min8Ux8, Iop_Min16Ux4, Iop_Min32Ux2,
931 
932       /* COMPARISON */
933       Iop_CmpEQ8x8,  Iop_CmpEQ16x4,  Iop_CmpEQ32x2,
934       Iop_CmpGT8Ux8, Iop_CmpGT16Ux4, Iop_CmpGT32Ux2,
935       Iop_CmpGT8Sx8, Iop_CmpGT16Sx4, Iop_CmpGT32Sx2,
936 
937       /* COUNT ones / leading zeroes / leading sign bits (not including topmost
938          bit) */
939       Iop_Cnt8x8,
940       Iop_Clz8x8, Iop_Clz16x4, Iop_Clz32x2,
941       Iop_Cls8x8, Iop_Cls16x4, Iop_Cls32x2,
942       Iop_Clz64x2,
943 
944       /*Vector COUNT trailing zeros */
945       Iop_Ctz8x16, Iop_Ctz16x8, Iop_Ctz32x4, Iop_Ctz64x2,
946 
947       /* VECTOR x VECTOR SHIFT / ROTATE */
948       Iop_Shl8x8, Iop_Shl16x4, Iop_Shl32x2,
949       Iop_Shr8x8, Iop_Shr16x4, Iop_Shr32x2,
950       Iop_Sar8x8, Iop_Sar16x4, Iop_Sar32x2,
951       Iop_Sal8x8, Iop_Sal16x4, Iop_Sal32x2, Iop_Sal64x1,
952 
953       /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
954       Iop_ShlN8x8, Iop_ShlN16x4, Iop_ShlN32x2,
955       Iop_ShrN8x8, Iop_ShrN16x4, Iop_ShrN32x2,
956       Iop_SarN8x8, Iop_SarN16x4, Iop_SarN32x2,
957 
958       /* VECTOR x VECTOR SATURATING SHIFT */
959       Iop_QShl8x8, Iop_QShl16x4, Iop_QShl32x2, Iop_QShl64x1,
960       Iop_QSal8x8, Iop_QSal16x4, Iop_QSal32x2, Iop_QSal64x1,
961       /* VECTOR x INTEGER SATURATING SHIFT */
962       Iop_QShlNsatSU8x8,  Iop_QShlNsatSU16x4,
963       Iop_QShlNsatSU32x2, Iop_QShlNsatSU64x1,
964       Iop_QShlNsatUU8x8,  Iop_QShlNsatUU16x4,
965       Iop_QShlNsatUU32x2, Iop_QShlNsatUU64x1,
966       Iop_QShlNsatSS8x8,  Iop_QShlNsatSS16x4,
967       Iop_QShlNsatSS32x2, Iop_QShlNsatSS64x1,
968 
969       /* NARROWING (binary)
970          -- narrow 2xI64 into 1xI64, hi half from left arg */
971       /* For saturated narrowing, I believe there are 4 variants of
972          the basic arithmetic operation, depending on the signedness
973          of argument and result.  Here are examples that exemplify
974          what I mean:
975 
976          QNarrow16Uto8U ( UShort x )  if (x >u 255) x = 255;
977                                       return x[7:0];
978 
979          QNarrow16Sto8S ( Short x )   if (x <s -128) x = -128;
980                                       if (x >s  127) x = 127;
981                                       return x[7:0];
982 
983          QNarrow16Uto8S ( UShort x )  if (x >u 127) x = 127;
984                                       return x[7:0];
985 
986          QNarrow16Sto8U ( Short x )   if (x <s 0)   x = 0;
987                                       if (x >s 255) x = 255;
988                                       return x[7:0];
989       */
990       Iop_QNarrowBin16Sto8Ux8,
991       Iop_QNarrowBin16Sto8Sx8, Iop_QNarrowBin32Sto16Sx4,
992       Iop_NarrowBin16to8x8,    Iop_NarrowBin32to16x4,
993 
994       /* INTERLEAVING */
995       /* Interleave lanes from low or high halves of
996          operands.  Most-significant result lane is from the left
997          arg. */
998       Iop_InterleaveHI8x8, Iop_InterleaveHI16x4, Iop_InterleaveHI32x2,
999       Iop_InterleaveLO8x8, Iop_InterleaveLO16x4, Iop_InterleaveLO32x2,
1000       /* Interleave odd/even lanes of operands.  Most-significant result lane
1001          is from the left arg.  Note that Interleave{Odd,Even}Lanes32x2 are
1002          identical to Interleave{HI,LO}32x2 and so are omitted.*/
1003       Iop_InterleaveOddLanes8x8, Iop_InterleaveEvenLanes8x8,
1004       Iop_InterleaveOddLanes16x4, Iop_InterleaveEvenLanes16x4,
1005 
1006       /* CONCATENATION -- build a new value by concatenating either
1007          the even or odd lanes of both operands.  Note that
1008          Cat{Odd,Even}Lanes32x2 are identical to Interleave{HI,LO}32x2
1009          and so are omitted. */
1010       Iop_CatOddLanes8x8, Iop_CatOddLanes16x4,
1011       Iop_CatEvenLanes8x8, Iop_CatEvenLanes16x4,
1012 
1013       /* GET / SET elements of VECTOR
1014          GET is binop (I64, I8) -> I<elem_size>
1015          SET is triop (I64, I8, I<elem_size>) -> I64 */
1016       /* Note: the arm back-end handles only constant second argument */
1017       Iop_GetElem8x8, Iop_GetElem16x4, Iop_GetElem32x2,
1018       Iop_SetElem8x8, Iop_SetElem16x4, Iop_SetElem32x2,
1019 
1020       /* DUPLICATING -- copy value to all lanes */
1021       Iop_Dup8x8,   Iop_Dup16x4,   Iop_Dup32x2,
1022 
1023       /* SLICE -- produces the lowest 64 bits of (arg1:arg2) >> (8 * arg3).
1024          arg3 is a shift amount in bytes and may be between 0 and 8
1025          inclusive.  When 0, the result is arg2; when 8, the result is arg1.
1026          Not all back ends handle all values.  The arm32 and arm64 back
1027          ends handle only immediate arg3 values. */
1028       Iop_Slice64,  // (I64, I64, I8) -> I64
1029 
1030       /* REVERSE the order of chunks in vector lanes.  Chunks must be
1031          smaller than the vector lanes (obviously) and so may be 8-,
1032          16- and 32-bit in size. */
1033       /* Examples:
1034             Reverse8sIn16_x4([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
1035             Reverse8sIn32_x2([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e]
1036             Reverse8sIn64_x1([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */
1037       Iop_Reverse8sIn16_x4,
1038       Iop_Reverse8sIn32_x2, Iop_Reverse16sIn32_x2,
1039       Iop_Reverse8sIn64_x1, Iop_Reverse16sIn64_x1, Iop_Reverse32sIn64_x1,
1040 
1041       /* PERMUTING -- copy src bytes to dst,
1042          as indexed by control vector bytes:
1043             for i in 0 .. 7 . result[i] = argL[ argR[i] ]
1044          argR[i] values may only be in the range 0 .. 7, else behaviour
1045          is undefined. */
1046       Iop_Perm8x8,
1047 
1048       /* MISC CONVERSION -- get high bits of each byte lane, a la
1049          x86/amd64 pmovmskb */
1050       Iop_GetMSBs8x8, /* I64 -> I8 */
1051 
1052       /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
1053          See floating-point equivalents for details. */
1054       Iop_RecipEst32Ux2, Iop_RSqrtEst32Ux2,
1055 
1056       /* ------------------ Decimal Floating Point ------------------ */
1057 
1058       /* ARITHMETIC INSTRUCTIONS   64-bit
1059 	 ----------------------------------
1060 	 IRRoundingMode(I32) X D64 X D64 -> D64
1061       */
1062       Iop_AddD64, Iop_SubD64, Iop_MulD64, Iop_DivD64,
1063 
1064       /* ARITHMETIC INSTRUCTIONS  128-bit
1065 	 ----------------------------------
1066 	 IRRoundingMode(I32) X D128 X D128 -> D128
1067       */
1068       Iop_AddD128, Iop_SubD128, Iop_MulD128, Iop_DivD128,
1069 
1070       /* SHIFT SIGNIFICAND INSTRUCTIONS
1071        *    The DFP significand is shifted by the number of digits specified
1072        *    by the U8 operand.  Digits shifted out of the leftmost digit are
1073        *    lost. Zeros are supplied to the vacated positions on the right.
1074        *    The sign of the result is the same as the sign of the original
1075        *    operand.
1076        *
1077        * D64 x U8  -> D64    left shift and right shift respectively */
1078       Iop_ShlD64, Iop_ShrD64,
1079 
1080       /* D128 x U8  -> D128  left shift and right shift respectively */
1081       Iop_ShlD128, Iop_ShrD128,
1082 
1083 
1084       /* FORMAT CONVERSION INSTRUCTIONS
1085        *   D32 -> D64
1086        */
1087       Iop_D32toD64,
1088 
1089       /*   D64 -> D128 */
1090       Iop_D64toD128,
1091 
1092       /*   I32S -> D128 */
1093       Iop_I32StoD128,
1094 
1095       /*   I32U -> D128 */
1096       Iop_I32UtoD128,
1097 
1098       /*   I64S -> D128 */
1099       Iop_I64StoD128,
1100 
1101       /*   I64U -> D128 */
1102       Iop_I64UtoD128,
1103 
1104       /*   IRRoundingMode(I32) x D64 -> D32 */
1105       Iop_D64toD32,
1106 
1107       /*   IRRoundingMode(I32) x D128 -> D64 */
1108       Iop_D128toD64,
1109 
1110       /*   I32S -> D64 */
1111       Iop_I32StoD64,
1112 
1113       /*   I32U -> D64 */
1114       Iop_I32UtoD64,
1115 
1116       /*   IRRoundingMode(I32) x I64 -> D64 */
1117       Iop_I64StoD64,
1118 
1119       /*   IRRoundingMode(I32) x I64 -> D64 */
1120       Iop_I64UtoD64,
1121 
1122       /*   IRRoundingMode(I32) x D64 -> I32 */
1123       Iop_D64toI32S,
1124 
1125       /*   IRRoundingMode(I32) x D64 -> I32 */
1126       Iop_D64toI32U,
1127 
1128       /*   IRRoundingMode(I32) x D64 -> I64 */
1129       Iop_D64toI64S,
1130 
1131       /*   IRRoundingMode(I32) x D64 -> I64 */
1132       Iop_D64toI64U,
1133 
1134       /*   IRRoundingMode(I32) x D128 -> I32 */
1135       Iop_D128toI32S,
1136 
1137       /*   IRRoundingMode(I32) x D128 -> I32 */
1138       Iop_D128toI32U,
1139 
1140       /*   IRRoundingMode(I32) x D128 -> I64 */
1141       Iop_D128toI64S,
1142 
1143       /*   IRRoundingMode(I32) x D128 -> I64 */
1144       Iop_D128toI64U,
1145 
1146       /*   IRRoundingMode(I32) x F32 -> D32 */
1147       Iop_F32toD32,
1148 
1149       /*   IRRoundingMode(I32) x F32 -> D64 */
1150       Iop_F32toD64,
1151 
1152       /*   IRRoundingMode(I32) x F32 -> D128 */
1153       Iop_F32toD128,
1154 
1155       /*   IRRoundingMode(I32) x F64 -> D32 */
1156       Iop_F64toD32,
1157 
1158       /*   IRRoundingMode(I32) x F64 -> D64 */
1159       Iop_F64toD64,
1160 
1161       /*   IRRoundingMode(I32) x F64 -> D128 */
1162       Iop_F64toD128,
1163 
1164       /*   IRRoundingMode(I32) x F128 -> D32 */
1165       Iop_F128toD32,
1166 
1167       /*   IRRoundingMode(I32) x F128 -> D64 */
1168       Iop_F128toD64,
1169 
1170       /*   IRRoundingMode(I32) x F128 -> D128 */
1171       Iop_F128toD128,
1172 
1173       /*   IRRoundingMode(I32) x D32 -> F32 */
1174       Iop_D32toF32,
1175 
1176       /*   IRRoundingMode(I32) x D32 -> F64 */
1177       Iop_D32toF64,
1178 
1179       /*   IRRoundingMode(I32) x D32 -> F128 */
1180       Iop_D32toF128,
1181 
1182       /*   IRRoundingMode(I32) x D64 -> F32 */
1183       Iop_D64toF32,
1184 
1185       /*   IRRoundingMode(I32) x D64 -> F64 */
1186       Iop_D64toF64,
1187 
1188       /*   IRRoundingMode(I32) x D64 -> F128 */
1189       Iop_D64toF128,
1190 
1191       /*   IRRoundingMode(I32) x D128 -> F32 */
1192       Iop_D128toF32,
1193 
1194       /*   IRRoundingMode(I32) x D128 -> F64 */
1195       Iop_D128toF64,
1196 
1197       /*   IRRoundingMode(I32) x D128 -> F128 */
1198       Iop_D128toF128,
1199 
1200       /* ROUNDING INSTRUCTIONS
1201        * IRRoundingMode(I32) x D64 -> D64
1202        * The D64 operand, if a finite number, it is rounded to a
1203        * floating point integer value, i.e. no fractional part.
1204        */
1205       Iop_RoundD64toInt,
1206 
1207       /* IRRoundingMode(I32) x D128 -> D128 */
1208       Iop_RoundD128toInt,
1209 
1210       /* COMPARE INSTRUCTIONS
1211        * D64 x D64 -> IRCmpD64Result(I32) */
1212       Iop_CmpD64,
1213 
1214       /* D128 x D128 -> IRCmpD128Result(I32) */
1215       Iop_CmpD128,
1216 
1217       /* COMPARE BIASED EXPONENET INSTRUCTIONS
1218        * D64 x D64 -> IRCmpD64Result(I32) */
1219       Iop_CmpExpD64,
1220 
1221       /* D128 x D128 -> IRCmpD128Result(I32) */
1222       Iop_CmpExpD128,
1223 
1224       /* QUANTIZE AND ROUND INSTRUCTIONS
1225        * The source operand is converted and rounded to the form with the
1226        * immediate exponent specified by the rounding and exponent parameter.
1227        *
1228        * The second operand is converted and rounded to the form
1229        * of the first operand's exponent and the rounded based on the specified
1230        * rounding mode parameter.
1231        *
1232        * IRRoundingMode(I32) x D64 x D64-> D64 */
1233       Iop_QuantizeD64,
1234 
1235       /* IRRoundingMode(I32) x D128 x D128 -> D128 */
1236       Iop_QuantizeD128,
1237 
1238       /* IRRoundingMode(I32) x I8 x D64 -> D64
1239        *    The Decimal Floating point operand is rounded to the requested
1240        *    significance given by the I8 operand as specified by the rounding
1241        *    mode.
1242        */
1243       Iop_SignificanceRoundD64,
1244 
1245       /* IRRoundingMode(I32) x I8 x D128 -> D128 */
1246       Iop_SignificanceRoundD128,
1247 
1248       /* EXTRACT AND INSERT INSTRUCTIONS
1249        * D64 -> I64
1250        *    The exponent of the D32 or D64 operand is extracted.  The
1251        *    extracted exponent is converted to a 64-bit signed binary integer.
1252        */
1253       Iop_ExtractExpD64,
1254 
1255       /* D128 -> I64 */
1256       Iop_ExtractExpD128,
1257 
1258       /* D64 -> I64
1259        * The number of significand digits of the D64 operand is extracted.
1260        * The number is stored as a 64-bit signed binary integer.
1261        */
1262       Iop_ExtractSigD64,
1263 
1264       /* D128 -> I64 */
1265       Iop_ExtractSigD128,
1266 
1267       /* I64 x D64  -> D64
1268        *    The exponent is specified by the first I64 operand the signed
1269        *    significand is given by the second I64 value.  The result is a D64
1270        *    value consisting of the specified significand and exponent whose
1271        *    sign is that of the specified significand.
1272        */
1273       Iop_InsertExpD64,
1274 
1275       /* I64 x D128 -> D128 */
1276       Iop_InsertExpD128,
1277 
1278       /* Support for 128-bit DFP type */
1279       Iop_D64HLtoD128, Iop_D128HItoD64, Iop_D128LOtoD64,
1280 
1281       /*  I64 -> I64
1282        *     Convert 50-bit densely packed BCD string to 60 bit BCD string
1283        */
1284       Iop_DPBtoBCD,
1285 
1286       /* I64 -> I64
1287        *     Convert 60 bit BCD string to 50-bit densely packed BCD string
1288        */
1289       Iop_BCDtoDPB,
1290 
1291       /* BCD arithmetic instructions, (V128, V128) -> V128
1292        * The BCD format is the same as that used in the BCD<->DPB conversion
1293        * routines, except using 124 digits (vs 60) plus the trailing 4-bit
1294        * signed code. */
1295       Iop_BCDAdd, Iop_BCDSub,
1296 
1297       /* Conversion signed 128-bit integer to signed BCD 128-bit */
1298       Iop_I128StoBCD128,
1299 
1300       /* Conversion signed BCD 128-bit to 128-bit integer */
1301       Iop_BCD128toI128S,
1302 
1303       /* Conversion I64 -> D64 */
1304       Iop_ReinterpI64asD64,
1305 
1306       /* Conversion D64 -> I64 */
1307       Iop_ReinterpD64asI64,
1308 
1309       /* ------------------ 128-bit SIMD FP. ------------------ */
1310 
1311       /* --- 32x4 vector FP --- */
1312 
1313       /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1314       Iop_Add32Fx4, Iop_Sub32Fx4, Iop_Mul32Fx4, Iop_Div32Fx4,
1315 
1316       /* binary */
1317       Iop_Max32Fx4, Iop_Min32Fx4,
1318       Iop_Add32Fx2, Iop_Sub32Fx2,
1319       /* Note: For the following compares, the ppc and arm front-ends assume a
1320          nan in a lane of either argument returns zero for that lane. */
1321       Iop_CmpEQ32Fx4, Iop_CmpLT32Fx4, Iop_CmpLE32Fx4, Iop_CmpUN32Fx4,
1322       Iop_CmpGT32Fx4, Iop_CmpGE32Fx4,
1323 
1324       /* Pairwise Max and Min. See integer pairwise operations for details. */
1325       Iop_PwMax32Fx4, Iop_PwMin32Fx4,
1326 
1327       /* unary */
1328       Iop_Abs32Fx4,
1329       Iop_Neg32Fx4,
1330 
1331       /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1332       Iop_Sqrt32Fx4,
1333 
1334       /* Vector Reciprocal Estimate finds an approximate reciprocal of each
1335          element in the operand vector, and places the results in the
1336          destination vector.  */
1337       Iop_RecipEst32Fx4,
1338 
1339       /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
1340          Note, that if one of the arguments is zero and another one is infinity
1341          of arbitrary sign the result of the operation is 2.0. */
1342       Iop_RecipStep32Fx4,
1343 
1344       /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
1345          square root of each element in the operand vector. */
1346       Iop_RSqrtEst32Fx4,
1347 
1348       /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
1349          Note, that of one of the arguments is zero and another one is infiinty
1350          of arbitrary sign the result of the operation is 1.5. */
1351       Iop_RSqrtStep32Fx4,
1352 
1353       /* --- Int to/from FP conversion --- */
1354       /* Unlike the standard fp conversions, these irops take no
1355          rounding mode argument. Instead the irop trailers _R{M,P,N,Z}
1356          indicate the mode: {-inf, +inf, nearest, zero} respectively. */
1357       Iop_I32UtoFx4,     Iop_I32StoFx4,       /* I32x4 -> F32x4       */
1358       Iop_FtoI32Ux4_RZ,  Iop_FtoI32Sx4_RZ,    /* F32x4 -> I32x4       */
1359       Iop_QFtoI32Ux4_RZ, Iop_QFtoI32Sx4_RZ,   /* F32x4 -> I32x4 (saturating) */
1360       Iop_RoundF32x4_RM, Iop_RoundF32x4_RP,   /* round to fp integer  */
1361       Iop_RoundF32x4_RN, Iop_RoundF32x4_RZ,   /* round to fp integer  */
1362       /* Fixed32 format is floating-point number with fixed number of fraction
1363          bits. The number of fraction bits is passed as a second argument of
1364          type I8. */
1365       Iop_F32ToFixed32Ux4_RZ, Iop_F32ToFixed32Sx4_RZ, /* fp -> fixed-point */
1366       Iop_Fixed32UToF32x4_RN, Iop_Fixed32SToF32x4_RN, /* fixed-point -> fp */
1367 
1368       /* --- Single to/from half conversion --- */
1369       /* FIXME: what kind of rounding in F32x4 -> F16x4 case? */
1370       Iop_F32toF16x4, Iop_F16toF32x4,         /* F32x4 <-> F16x4      */
1371 
1372       /* -- Double to/from half conversion -- */
1373       Iop_F64toF16x2, Iop_F16toF64x2,
1374 
1375       /* --- 32x4 lowest-lane-only scalar FP --- */
1376 
1377       /* In binary cases, upper 3/4 is copied from first operand.  In
1378          unary cases, upper 3/4 is copied from the operand. */
1379 
1380       /* binary */
1381       Iop_Add32F0x4, Iop_Sub32F0x4, Iop_Mul32F0x4, Iop_Div32F0x4,
1382       Iop_Max32F0x4, Iop_Min32F0x4,
1383       Iop_CmpEQ32F0x4, Iop_CmpLT32F0x4, Iop_CmpLE32F0x4, Iop_CmpUN32F0x4,
1384 
1385       /* unary */
1386       Iop_RecipEst32F0x4, Iop_Sqrt32F0x4, Iop_RSqrtEst32F0x4,
1387 
1388       /* --- 64x2 vector FP --- */
1389 
1390       /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1391       Iop_Add64Fx2, Iop_Sub64Fx2, Iop_Mul64Fx2, Iop_Div64Fx2,
1392 
1393       /* binary */
1394       Iop_Max64Fx2, Iop_Min64Fx2,
1395       Iop_CmpEQ64Fx2, Iop_CmpLT64Fx2, Iop_CmpLE64Fx2, Iop_CmpUN64Fx2,
1396 
1397       /* unary */
1398       Iop_Abs64Fx2,
1399       Iop_Neg64Fx2,
1400 
1401       /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1402       Iop_Sqrt64Fx2,
1403 
1404       /* see 32Fx4 variants for description */
1405       Iop_RecipEst64Fx2,    // unary
1406       Iop_RecipStep64Fx2,   // binary
1407       Iop_RSqrtEst64Fx2,    // unary
1408       Iop_RSqrtStep64Fx2,   // binary
1409 
1410       /* --- 64x2 lowest-lane-only scalar FP --- */
1411 
1412       /* In binary cases, upper half is copied from first operand.  In
1413          unary cases, upper half is copied from the operand. */
1414 
1415       /* binary */
1416       Iop_Add64F0x2, Iop_Sub64F0x2, Iop_Mul64F0x2, Iop_Div64F0x2,
1417       Iop_Max64F0x2, Iop_Min64F0x2,
1418       Iop_CmpEQ64F0x2, Iop_CmpLT64F0x2, Iop_CmpLE64F0x2, Iop_CmpUN64F0x2,
1419 
1420       /* unary */
1421       Iop_Sqrt64F0x2,
1422 
1423       /* --- pack / unpack --- */
1424 
1425       /* 64 <-> 128 bit vector */
1426       Iop_V128to64,     // :: V128 -> I64, low half
1427       Iop_V128HIto64,   // :: V128 -> I64, high half
1428       Iop_64HLtoV128,   // :: (I64,I64) -> V128
1429 
1430       Iop_64UtoV128,
1431       Iop_SetV128lo64,
1432 
1433       /* Copies lower 64/32/16/8 bits, zeroes out the rest. */
1434       Iop_ZeroHI64ofV128,    // :: V128 -> V128
1435       Iop_ZeroHI96ofV128,    // :: V128 -> V128
1436       Iop_ZeroHI112ofV128,   // :: V128 -> V128
1437       Iop_ZeroHI120ofV128,   // :: V128 -> V128
1438 
1439       /* 32 <-> 128 bit vector */
1440       Iop_32UtoV128,
1441       Iop_V128to32,     // :: V128 -> I32, lowest lane
1442       Iop_SetV128lo32,  // :: (V128,I32) -> V128
1443 
1444       /* ------------------ 128-bit SIMD Integer. ------------------ */
1445 
1446       /* BITWISE OPS */
1447       Iop_NotV128,
1448       Iop_AndV128, Iop_OrV128, Iop_XorV128,
1449 
1450       /* VECTOR SHIFT (shift amt :: Ity_I8) */
1451       Iop_ShlV128, Iop_ShrV128,
1452 
1453       /* MISC (vector integer cmp != 0) */
1454       Iop_CmpNEZ8x16, Iop_CmpNEZ16x8, Iop_CmpNEZ32x4, Iop_CmpNEZ64x2,
1455 
1456       /* ADDITION (normal / U->U sat / S->S sat) */
1457       Iop_Add8x16,    Iop_Add16x8,    Iop_Add32x4,    Iop_Add64x2,
1458       Iop_QAdd8Ux16,  Iop_QAdd16Ux8,  Iop_QAdd32Ux4,  Iop_QAdd64Ux2,
1459       Iop_QAdd8Sx16,  Iop_QAdd16Sx8,  Iop_QAdd32Sx4,  Iop_QAdd64Sx2,
1460 
1461       /* ADDITION, ARM64 specific saturating variants. */
1462       /* Unsigned widen left arg, signed widen right arg, add, saturate S->S.
1463          This corresponds to SUQADD. */
1464       Iop_QAddExtUSsatSS8x16, Iop_QAddExtUSsatSS16x8,
1465       Iop_QAddExtUSsatSS32x4, Iop_QAddExtUSsatSS64x2,
1466       /* Signed widen left arg, unsigned widen right arg, add, saturate U->U.
1467          This corresponds to USQADD. */
1468       Iop_QAddExtSUsatUU8x16, Iop_QAddExtSUsatUU16x8,
1469       Iop_QAddExtSUsatUU32x4, Iop_QAddExtSUsatUU64x2,
1470 
1471       /* SUBTRACTION (normal / unsigned sat / signed sat) */
1472       Iop_Sub8x16,   Iop_Sub16x8,   Iop_Sub32x4,   Iop_Sub64x2,
1473       Iop_QSub8Ux16, Iop_QSub16Ux8, Iop_QSub32Ux4, Iop_QSub64Ux2,
1474       Iop_QSub8Sx16, Iop_QSub16Sx8, Iop_QSub32Sx4, Iop_QSub64Sx2,
1475 
1476       /* MULTIPLICATION (normal / high half of signed/unsigned) */
1477       Iop_Mul8x16,  Iop_Mul16x8,    Iop_Mul32x4,
1478                     Iop_MulHi16Ux8, Iop_MulHi32Ux4,
1479                     Iop_MulHi16Sx8, Iop_MulHi32Sx4,
1480       /* (widening signed/unsigned of even lanes, with lowest lane=zero) */
1481       Iop_MullEven8Ux16, Iop_MullEven16Ux8, Iop_MullEven32Ux4,
1482       Iop_MullEven8Sx16, Iop_MullEven16Sx8, Iop_MullEven32Sx4,
1483 
1484       /* Widening multiplies, all of the form (I64, I64) -> V128 */
1485       Iop_Mull8Ux8, Iop_Mull8Sx8,
1486       Iop_Mull16Ux4, Iop_Mull16Sx4,
1487       Iop_Mull32Ux2, Iop_Mull32Sx2,
1488 
1489       /* Signed doubling saturating widening multiplies, (I64, I64) -> V128 */
1490       Iop_QDMull16Sx4, Iop_QDMull32Sx2,
1491 
1492       /* Vector Saturating Doubling Multiply Returning High Half and
1493          Vector Saturating Rounding Doubling Multiply Returning High Half.
1494          These IROps multiply corresponding elements in two vectors, double
1495          the results, and place the most significant half of the final results
1496          in the destination vector.  The results are truncated or rounded.  If
1497          any of the results overflow, they are saturated.  To be more precise,
1498          for each lane, the computed result is:
1499            QDMulHi:
1500              hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2 )
1501            QRDMulHi:
1502              hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2
1503                       +q (1 << (lane-width-in-bits - 1)) )
1504       */
1505       Iop_QDMulHi16Sx8,  Iop_QDMulHi32Sx4,  /* (V128, V128) -> V128 */
1506       Iop_QRDMulHi16Sx8, Iop_QRDMulHi32Sx4, /* (V128, V128) -> V128 */
1507 
1508       /* Polynomial multiplication treats its arguments as
1509          coefficients of polynomials over {0, 1}. */
1510       Iop_PolynomialMul8x16, /* (V128, V128) -> V128 */
1511       Iop_PolynomialMull8x8, /*   (I64, I64) -> V128 */
1512 
1513       /* Vector Polynomial multiplication add.   (V128, V128) -> V128
1514 
1515        *** Below is the algorithm for the instructions. These Iops could
1516            be emulated to get this functionality, but the emulation would
1517            be long and messy.
1518 
1519         Example for polynomial multiply add for vector of bytes
1520         do i = 0 to 15
1521             prod[i].bit[0:14] <- 0
1522             srcA <- VR[argL].byte[i]
1523             srcB <- VR[argR].byte[i]
1524             do j = 0 to 7
1525                 do k = 0 to j
1526                     gbit <- srcA.bit[k] & srcB.bit[j-k]
1527                     prod[i].bit[j] <- prod[i].bit[j] ^ gbit
1528                 end
1529             end
1530 
1531             do j = 8 to 14
1532                 do k = j-7 to 7
1533                      gbit <- (srcA.bit[k] & srcB.bit[j-k])
1534                      prod[i].bit[j] <- prod[i].bit[j] ^ gbit
1535                 end
1536             end
1537         end
1538 
1539         do i = 0 to 7
1540             VR[dst].hword[i] <- 0b0 || (prod[2×i] ^ prod[2×i+1])
1541         end
1542       */
1543       Iop_PolynomialMulAdd8x16, Iop_PolynomialMulAdd16x8,
1544       Iop_PolynomialMulAdd32x4, Iop_PolynomialMulAdd64x2,
1545 
1546       /* PAIRWISE operations */
1547       /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
1548             [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
1549       Iop_PwAdd8x16, Iop_PwAdd16x8, Iop_PwAdd32x4,
1550       Iop_PwAdd32Fx2,
1551       /* Longening variant is unary. The resulting vector contains two times
1552          less elements than operand, but they are two times wider.
1553          Example:
1554             Iop_PwAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
1555                where a+b and c+d are unsigned 32-bit values. */
1556       Iop_PwAddL8Ux16, Iop_PwAddL16Ux8, Iop_PwAddL32Ux4,
1557       Iop_PwAddL8Sx16, Iop_PwAddL16Sx8, Iop_PwAddL32Sx4,
1558 
1559       /* Other unary pairwise ops */
1560 
1561       /* Vector bit matrix transpose.  (V128) -> V128 */
1562       /* For each doubleword element of the source vector, an 8-bit x 8-bit
1563        * matrix transpose is performed. */
1564       Iop_PwBitMtxXpose64x2,
1565 
1566       /* ABSOLUTE VALUE */
1567       Iop_Abs8x16, Iop_Abs16x8, Iop_Abs32x4, Iop_Abs64x2,
1568 
1569       /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
1570       Iop_Avg8Ux16, Iop_Avg16Ux8, Iop_Avg32Ux4,
1571       Iop_Avg8Sx16, Iop_Avg16Sx8, Iop_Avg32Sx4,
1572 
1573       /* MIN/MAX */
1574       Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4, Iop_Max64Sx2,
1575       Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2,
1576       Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4, Iop_Min64Sx2,
1577       Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4, Iop_Min64Ux2,
1578 
1579       /* COMPARISON */
1580       Iop_CmpEQ8x16,  Iop_CmpEQ16x8,  Iop_CmpEQ32x4,  Iop_CmpEQ64x2,
1581       Iop_CmpGT8Sx16, Iop_CmpGT16Sx8, Iop_CmpGT32Sx4, Iop_CmpGT64Sx2,
1582       Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4, Iop_CmpGT64Ux2,
1583 
1584       /* COUNT ones / leading zeroes / leading sign bits (not including topmost
1585          bit) */
1586       Iop_Cnt8x16,
1587       Iop_Clz8x16, Iop_Clz16x8, Iop_Clz32x4,
1588       Iop_Cls8x16, Iop_Cls16x8, Iop_Cls32x4,
1589 
1590       /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
1591       Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2,
1592       Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2,
1593       Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2,
1594 
1595       /* VECTOR x VECTOR SHIFT / ROTATE */
1596       /* FIXME: I'm pretty sure the ARM32 front/back ends interpret these
1597          differently from all other targets.  The intention is that
1598          the shift amount (2nd arg) is interpreted as unsigned and
1599          only the lowest log2(lane-bits) bits are relevant.  But the
1600          ARM32 versions treat the shift amount as an 8 bit signed
1601          number.  The ARM32 uses should be replaced by the relevant
1602          vector x vector bidirectional shifts instead. */
1603       Iop_Shl8x16, Iop_Shl16x8, Iop_Shl32x4, Iop_Shl64x2,
1604       Iop_Shr8x16, Iop_Shr16x8, Iop_Shr32x4, Iop_Shr64x2,
1605       Iop_Sar8x16, Iop_Sar16x8, Iop_Sar32x4, Iop_Sar64x2,
1606       Iop_Sal8x16, Iop_Sal16x8, Iop_Sal32x4, Iop_Sal64x2,
1607       Iop_Rol8x16, Iop_Rol16x8, Iop_Rol32x4, Iop_Rol64x2,
1608 
1609       /* VECTOR x VECTOR SATURATING SHIFT */
1610       Iop_QShl8x16, Iop_QShl16x8, Iop_QShl32x4, Iop_QShl64x2,
1611       Iop_QSal8x16, Iop_QSal16x8, Iop_QSal32x4, Iop_QSal64x2,
1612       /* VECTOR x INTEGER SATURATING SHIFT */
1613       Iop_QShlNsatSU8x16, Iop_QShlNsatSU16x8,
1614       Iop_QShlNsatSU32x4, Iop_QShlNsatSU64x2,
1615       Iop_QShlNsatUU8x16, Iop_QShlNsatUU16x8,
1616       Iop_QShlNsatUU32x4, Iop_QShlNsatUU64x2,
1617       Iop_QShlNsatSS8x16, Iop_QShlNsatSS16x8,
1618       Iop_QShlNsatSS32x4, Iop_QShlNsatSS64x2,
1619 
1620       /* VECTOR x VECTOR BIDIRECTIONAL SATURATING (& MAYBE ROUNDING) SHIFT */
1621       /* All of type (V128, V128) -> V256. */
1622       /* The least significant 8 bits of each lane of the second
1623          operand are used as the shift amount, and interpreted signedly.
1624          Positive values mean a shift left, negative a shift right.  The
1625          result is signedly or unsignedly saturated.  There are also
1626          rounding variants, which add 2^(shift_amount-1) to the value before
1627          shifting, but only in the shift-right case.  Vacated positions
1628          are filled with zeroes.  IOW, it's either SHR or SHL, but not SAR.
1629 
1630          These operations return 129 bits: one bit ("Q") indicating whether
1631          saturation occurred, and the shift result.  The result type is V256,
1632          of which the lower V128 is the shift result, and Q occupies the
1633          least significant bit of the upper V128.  All other bits of the
1634          upper V128 are zero. */
1635       // Unsigned saturation, no rounding
1636       Iop_QandUQsh8x16, Iop_QandUQsh16x8,
1637       Iop_QandUQsh32x4, Iop_QandUQsh64x2,
1638       // Signed saturation, no rounding
1639       Iop_QandSQsh8x16, Iop_QandSQsh16x8,
1640       Iop_QandSQsh32x4, Iop_QandSQsh64x2,
1641 
1642       // Unsigned saturation, rounding
1643       Iop_QandUQRsh8x16, Iop_QandUQRsh16x8,
1644       Iop_QandUQRsh32x4, Iop_QandUQRsh64x2,
1645       // Signed saturation, rounding
1646       Iop_QandSQRsh8x16, Iop_QandSQRsh16x8,
1647       Iop_QandSQRsh32x4, Iop_QandSQRsh64x2,
1648 
1649       /* VECTOR x VECTOR BIDIRECTIONAL (& MAYBE ROUNDING) SHIFT */
1650       /* All of type (V128, V128) -> V128 */
1651       /* The least significant 8 bits of each lane of the second
1652          operand are used as the shift amount, and interpreted signedly.
1653          Positive values mean a shift left, negative a shift right.
1654          There are also rounding variants, which add 2^(shift_amount-1)
1655          to the value before shifting, but only in the shift-right case.
1656 
1657          For left shifts, the vacated places are filled with zeroes.
1658          For right shifts, the vacated places are filled with zeroes
1659          for the U variants and sign bits for the S variants. */
1660       // Signed and unsigned, non-rounding
1661       Iop_Sh8Sx16, Iop_Sh16Sx8, Iop_Sh32Sx4, Iop_Sh64Sx2,
1662       Iop_Sh8Ux16, Iop_Sh16Ux8, Iop_Sh32Ux4, Iop_Sh64Ux2,
1663 
1664       // Signed and unsigned, rounding
1665       Iop_Rsh8Sx16, Iop_Rsh16Sx8, Iop_Rsh32Sx4, Iop_Rsh64Sx2,
1666       Iop_Rsh8Ux16, Iop_Rsh16Ux8, Iop_Rsh32Ux4, Iop_Rsh64Ux2,
1667 
1668       /* The least significant 8 bits of each lane of the second
1669          operand are used as the shift amount, and interpreted signedly.
1670          Positive values mean a shift left, negative a shift right.  The
1671          result is signedly or unsignedly saturated.  There are also
1672          rounding variants, which add 2^(shift_amount-1) to the value before
1673          shifting, but only in the shift-right case.  Vacated positions
1674          are filled with zeroes.  IOW, it's either SHR or SHL, but not SAR.
1675       */
1676 
1677       /* VECTOR x SCALAR SATURATING (& MAYBE ROUNDING) NARROWING SHIFT RIGHT */
1678       /* All of type (V128, I8) -> V128 */
1679       /* The first argument is shifted right, then narrowed to half the width
1680          by saturating it.  The second argument is a scalar shift amount that
1681          applies to all lanes, and must be a value in the range 1 to lane_width.
1682          The shift may be done signedly (Sar variants) or unsignedly (Shr
1683          variants).  The saturation is done according to the two signedness
1684          indicators at the end of the name.  For example 64Sto32U means a
1685          signed 64 bit value is saturated into an unsigned 32 bit value.
1686          Additionally, the QRS variants do rounding, that is, they add the
1687          value (1 << (shift_amount-1)) to each source lane before shifting.
1688 
1689          These operations return 65 bits: one bit ("Q") indicating whether
1690          saturation occurred, and the shift result.  The result type is V128,
1691          of which the lower half is the shift result, and Q occupies the
1692          least significant bit of the upper half.  All other bits of the
1693          upper half are zero. */
1694       // No rounding, sat U->U
1695       Iop_QandQShrNnarrow16Uto8Ux8,
1696       Iop_QandQShrNnarrow32Uto16Ux4, Iop_QandQShrNnarrow64Uto32Ux2,
1697       // No rounding, sat S->S
1698       Iop_QandQSarNnarrow16Sto8Sx8,
1699       Iop_QandQSarNnarrow32Sto16Sx4, Iop_QandQSarNnarrow64Sto32Sx2,
1700       // No rounding, sat S->U
1701       Iop_QandQSarNnarrow16Sto8Ux8,
1702       Iop_QandQSarNnarrow32Sto16Ux4, Iop_QandQSarNnarrow64Sto32Ux2,
1703 
1704       // Rounding, sat U->U
1705       Iop_QandQRShrNnarrow16Uto8Ux8,
1706       Iop_QandQRShrNnarrow32Uto16Ux4, Iop_QandQRShrNnarrow64Uto32Ux2,
1707       // Rounding, sat S->S
1708       Iop_QandQRSarNnarrow16Sto8Sx8,
1709       Iop_QandQRSarNnarrow32Sto16Sx4, Iop_QandQRSarNnarrow64Sto32Sx2,
1710       // Rounding, sat S->U
1711       Iop_QandQRSarNnarrow16Sto8Ux8,
1712       Iop_QandQRSarNnarrow32Sto16Ux4, Iop_QandQRSarNnarrow64Sto32Ux2,
1713 
1714       /* NARROWING (binary)
1715          -- narrow 2xV128 into 1xV128, hi half from left arg */
1716       /* See comments above w.r.t. U vs S issues in saturated narrowing. */
1717       Iop_QNarrowBin16Sto8Ux16, Iop_QNarrowBin32Sto16Ux8,
1718       Iop_QNarrowBin16Sto8Sx16, Iop_QNarrowBin32Sto16Sx8,
1719       Iop_QNarrowBin16Uto8Ux16, Iop_QNarrowBin32Uto16Ux8,
1720       Iop_NarrowBin16to8x16, Iop_NarrowBin32to16x8,
1721       Iop_QNarrowBin64Sto32Sx4, Iop_QNarrowBin64Uto32Ux4,
1722       Iop_NarrowBin64to32x4,
1723 
1724       /* NARROWING (unary) -- narrow V128 into I64 */
1725       Iop_NarrowUn16to8x8, Iop_NarrowUn32to16x4, Iop_NarrowUn64to32x2,
1726       /* Saturating narrowing from signed source to signed/unsigned
1727          destination */
1728       Iop_QNarrowUn16Sto8Sx8, Iop_QNarrowUn32Sto16Sx4, Iop_QNarrowUn64Sto32Sx2,
1729       Iop_QNarrowUn16Sto8Ux8, Iop_QNarrowUn32Sto16Ux4, Iop_QNarrowUn64Sto32Ux2,
1730       /* Saturating narrowing from unsigned source to unsigned destination */
1731       Iop_QNarrowUn16Uto8Ux8, Iop_QNarrowUn32Uto16Ux4, Iop_QNarrowUn64Uto32Ux2,
1732 
1733       /* WIDENING -- sign or zero extend each element of the argument
1734          vector to the twice original size.  The resulting vector consists of
1735          the same number of elements but each element and the vector itself
1736          are twice as wide.
1737          All operations are I64->V128.
1738          Example
1739             Iop_Widen32Sto64x2( [a, b] ) = [c, d]
1740                where c = Iop_32Sto64(a) and d = Iop_32Sto64(b) */
1741       Iop_Widen8Uto16x8, Iop_Widen16Uto32x4, Iop_Widen32Uto64x2,
1742       Iop_Widen8Sto16x8, Iop_Widen16Sto32x4, Iop_Widen32Sto64x2,
1743 
1744       /* INTERLEAVING */
1745       /* Interleave lanes from low or high halves of
1746          operands.  Most-significant result lane is from the left
1747          arg. */
1748       Iop_InterleaveHI8x16, Iop_InterleaveHI16x8,
1749       Iop_InterleaveHI32x4, Iop_InterleaveHI64x2,
1750       Iop_InterleaveLO8x16, Iop_InterleaveLO16x8,
1751       Iop_InterleaveLO32x4, Iop_InterleaveLO64x2,
1752       /* Interleave odd/even lanes of operands.  Most-significant result lane
1753          is from the left arg. */
1754       Iop_InterleaveOddLanes8x16, Iop_InterleaveEvenLanes8x16,
1755       Iop_InterleaveOddLanes16x8, Iop_InterleaveEvenLanes16x8,
1756       Iop_InterleaveOddLanes32x4, Iop_InterleaveEvenLanes32x4,
1757 
1758       /* CONCATENATION -- build a new value by concatenating either
1759          the even or odd lanes of both operands.  Note that
1760          Cat{Odd,Even}Lanes64x2 are identical to Interleave{HI,LO}64x2
1761          and so are omitted. */
1762       Iop_CatOddLanes8x16, Iop_CatOddLanes16x8, Iop_CatOddLanes32x4,
1763       Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8, Iop_CatEvenLanes32x4,
1764 
1765       /* GET elements of VECTOR
1766          GET is binop (V128, I8) -> I<elem_size> */
1767       /* Note: the arm back-end handles only constant second argument. */
1768       Iop_GetElem8x16, Iop_GetElem16x8, Iop_GetElem32x4, Iop_GetElem64x2,
1769 
1770       /* DUPLICATING -- copy value to all lanes */
1771       Iop_Dup8x16,   Iop_Dup16x8,   Iop_Dup32x4,
1772 
1773       /* SLICE -- produces the lowest 128 bits of (arg1:arg2) >> (8 * arg3).
1774          arg3 is a shift amount in bytes and may be between 0 and 16
1775          inclusive.  When 0, the result is arg2; when 16, the result is arg1.
1776          Not all back ends handle all values.  The arm64 back
1777          end handles only immediate arg3 values. */
1778       Iop_SliceV128,  // (V128, V128, I8) -> V128
1779 
1780       /* REVERSE the order of chunks in vector lanes.  Chunks must be
1781          smaller than the vector lanes (obviously) and so may be 8-,
1782          16- and 32-bit in size.  See definitions of 64-bit SIMD
1783          versions above for examples. */
1784       Iop_Reverse8sIn16_x8,
1785       Iop_Reverse8sIn32_x4, Iop_Reverse16sIn32_x4,
1786       Iop_Reverse8sIn64_x2, Iop_Reverse16sIn64_x2, Iop_Reverse32sIn64_x2,
1787       Iop_Reverse1sIn8_x16, /* Reverse bits in each byte lane. */
1788 
1789       /* PERMUTING -- copy src bytes to dst,
1790          as indexed by control vector bytes:
1791             for i in 0 .. 15 . result[i] = argL[ argR[i] ]
1792          argR[i] values may only be in the range 0 .. 15, else behaviour
1793          is undefined. */
1794       Iop_Perm8x16,
1795       Iop_Perm32x4, /* ditto, except argR values are restricted to 0 .. 3 */
1796 
1797       /* MISC CONVERSION -- get high bits of each byte lane, a la
1798          x86/amd64 pmovmskb */
1799       Iop_GetMSBs8x16, /* V128 -> I16 */
1800 
1801       /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
1802          See floating-point equivalents for details. */
1803       Iop_RecipEst32Ux4, Iop_RSqrtEst32Ux4,
1804 
1805       /* 128-bit multipy by 10 instruction, result is lower 128-bits */
1806       Iop_MulI128by10,
1807 
1808       /* 128-bit multipy by 10 instruction, result is carry out from the MSB */
1809       Iop_MulI128by10Carry,
1810 
1811       /* 128-bit multipy by 10 instruction, result is lower 128-bits of the
1812        * source times 10 plus the carry in
1813        */
1814       Iop_MulI128by10E,
1815 
1816       /* 128-bit multipy by 10 instruction, result is carry out from the MSB
1817        * of the source times 10 plus the carry in
1818        */
1819       Iop_MulI128by10ECarry,
1820 
1821       /* ------------------ 256-bit SIMD Integer. ------------------ */
1822 
1823       /* Pack/unpack */
1824       Iop_V256to64_0,  // V256 -> I64, extract least significant lane
1825       Iop_V256to64_1,
1826       Iop_V256to64_2,
1827       Iop_V256to64_3,  // V256 -> I64, extract most significant lane
1828 
1829       Iop_64x4toV256,  // (I64,I64,I64,I64)->V256
1830                        // first arg is most significant lane
1831 
1832       Iop_V256toV128_0, // V256 -> V128, less significant lane
1833       Iop_V256toV128_1, // V256 -> V128, more significant lane
1834       Iop_V128HLtoV256, // (V128,V128)->V256, first arg is most signif
1835 
1836       Iop_AndV256,
1837       Iop_OrV256,
1838       Iop_XorV256,
1839       Iop_NotV256,
1840 
1841       /* MISC (vector integer cmp != 0) */
1842       Iop_CmpNEZ8x32, Iop_CmpNEZ16x16, Iop_CmpNEZ32x8, Iop_CmpNEZ64x4,
1843 
1844       Iop_Add8x32,    Iop_Add16x16,    Iop_Add32x8,    Iop_Add64x4,
1845       Iop_Sub8x32,    Iop_Sub16x16,    Iop_Sub32x8,    Iop_Sub64x4,
1846 
1847       Iop_CmpEQ8x32,  Iop_CmpEQ16x16,  Iop_CmpEQ32x8,  Iop_CmpEQ64x4,
1848       Iop_CmpGT8Sx32, Iop_CmpGT16Sx16, Iop_CmpGT32Sx8, Iop_CmpGT64Sx4,
1849 
1850       Iop_ShlN16x16, Iop_ShlN32x8, Iop_ShlN64x4,
1851       Iop_ShrN16x16, Iop_ShrN32x8, Iop_ShrN64x4,
1852       Iop_SarN16x16, Iop_SarN32x8,
1853 
1854       Iop_Max8Sx32, Iop_Max16Sx16, Iop_Max32Sx8,
1855       Iop_Max8Ux32, Iop_Max16Ux16, Iop_Max32Ux8,
1856       Iop_Min8Sx32, Iop_Min16Sx16, Iop_Min32Sx8,
1857       Iop_Min8Ux32, Iop_Min16Ux16, Iop_Min32Ux8,
1858 
1859       Iop_Mul16x16, Iop_Mul32x8,
1860       Iop_MulHi16Ux16, Iop_MulHi16Sx16,
1861 
1862       Iop_QAdd8Ux32, Iop_QAdd16Ux16,
1863       Iop_QAdd8Sx32, Iop_QAdd16Sx16,
1864       Iop_QSub8Ux32, Iop_QSub16Ux16,
1865       Iop_QSub8Sx32, Iop_QSub16Sx16,
1866 
1867       Iop_Avg8Ux32, Iop_Avg16Ux16,
1868 
1869       Iop_Perm32x8,
1870 
1871       /* (V128, V128) -> V128 */
1872       Iop_CipherV128, Iop_CipherLV128, Iop_CipherSV128,
1873       Iop_NCipherV128, Iop_NCipherLV128,
1874 
1875       /* Hash instructions, Federal Information Processing Standards
1876        * Publication 180-3 Secure Hash Standard. */
1877       /* (V128, I8) -> V128; The I8 input arg is (ST | SIX), where ST and
1878        * SIX are fields from the insn. See ISA 2.07 description of
1879        * vshasigmad and vshasigmaw insns.*/
1880       Iop_SHA512, Iop_SHA256,
1881 
1882       /* ------------------ 256-bit SIMD FP. ------------------ */
1883 
1884       /* ternary :: IRRoundingMode(I32) x V256 x V256 -> V256 */
1885       Iop_Add64Fx4, Iop_Sub64Fx4, Iop_Mul64Fx4, Iop_Div64Fx4,
1886       Iop_Add32Fx8, Iop_Sub32Fx8, Iop_Mul32Fx8, Iop_Div32Fx8,
1887 
1888       Iop_Sqrt32Fx8,
1889       Iop_Sqrt64Fx4,
1890       Iop_RSqrtEst32Fx8,
1891       Iop_RecipEst32Fx8,
1892 
1893       Iop_Max32Fx8, Iop_Min32Fx8,
1894       Iop_Max64Fx4, Iop_Min64Fx4,
1895       Iop_LAST      /* must be the last enumerator */
1896    }
1897    IROp;
1898 
1899 /* Pretty-print an op. */
1900 extern void ppIROp ( IROp );
1901 
1902 /* For a given operand return the types of its arguments and its result. */
1903 extern void typeOfPrimop ( IROp op,
1904                            /*OUTs*/ IRType* t_dst, IRType* t_arg1,
1905                            IRType* t_arg2, IRType* t_arg3, IRType* t_arg4 );
1906 
1907 /* Encoding of IEEE754-specified rounding modes.
1908    Note, various front and back ends rely on the actual numerical
1909    values of these, so do not change them. */
1910 typedef
1911    enum {
1912       Irrm_NEAREST              = 0,  // Round to nearest, ties to even
1913       Irrm_NegINF               = 1,  // Round to negative infinity
1914       Irrm_PosINF               = 2,  // Round to positive infinity
1915       Irrm_ZERO                 = 3,  // Round toward zero
1916       Irrm_NEAREST_TIE_AWAY_0   = 4,  // Round to nearest, ties away from 0
1917       Irrm_PREPARE_SHORTER      = 5,  // Round to prepare for shorter
1918                                       // precision
1919       Irrm_AWAY_FROM_ZERO       = 6,  // Round to away from 0
1920       Irrm_NEAREST_TIE_TOWARD_0 = 7   // Round to nearest, ties towards 0
1921    }
1922    IRRoundingMode;
1923 
1924 /* Binary floating point comparison result values.
1925    This is also derived from what IA32 does. */
1926 typedef
1927    enum {
1928       Ircr_UN = 0x45,
1929       Ircr_LT = 0x01,
1930       Ircr_GT = 0x00,
1931       Ircr_EQ = 0x40
1932    }
1933    IRCmpFResult;
1934 
1935 typedef IRCmpFResult IRCmpF32Result;
1936 typedef IRCmpFResult IRCmpF64Result;
1937 typedef IRCmpFResult IRCmpF128Result;
1938 
1939 /* Decimal floating point result values. */
1940 typedef IRCmpFResult IRCmpDResult;
1941 typedef IRCmpDResult IRCmpD64Result;
1942 typedef IRCmpDResult IRCmpD128Result;
1943 
1944 /* ------------------ Expressions ------------------ */
1945 
1946 typedef struct _IRQop   IRQop;   /* forward declaration */
1947 typedef struct _IRTriop IRTriop; /* forward declaration */
1948 
1949 
1950 /* The different kinds of expressions.  Their meaning is explained below
1951    in the comments for IRExpr. */
1952 typedef
1953    enum {
1954       Iex_Binder=0x1900,
1955       Iex_Get,
1956       Iex_GetI,
1957       Iex_RdTmp,
1958       Iex_Qop,
1959       Iex_Triop,
1960       Iex_Binop,
1961       Iex_Unop,
1962       Iex_Load,
1963       Iex_Const,
1964       Iex_ITE,
1965       Iex_CCall,
1966       Iex_VECRET,
1967       Iex_GSPTR
1968    }
1969    IRExprTag;
1970 
1971 /* An expression.  Stored as a tagged union.  'tag' indicates what kind
1972    of expression this is.  'Iex' is the union that holds the fields.  If
1973    an IRExpr 'e' has e.tag equal to Iex_Load, then it's a load
1974    expression, and the fields can be accessed with
1975    'e.Iex.Load.<fieldname>'.
1976 
1977    For each kind of expression, we show what it looks like when
1978    pretty-printed with ppIRExpr().
1979 */
1980 typedef
1981    struct _IRExpr
1982    IRExpr;
1983 
1984 struct _IRExpr {
1985    IRExprTag tag;
1986    union {
1987       /* Used only in pattern matching within Vex.  Should not be seen
1988          outside of Vex. */
1989       struct {
1990          Int binder;
1991       } Binder;
1992 
1993       /* Read a guest register, at a fixed offset in the guest state.
1994          ppIRExpr output: GET:<ty>(<offset>), eg. GET:I32(0)
1995       */
1996       struct {
1997          Int    offset;    /* Offset into the guest state */
1998          IRType ty;        /* Type of the value being read */
1999       } Get;
2000 
2001       /* Read a guest register at a non-fixed offset in the guest
2002          state.  This allows circular indexing into parts of the guest
2003          state, which is essential for modelling situations where the
2004          identity of guest registers is not known until run time.  One
2005          example is the x87 FP register stack.
2006 
2007          The part of the guest state to be treated as a circular array
2008          is described in the IRRegArray 'descr' field.  It holds the
2009          offset of the first element in the array, the type of each
2010          element, and the number of elements.
2011 
2012          The array index is indicated rather indirectly, in a way
2013          which makes optimisation easy: as the sum of variable part
2014          (the 'ix' field) and a constant offset (the 'bias' field).
2015 
2016          Since the indexing is circular, the actual array index to use
2017          is computed as (ix + bias) % num-of-elems-in-the-array.
2018 
2019          Here's an example.  The description
2020 
2021             (96:8xF64)[t39,-7]
2022 
2023          describes an array of 8 F64-typed values, the
2024          guest-state-offset of the first being 96.  This array is
2025          being indexed at (t39 - 7) % 8.
2026 
2027          It is important to get the array size/type exactly correct
2028          since IR optimisation looks closely at such info in order to
2029          establish aliasing/non-aliasing between seperate GetI and
2030          PutI events, which is used to establish when they can be
2031          reordered, etc.  Putting incorrect info in will lead to
2032          obscure IR optimisation bugs.
2033 
2034             ppIRExpr output: GETI<descr>[<ix>,<bias]
2035                          eg. GETI(128:8xI8)[t1,0]
2036       */
2037       struct {
2038          IRRegArray* descr; /* Part of guest state treated as circular */
2039          IRExpr*     ix;    /* Variable part of index into array */
2040          Int         bias;  /* Constant offset part of index into array */
2041       } GetI;
2042 
2043       /* The value held by a temporary.
2044          ppIRExpr output: t<tmp>, eg. t1
2045       */
2046       struct {
2047          IRTemp tmp;       /* The temporary number */
2048       } RdTmp;
2049 
2050       /* A quaternary operation.
2051          ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>, <arg4>),
2052                       eg. MAddF64r32(t1, t2, t3, t4)
2053       */
2054       struct {
2055         IRQop* details;
2056       } Qop;
2057 
2058       /* A ternary operation.
2059          ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>),
2060                       eg. MulF64(1, 2.0, 3.0)
2061       */
2062       struct {
2063         IRTriop* details;
2064       } Triop;
2065 
2066       /* A binary operation.
2067          ppIRExpr output: <op>(<arg1>, <arg2>), eg. Add32(t1,t2)
2068       */
2069       struct {
2070          IROp op;          /* op-code   */
2071          IRExpr* arg1;     /* operand 1 */
2072          IRExpr* arg2;     /* operand 2 */
2073       } Binop;
2074 
2075       /* A unary operation.
2076          ppIRExpr output: <op>(<arg>), eg. Neg8(t1)
2077       */
2078       struct {
2079          IROp    op;       /* op-code */
2080          IRExpr* arg;      /* operand */
2081       } Unop;
2082 
2083       /* A load from memory -- a normal load, not a load-linked.
2084          Load-Linkeds (and Store-Conditionals) are instead represented
2085          by IRStmt.LLSC since Load-Linkeds have side effects and so
2086          are not semantically valid IRExpr's.
2087          ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
2088       */
2089       struct {
2090          IREndness end;    /* Endian-ness of the load */
2091          IRType    ty;     /* Type of the loaded value */
2092          IRExpr*   addr;   /* Address being loaded from */
2093       } Load;
2094 
2095       /* A constant-valued expression.
2096          ppIRExpr output: <con>, eg. 0x4:I32
2097       */
2098       struct {
2099          IRConst* con;     /* The constant itself */
2100       } Const;
2101 
2102       /* A call to a pure (no side-effects) helper C function.
2103 
2104          With the 'cee' field, 'name' is the function's name.  It is
2105          only used for pretty-printing purposes.  The address to call
2106          (host address, of course) is stored in the 'addr' field
2107          inside 'cee'.
2108 
2109          The 'args' field is a NULL-terminated array of arguments.
2110          The stated return IRType, and the implied argument types,
2111          must match that of the function being called well enough so
2112          that the back end can actually generate correct code for the
2113          call.
2114 
2115          The called function **must** satisfy the following:
2116 
2117          * no side effects -- must be a pure function, the result of
2118            which depends only on the passed parameters.
2119 
2120          * it may not look at, nor modify, any of the guest state
2121            since that would hide guest state transitions from
2122            instrumenters
2123 
2124          * it may not access guest memory, since that would hide
2125            guest memory transactions from the instrumenters
2126 
2127          * it must not assume that arguments are being evaluated in a
2128            particular order. The oder of evaluation is unspecified.
2129 
2130          This is restrictive, but makes the semantics clean, and does
2131          not interfere with IR optimisation.
2132 
2133          If you want to call a helper which can mess with guest state
2134          and/or memory, instead use Ist_Dirty.  This is a lot more
2135          flexible, but you have to give a bunch of details about what
2136          the helper does (and you better be telling the truth,
2137          otherwise any derived instrumentation will be wrong).  Also
2138          Ist_Dirty inhibits various IR optimisations and so can cause
2139          quite poor code to be generated.  Try to avoid it.
2140 
2141          In principle it would be allowable to have the arg vector
2142          contain an IRExpr_VECRET(), although not IRExpr_GSPTR(). However,
2143          at the moment there is no requirement for clean helper calls to
2144          be able to return V128 or V256 values.  Hence this is not allowed.
2145 
2146          ppIRExpr output: <cee>(<args>):<retty>
2147                       eg. foo{0x80489304}(t1, t2):I32
2148       */
2149       struct {
2150          IRCallee* cee;    /* Function to call. */
2151          IRType    retty;  /* Type of return value. */
2152          IRExpr**  args;   /* Vector of argument expressions. */
2153       }  CCall;
2154 
2155       /* A ternary if-then-else operator.  It returns iftrue if cond is
2156          nonzero, iffalse otherwise.  Note that it is STRICT, ie. both
2157          iftrue and iffalse are evaluated in all cases.
2158 
2159          ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
2160                          eg. ITE(t6,t7,t8)
2161       */
2162       struct {
2163          IRExpr* cond;     /* Condition */
2164          IRExpr* iftrue;   /* True expression */
2165          IRExpr* iffalse;  /* False expression */
2166       } ITE;
2167    } Iex;
2168 };
2169 
2170 /* Expression auxiliaries: a ternary expression. */
2171 struct _IRTriop {
2172    IROp op;          /* op-code   */
2173    IRExpr* arg1;     /* operand 1 */
2174    IRExpr* arg2;     /* operand 2 */
2175    IRExpr* arg3;     /* operand 3 */
2176 };
2177 
2178 /* Expression auxiliaries: a quarternary expression. */
2179 struct _IRQop {
2180    IROp op;          /* op-code   */
2181    IRExpr* arg1;     /* operand 1 */
2182    IRExpr* arg2;     /* operand 2 */
2183    IRExpr* arg3;     /* operand 3 */
2184    IRExpr* arg4;     /* operand 4 */
2185 };
2186 
2187 
2188 /* Two special kinds of IRExpr, which can ONLY be used in
2189    argument lists for dirty helper calls (IRDirty.args) and in NO
2190    OTHER PLACES.  And then only in very limited ways.  */
2191 
2192 /* Denotes an argument which (in the helper) takes a pointer to a
2193    (naturally aligned) V128 or V256, into which the helper is expected
2194    to write its result.  Use of IRExpr_VECRET() is strictly
2195    controlled.  If the helper returns a V128 or V256 value then
2196    IRExpr_VECRET() must appear exactly once in the arg list, although
2197    it can appear anywhere, and the helper must have a C 'void' return
2198    type.  If the helper returns any other type, IRExpr_VECRET() may
2199    not appear in the argument list. */
2200 
2201 /* Denotes an void* argument which is passed to the helper, which at
2202    run time will point to the thread's guest state area.  This can
2203    only appear at most once in an argument list, and it may not appear
2204    at all in argument lists for clean helper calls. */
2205 
is_IRExpr_VECRET_or_GSPTR(const IRExpr * e)2206 static inline Bool is_IRExpr_VECRET_or_GSPTR ( const IRExpr* e ) {
2207    return e->tag == Iex_VECRET || e->tag == Iex_GSPTR;
2208 }
2209 
2210 
2211 /* Expression constructors. */
2212 extern IRExpr* IRExpr_Binder ( Int binder );
2213 extern IRExpr* IRExpr_Get    ( Int off, IRType ty );
2214 extern IRExpr* IRExpr_GetI   ( IRRegArray* descr, IRExpr* ix, Int bias );
2215 extern IRExpr* IRExpr_RdTmp  ( IRTemp tmp );
2216 extern IRExpr* IRExpr_Qop    ( IROp op, IRExpr* arg1, IRExpr* arg2,
2217                                         IRExpr* arg3, IRExpr* arg4 );
2218 extern IRExpr* IRExpr_Triop  ( IROp op, IRExpr* arg1,
2219                                         IRExpr* arg2, IRExpr* arg3 );
2220 extern IRExpr* IRExpr_Binop  ( IROp op, IRExpr* arg1, IRExpr* arg2 );
2221 extern IRExpr* IRExpr_Unop   ( IROp op, IRExpr* arg );
2222 extern IRExpr* IRExpr_Load   ( IREndness end, IRType ty, IRExpr* addr );
2223 extern IRExpr* IRExpr_Const  ( IRConst* con );
2224 extern IRExpr* IRExpr_CCall  ( IRCallee* cee, IRType retty, IRExpr** args );
2225 extern IRExpr* IRExpr_ITE    ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
2226 extern IRExpr* IRExpr_VECRET ( void );
2227 extern IRExpr* IRExpr_GSPTR  ( void );
2228 
2229 /* Deep-copy an IRExpr. */
2230 extern IRExpr* deepCopyIRExpr ( const IRExpr* );
2231 
2232 /* Pretty-print an IRExpr. */
2233 extern void ppIRExpr ( const IRExpr* );
2234 
2235 /* NULL-terminated IRExpr vector constructors, suitable for
2236    use as arg lists in clean/dirty helper calls. */
2237 extern IRExpr** mkIRExprVec_0 ( void );
2238 extern IRExpr** mkIRExprVec_1 ( IRExpr* );
2239 extern IRExpr** mkIRExprVec_2 ( IRExpr*, IRExpr* );
2240 extern IRExpr** mkIRExprVec_3 ( IRExpr*, IRExpr*, IRExpr* );
2241 extern IRExpr** mkIRExprVec_4 ( IRExpr*, IRExpr*, IRExpr*, IRExpr* );
2242 extern IRExpr** mkIRExprVec_5 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2243                                 IRExpr* );
2244 extern IRExpr** mkIRExprVec_6 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2245                                 IRExpr*, IRExpr* );
2246 extern IRExpr** mkIRExprVec_7 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2247                                 IRExpr*, IRExpr*, IRExpr* );
2248 extern IRExpr** mkIRExprVec_8 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2249                                 IRExpr*, IRExpr*, IRExpr*, IRExpr* );
2250 extern IRExpr** mkIRExprVec_9 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2251                                 IRExpr*, IRExpr*, IRExpr*, IRExpr*, IRExpr* );
2252 extern IRExpr** mkIRExprVec_13 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2253                                  IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2254                                  IRExpr*, IRExpr*, IRExpr*, IRExpr*, IRExpr* );
2255 
2256 /* IRExpr copiers:
2257    - shallowCopy: shallow-copy (ie. create a new vector that shares the
2258      elements with the original).
2259    - deepCopy: deep-copy (ie. create a completely new vector). */
2260 extern IRExpr** shallowCopyIRExprVec ( IRExpr** );
2261 extern IRExpr** deepCopyIRExprVec ( IRExpr *const * );
2262 
2263 /* Make a constant expression from the given host word taking into
2264    account (of course) the host word size. */
2265 extern IRExpr* mkIRExpr_HWord ( HWord );
2266 
2267 /* Convenience function for constructing clean helper calls. */
2268 extern
2269 IRExpr* mkIRExprCCall ( IRType retty,
2270                         Int regparms, const HChar* name, void* addr,
2271                         IRExpr** args );
2272 
2273 
2274 /* Convenience functions for atoms (IRExprs which are either Iex_Tmp or
2275  * Iex_Const). */
isIRAtom(const IRExpr * e)2276 static inline Bool isIRAtom ( const IRExpr* e ) {
2277    return toBool(e->tag == Iex_RdTmp || e->tag == Iex_Const);
2278 }
2279 
2280 /* Are these two IR atoms identical?  Causes an assertion
2281    failure if they are passed non-atoms. */
2282 extern Bool eqIRAtom ( const IRExpr*, const IRExpr* );
2283 
2284 
2285 /* ------------------ Jump kinds ------------------ */
2286 
2287 /* This describes hints which can be passed to the dispatcher at guest
2288    control-flow transfer points.
2289 
2290    Re Ijk_InvalICache and Ijk_FlushDCache: the guest state _must_ have
2291    two pseudo-registers, guest_CMSTART and guest_CMLEN, which specify
2292    the start and length of the region to be invalidated.  CM stands
2293    for "Cache Management".  These are both the size of a guest word.
2294    It is the responsibility of the relevant toIR.c to ensure that
2295    these are filled in with suitable values before issuing a jump of
2296    kind Ijk_InvalICache or Ijk_FlushDCache.
2297 
2298    Ijk_InvalICache requests invalidation of translations taken from
2299    the requested range.  Ijk_FlushDCache requests flushing of the D
2300    cache for the specified range.
2301 
2302    Re Ijk_EmWarn and Ijk_EmFail: the guest state must have a
2303    pseudo-register guest_EMNOTE, which is 32-bits regardless of the
2304    host or guest word size.  That register should be made to hold a
2305    VexEmNote value to indicate the reason for the exit.
2306 
2307    In the case of Ijk_EmFail, the exit is fatal (Vex-generated code
2308    cannot continue) and so the jump destination can be anything.
2309 
2310    Re Ijk_Sys_ (syscall jumps): the guest state must have a
2311    pseudo-register guest_IP_AT_SYSCALL, which is the size of a guest
2312    word.  Front ends should set this to be the IP at the most recently
2313    executed kernel-entering (system call) instruction.  This makes it
2314    very much easier (viz, actually possible at all) to back up the
2315    guest to restart a syscall that has been interrupted by a signal.
2316 */
2317 typedef
2318    enum {
2319       Ijk_INVALID=0x1A00,
2320       Ijk_Boring,         /* not interesting; just goto next */
2321       Ijk_Call,           /* guest is doing a call */
2322       Ijk_Ret,            /* guest is doing a return */
2323       Ijk_ClientReq,      /* do guest client req before continuing */
2324       Ijk_Yield,          /* client is yielding to thread scheduler */
2325       Ijk_EmWarn,         /* report emulation warning before continuing */
2326       Ijk_EmFail,         /* emulation critical (FATAL) error; give up */
2327       Ijk_NoDecode,       /* current instruction cannot be decoded */
2328       Ijk_MapFail,        /* Vex-provided address translation failed */
2329       Ijk_InvalICache,    /* Inval icache for range [CMSTART, +CMLEN) */
2330       Ijk_FlushDCache,    /* Flush dcache for range [CMSTART, +CMLEN) */
2331       Ijk_NoRedir,        /* Jump to un-redirected guest addr */
2332       Ijk_SigILL,         /* current instruction synths SIGILL */
2333       Ijk_SigTRAP,        /* current instruction synths SIGTRAP */
2334       Ijk_SigSEGV,        /* current instruction synths SIGSEGV */
2335       Ijk_SigBUS,         /* current instruction synths SIGBUS */
2336       Ijk_SigFPE_IntDiv,  /* current instruction synths SIGFPE - IntDiv */
2337       Ijk_SigFPE_IntOvf,  /* current instruction synths SIGFPE - IntOvf */
2338       /* Unfortunately, various guest-dependent syscall kinds.  They
2339 	 all mean: do a syscall before continuing. */
2340       Ijk_Sys_syscall,    /* amd64/x86 'syscall', ppc 'sc', arm 'svc #0' */
2341       Ijk_Sys_int32,      /* amd64/x86 'int $0x20' */
2342       Ijk_Sys_int128,     /* amd64/x86 'int $0x80' */
2343       Ijk_Sys_int129,     /* amd64/x86 'int $0x81' */
2344       Ijk_Sys_int130,     /* amd64/x86 'int $0x82' */
2345       Ijk_Sys_int145,     /* amd64/x86 'int $0x91' */
2346       Ijk_Sys_int210,     /* amd64/x86 'int $0xD2' */
2347       Ijk_Sys_sysenter    /* x86 'sysenter'.  guest_EIP becomes
2348                              invalid at the point this happens. */
2349    }
2350    IRJumpKind;
2351 
2352 extern void ppIRJumpKind ( IRJumpKind );
2353 
2354 
2355 /* ------------------ Dirty helper calls ------------------ */
2356 
2357 /* A dirty call is a flexible mechanism for calling (possibly
2358    conditionally) a helper function or procedure.  The helper function
2359    may read, write or modify client memory, and may read, write or
2360    modify client state.  It can take arguments and optionally return a
2361    value.  It may return different results and/or do different things
2362    when called repeatedly with the same arguments, by means of storing
2363    private state.
2364 
2365    If a value is returned, it is assigned to the nominated return
2366    temporary.
2367 
2368    Dirty calls are statements rather than expressions for obvious
2369    reasons.  If a dirty call is marked as writing guest state, any
2370    pre-existing values derived from the written parts of the guest
2371    state are invalid.  Similarly, if the dirty call is stated as
2372    writing memory, any pre-existing loaded values are invalidated by
2373    it.
2374 
2375    In order that instrumentation is possible, the call must state, and
2376    state correctly:
2377 
2378    * Whether it reads, writes or modifies memory, and if so where.
2379 
2380    * Whether it reads, writes or modifies guest state, and if so which
2381      pieces.  Several pieces may be stated, and their extents must be
2382      known at translation-time.  Each piece is allowed to repeat some
2383      number of times at a fixed interval, if required.
2384 
2385    Normally, code is generated to pass just the args to the helper.
2386    However, if IRExpr_GSPTR() is present in the argument list (at most
2387    one instance is allowed), then the guest state pointer is passed for
2388    that arg, so that the callee can access the guest state.  It is
2389    invalid for .nFxState to be zero but IRExpr_GSPTR() to be present,
2390    since .nFxState==0 is a claim that the call does not access guest
2391    state.
2392 
2393    IMPORTANT NOTE re GUARDS: Dirty calls are strict, very strict.  The
2394    arguments and 'mFx' are evaluated REGARDLESS of the guard value.
2395    The order of argument evaluation is unspecified.  The guard
2396    expression is evaluated AFTER the arguments and 'mFx' have been
2397    evaluated.  'mFx' is expected (by Memcheck) to be a defined value
2398    even if the guard evaluates to false.
2399 */
2400 
2401 #define VEX_N_FXSTATE  7   /* enough for FXSAVE/FXRSTOR on x86 */
2402 
2403 /* Effects on resources (eg. registers, memory locations) */
2404 typedef
2405    enum {
2406       Ifx_None=0x1B00,      /* no effect */
2407       Ifx_Read,             /* reads the resource */
2408       Ifx_Write,            /* writes the resource */
2409       Ifx_Modify,           /* modifies the resource */
2410    }
2411    IREffect;
2412 
2413 /* Pretty-print an IREffect */
2414 extern void ppIREffect ( IREffect );
2415 
2416 typedef
2417    struct _IRDirty {
2418       /* What to call, and details of args/results.  .guard must be
2419          non-NULL.  If .tmp is not IRTemp_INVALID, then the call
2420          returns a result which is placed in .tmp.  If at runtime the
2421          guard evaluates to false, .tmp has an 0x555..555 bit pattern
2422          written to it.  Hence conditional calls that assign .tmp are
2423          allowed. */
2424       IRCallee* cee;    /* where to call */
2425       IRExpr*   guard;  /* :: Ity_Bit.  Controls whether call happens */
2426       /* The args vector may contain IRExpr_GSPTR() and/or
2427          IRExpr_VECRET(), in both cases, at most once. */
2428       IRExpr**  args;   /* arg vector, ends in NULL. */
2429       IRTemp    tmp;    /* to assign result to, or IRTemp_INVALID if none */
2430 
2431       /* Mem effects; we allow only one R/W/M region to be stated */
2432       IREffect  mFx;    /* indicates memory effects, if any */
2433       IRExpr*   mAddr;  /* of access, or NULL if mFx==Ifx_None */
2434       Int       mSize;  /* of access, or zero if mFx==Ifx_None */
2435 
2436       /* Guest state effects; up to N allowed */
2437       Int  nFxState; /* must be 0 .. VEX_N_FXSTATE */
2438       struct {
2439          IREffect fx:16;   /* read, write or modify?  Ifx_None is invalid. */
2440          UShort   offset;
2441          UShort   size;
2442          UChar    nRepeats;
2443          UChar    repeatLen;
2444       } fxState[VEX_N_FXSTATE];
2445       /* The access can be repeated, as specified by nRepeats and
2446          repeatLen.  To describe only a single access, nRepeats and
2447          repeatLen should be zero.  Otherwise, repeatLen must be a
2448          multiple of size and greater than size. */
2449       /* Overall, the parts of the guest state denoted by (offset,
2450          size, nRepeats, repeatLen) is
2451                [offset, +size)
2452             and, if nRepeats > 0,
2453                for (i = 1; i <= nRepeats; i++)
2454                   [offset + i * repeatLen, +size)
2455          A convenient way to enumerate all segments is therefore
2456             for (i = 0; i < 1 + nRepeats; i++)
2457                [offset + i * repeatLen, +size)
2458       */
2459    }
2460    IRDirty;
2461 
2462 /* Pretty-print a dirty call */
2463 extern void     ppIRDirty ( const IRDirty* );
2464 
2465 /* Allocate an uninitialised dirty call */
2466 extern IRDirty* emptyIRDirty ( void );
2467 
2468 /* Deep-copy a dirty call */
2469 extern IRDirty* deepCopyIRDirty ( const IRDirty* );
2470 
2471 /* A handy function which takes some of the tedium out of constructing
2472    dirty helper calls.  The called function impliedly does not return
2473    any value and has a constant-True guard.  The call is marked as
2474    accessing neither guest state nor memory (hence the "unsafe"
2475    designation) -- you can change this marking later if need be.  A
2476    suitable IRCallee is constructed from the supplied bits. */
2477 extern
2478 IRDirty* unsafeIRDirty_0_N ( Int regparms, const HChar* name, void* addr,
2479                              IRExpr** args );
2480 
2481 /* Similarly, make a zero-annotation dirty call which returns a value,
2482    and assign that to the given temp. */
2483 extern
2484 IRDirty* unsafeIRDirty_1_N ( IRTemp dst,
2485                              Int regparms, const HChar* name, void* addr,
2486                              IRExpr** args );
2487 
2488 
2489 /* --------------- Memory Bus Events --------------- */
2490 
2491 typedef
2492    enum {
2493       Imbe_Fence=0x1C00,
2494       /* Needed only on ARM.  It cancels a reservation made by a
2495          preceding Linked-Load, and needs to be handed through to the
2496          back end, just as LL and SC themselves are. */
2497       Imbe_CancelReservation
2498    }
2499    IRMBusEvent;
2500 
2501 extern void ppIRMBusEvent ( IRMBusEvent );
2502 
2503 
2504 /* --------------- Compare and Swap --------------- */
2505 
2506 /* This denotes an atomic compare and swap operation, either
2507    a single-element one or a double-element one.
2508 
2509    In the single-element case:
2510 
2511      .addr is the memory address.
2512      .end  is the endianness with which memory is accessed
2513 
2514      If .addr contains the same value as .expdLo, then .dataLo is
2515      written there, else there is no write.  In both cases, the
2516      original value at .addr is copied into .oldLo.
2517 
2518      Types: .expdLo, .dataLo and .oldLo must all have the same type.
2519      It may be any integral type, viz: I8, I16, I32 or, for 64-bit
2520      guests, I64.
2521 
2522      .oldHi must be IRTemp_INVALID, and .expdHi and .dataHi must
2523      be NULL.
2524 
2525    In the double-element case:
2526 
2527      .addr is the memory address.
2528      .end  is the endianness with which memory is accessed
2529 
2530      The operation is the same:
2531 
2532      If .addr contains the same value as .expdHi:.expdLo, then
2533      .dataHi:.dataLo is written there, else there is no write.  In
2534      both cases the original value at .addr is copied into
2535      .oldHi:.oldLo.
2536 
2537      Types: .expdHi, .expdLo, .dataHi, .dataLo, .oldHi, .oldLo must
2538      all have the same type, which may be any integral type, viz: I8,
2539      I16, I32 or, for 64-bit guests, I64.
2540 
2541      The double-element case is complicated by the issue of
2542      endianness.  In all cases, the two elements are understood to be
2543      located adjacently in memory, starting at the address .addr.
2544 
2545        If .end is Iend_LE, then the .xxxLo component is at the lower
2546        address and the .xxxHi component is at the higher address, and
2547        each component is itself stored little-endianly.
2548 
2549        If .end is Iend_BE, then the .xxxHi component is at the lower
2550        address and the .xxxLo component is at the higher address, and
2551        each component is itself stored big-endianly.
2552 
2553    This allows representing more cases than most architectures can
2554    handle.  For example, x86 cannot do DCAS on 8- or 16-bit elements.
2555 
2556    How to know if the CAS succeeded?
2557 
2558    * if .oldLo == .expdLo (resp. .oldHi:.oldLo == .expdHi:.expdLo),
2559      then the CAS succeeded, .dataLo (resp. .dataHi:.dataLo) is now
2560      stored at .addr, and the original value there was .oldLo (resp
2561      .oldHi:.oldLo).
2562 
2563    * if .oldLo != .expdLo (resp. .oldHi:.oldLo != .expdHi:.expdLo),
2564      then the CAS failed, and the original value at .addr was .oldLo
2565      (resp. .oldHi:.oldLo).
2566 
2567    Hence it is easy to know whether or not the CAS succeeded.
2568 */
2569 typedef
2570    struct {
2571       IRTemp    oldHi;  /* old value of *addr is written here */
2572       IRTemp    oldLo;
2573       IREndness end;    /* endianness of the data in memory */
2574       IRExpr*   addr;   /* store address */
2575       IRExpr*   expdHi; /* expected old value at *addr */
2576       IRExpr*   expdLo;
2577       IRExpr*   dataHi; /* new value for *addr */
2578       IRExpr*   dataLo;
2579    }
2580    IRCAS;
2581 
2582 extern void ppIRCAS ( const IRCAS* cas );
2583 
2584 extern IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo,
2585                         IREndness end, IRExpr* addr,
2586                         IRExpr* expdHi, IRExpr* expdLo,
2587                         IRExpr* dataHi, IRExpr* dataLo );
2588 
2589 extern IRCAS* deepCopyIRCAS ( const IRCAS* );
2590 
2591 
2592 /* ------------------ Circular Array Put ------------------ */
2593 
2594 typedef
2595    struct {
2596       IRRegArray* descr; /* Part of guest state treated as circular */
2597       IRExpr*     ix;    /* Variable part of index into array */
2598       Int         bias;  /* Constant offset part of index into array */
2599       IRExpr*     data;  /* The value to write */
2600    } IRPutI;
2601 
2602 extern void ppIRPutI ( const IRPutI* puti );
2603 
2604 extern IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
2605                           Int bias, IRExpr* data );
2606 
2607 extern IRPutI* deepCopyIRPutI ( const IRPutI* );
2608 
2609 
2610 /* --------------- Guarded loads and stores --------------- */
2611 
2612 /* Conditional stores are straightforward.  They are the same as
2613    normal stores, with an extra 'guard' field :: Ity_I1 that
2614    determines whether or not the store actually happens.  If not,
2615    memory is unmodified.
2616 
2617    The semantics of this is that 'addr' and 'data' are fully evaluated
2618    even in the case where 'guard' evaluates to zero (false).
2619 */
2620 typedef
2621    struct {
2622       IREndness end;    /* Endianness of the store */
2623       IRExpr*   addr;   /* store address */
2624       IRExpr*   data;   /* value to write */
2625       IRExpr*   guard;  /* Guarding value */
2626    }
2627    IRStoreG;
2628 
2629 /* Conditional loads are a little more complex.  'addr' is the
2630    address, 'guard' is the guarding condition.  If the load takes
2631    place, the loaded value is placed in 'dst'.  If it does not take
2632    place, 'alt' is copied to 'dst'.  However, the loaded value is not
2633    placed directly in 'dst' -- it is first subjected to the conversion
2634    specified by 'cvt'.
2635 
2636    For example, imagine doing a conditional 8-bit load, in which the
2637    loaded value is zero extended to 32 bits.  Hence:
2638    * 'dst' and 'alt' must have type I32
2639    * 'cvt' must be a unary op which converts I8 to I32.  In this
2640      example, it would be ILGop_8Uto32.
2641 
2642    There is no explicit indication of the type at which the load is
2643    done, since that is inferrable from the arg type of 'cvt'.  Note
2644    that the types of 'alt' and 'dst' and the result type of 'cvt' must
2645    all be the same.
2646 
2647    Semantically, 'addr' is evaluated even in the case where 'guard'
2648    evaluates to zero (false), and 'alt' is evaluated even when 'guard'
2649    evaluates to one (true).  That is, 'addr' and 'alt' are always
2650    evaluated.
2651 */
2652 typedef
2653    enum {
2654       ILGop_INVALID=0x1D00,
2655       ILGop_IdentV128, /* 128 bit vector, no conversion */
2656       ILGop_Ident64,   /* 64 bit, no conversion */
2657       ILGop_Ident32,   /* 32 bit, no conversion */
2658       ILGop_16Uto32,   /* 16 bit load, Z-widen to 32 */
2659       ILGop_16Sto32,   /* 16 bit load, S-widen to 32 */
2660       ILGop_8Uto32,    /* 8 bit load, Z-widen to 32 */
2661       ILGop_8Sto32     /* 8 bit load, S-widen to 32 */
2662    }
2663    IRLoadGOp;
2664 
2665 typedef
2666    struct {
2667       IREndness end;    /* Endianness of the load */
2668       IRLoadGOp cvt;    /* Conversion to apply to the loaded value */
2669       IRTemp    dst;    /* Destination (LHS) of assignment */
2670       IRExpr*   addr;   /* Address being loaded from */
2671       IRExpr*   alt;    /* Value if load is not done. */
2672       IRExpr*   guard;  /* Guarding value */
2673    }
2674    IRLoadG;
2675 
2676 extern void ppIRStoreG ( const IRStoreG* sg );
2677 
2678 extern void ppIRLoadGOp ( IRLoadGOp cvt );
2679 
2680 extern void ppIRLoadG ( const IRLoadG* lg );
2681 
2682 extern IRStoreG* mkIRStoreG ( IREndness end,
2683                               IRExpr* addr, IRExpr* data,
2684                               IRExpr* guard );
2685 
2686 extern IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
2687                             IRTemp dst, IRExpr* addr, IRExpr* alt,
2688                             IRExpr* guard );
2689 
2690 
2691 /* ------------------ Statements ------------------ */
2692 
2693 /* The different kinds of statements.  Their meaning is explained
2694    below in the comments for IRStmt.
2695 
2696    Those marked META do not represent code, but rather extra
2697    information about the code.  These statements can be removed
2698    without affecting the functional behaviour of the code, however
2699    they are required by some IR consumers such as tools that
2700    instrument the code.
2701 */
2702 
2703 typedef
2704    enum {
2705       Ist_NoOp=0x1E00,
2706       Ist_IMark,     /* META */
2707       Ist_AbiHint,   /* META */
2708       Ist_Put,
2709       Ist_PutI,
2710       Ist_WrTmp,
2711       Ist_Store,
2712       Ist_LoadG,
2713       Ist_StoreG,
2714       Ist_CAS,
2715       Ist_LLSC,
2716       Ist_Dirty,
2717       Ist_MBE,
2718       Ist_Exit
2719    }
2720    IRStmtTag;
2721 
2722 /* A statement.  Stored as a tagged union.  'tag' indicates what kind
2723    of expression this is.  'Ist' is the union that holds the fields.
2724    If an IRStmt 'st' has st.tag equal to Iex_Store, then it's a store
2725    statement, and the fields can be accessed with
2726    'st.Ist.Store.<fieldname>'.
2727 
2728    For each kind of statement, we show what it looks like when
2729    pretty-printed with ppIRStmt().
2730 */
2731 typedef
2732    struct _IRStmt {
2733       IRStmtTag tag;
2734       union {
2735          /* A no-op (usually resulting from IR optimisation).  Can be
2736             omitted without any effect.
2737 
2738             ppIRStmt output: IR-NoOp
2739          */
2740          struct {
2741 	 } NoOp;
2742 
2743          /* META: instruction mark.  Marks the start of the statements
2744             that represent a single machine instruction (the end of
2745             those statements is marked by the next IMark or the end of
2746             the IRSB).  Contains the address and length of the
2747             instruction.
2748 
2749             It also contains a delta value.  The delta must be
2750             subtracted from a guest program counter value before
2751             attempting to establish, by comparison with the address
2752             and length values, whether or not that program counter
2753             value refers to this instruction.  For x86, amd64, ppc32,
2754             ppc64 and arm, the delta value is zero.  For Thumb
2755             instructions, the delta value is one.  This is because, on
2756             Thumb, guest PC values (guest_R15T) are encoded using the
2757             top 31 bits of the instruction address and a 1 in the lsb;
2758             hence they appear to be (numerically) 1 past the start of
2759             the instruction they refer to.  IOW, guest_R15T on ARM
2760             holds a standard ARM interworking address.
2761 
2762             ppIRStmt output: ------ IMark(<addr>, <len>, <delta>) ------,
2763                          eg. ------ IMark(0x4000792, 5, 0) ------,
2764          */
2765          struct {
2766             Addr   addr;   /* instruction address */
2767             UInt   len;    /* instruction length */
2768             UChar  delta;  /* addr = program counter as encoded in guest state
2769                                      - delta */
2770          } IMark;
2771 
2772          /* META: An ABI hint, which says something about this
2773             platform's ABI.
2774 
2775             At the moment, the only AbiHint is one which indicates
2776             that a given chunk of address space, [base .. base+len-1],
2777             has become undefined.  This is used on amd64-linux and
2778             some ppc variants to pass stack-redzoning hints to whoever
2779             wants to see them.  It also indicates the address of the
2780             next (dynamic) instruction that will be executed.  This is
2781             to help Memcheck to origin tracking.
2782 
2783             ppIRStmt output: ====== AbiHint(<base>, <len>, <nia>) ======
2784                          eg. ====== AbiHint(t1, 16, t2) ======
2785          */
2786          struct {
2787             IRExpr* base;     /* Start  of undefined chunk */
2788             Int     len;      /* Length of undefined chunk */
2789             IRExpr* nia;      /* Address of next (guest) insn */
2790          } AbiHint;
2791 
2792          /* Write a guest register, at a fixed offset in the guest state.
2793             ppIRStmt output: PUT(<offset>) = <data>, eg. PUT(60) = t1
2794          */
2795          struct {
2796             Int     offset;   /* Offset into the guest state */
2797             IRExpr* data;     /* The value to write */
2798          } Put;
2799 
2800          /* Write a guest register, at a non-fixed offset in the guest
2801             state.  See the comment for GetI expressions for more
2802             information.
2803 
2804             ppIRStmt output: PUTI<descr>[<ix>,<bias>] = <data>,
2805                          eg. PUTI(64:8xF64)[t5,0] = t1
2806          */
2807          struct {
2808             IRPutI* details;
2809          } PutI;
2810 
2811          /* Assign a value to a temporary.  Note that SSA rules require
2812             each tmp is only assigned to once.  IR sanity checking will
2813             reject any block containing a temporary which is not assigned
2814             to exactly once.
2815 
2816             ppIRStmt output: t<tmp> = <data>, eg. t1 = 3
2817          */
2818          struct {
2819             IRTemp  tmp;   /* Temporary  (LHS of assignment) */
2820             IRExpr* data;  /* Expression (RHS of assignment) */
2821          } WrTmp;
2822 
2823          /* Write a value to memory.  This is a normal store, not a
2824             Store-Conditional.  To represent a Store-Conditional,
2825             instead use IRStmt.LLSC.
2826             ppIRStmt output: ST<end>(<addr>) = <data>, eg. STle(t1) = t2
2827          */
2828          struct {
2829             IREndness end;    /* Endianness of the store */
2830             IRExpr*   addr;   /* store address */
2831             IRExpr*   data;   /* value to write */
2832          } Store;
2833 
2834          /* Guarded store.  Note that this is defined to evaluate all
2835             expression fields (addr, data) even if the guard evaluates
2836             to false.
2837             ppIRStmt output:
2838               if (<guard>) ST<end>(<addr>) = <data> */
2839          struct {
2840             IRStoreG* details;
2841          } StoreG;
2842 
2843          /* Guarded load.  Note that this is defined to evaluate all
2844             expression fields (addr, alt) even if the guard evaluates
2845             to false.
2846             ppIRStmt output:
2847               t<tmp> = if (<guard>) <cvt>(LD<end>(<addr>)) else <alt> */
2848          struct {
2849             IRLoadG* details;
2850          } LoadG;
2851 
2852          /* Do an atomic compare-and-swap operation.  Semantics are
2853             described above on a comment at the definition of IRCAS.
2854 
2855             ppIRStmt output:
2856                t<tmp> = CAS<end>(<addr> :: <expected> -> <new>)
2857             eg
2858                t1 = CASle(t2 :: t3->Add32(t3,1))
2859                which denotes a 32-bit atomic increment
2860                of a value at address t2
2861 
2862             A double-element CAS may also be denoted, in which case <tmp>,
2863             <expected> and <new> are all pairs of items, separated by
2864             commas.
2865          */
2866          struct {
2867             IRCAS* details;
2868          } CAS;
2869 
2870          /* Either Load-Linked or Store-Conditional, depending on
2871             STOREDATA.
2872 
2873             If STOREDATA is NULL then this is a Load-Linked, meaning
2874             that data is loaded from memory as normal, but a
2875             'reservation' for the address is also lodged in the
2876             hardware.
2877 
2878                result = Load-Linked(addr, end)
2879 
2880             The data transfer type is the type of RESULT (I32, I64,
2881             etc).  ppIRStmt output:
2882 
2883                result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1)
2884 
2885             If STOREDATA is not NULL then this is a Store-Conditional,
2886             hence:
2887 
2888                result = Store-Conditional(addr, storedata, end)
2889 
2890             The data transfer type is the type of STOREDATA and RESULT
2891             has type Ity_I1. The store may fail or succeed depending
2892             on the state of a previously lodged reservation on this
2893             address.  RESULT is written 1 if the store succeeds and 0
2894             if it fails.  eg ppIRStmt output:
2895 
2896                result = ( ST<end>-Cond(<addr>) = <storedata> )
2897                eg t3 = ( STbe-Cond(t1, t2) )
2898 
2899             In all cases, the address must be naturally aligned for
2900             the transfer type -- any misaligned addresses should be
2901             caught by a dominating IR check and side exit.  This
2902             alignment restriction exists because on at least some
2903             LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
2904             misaligned addresses, and we have to actually generate
2905             stwcx. on the host, and we don't want it trapping on the
2906             host.
2907 
2908             Summary of rules for transfer type:
2909               STOREDATA == NULL (LL):
2910                 transfer type = type of RESULT
2911               STOREDATA != NULL (SC):
2912                 transfer type = type of STOREDATA, and RESULT :: Ity_I1
2913          */
2914          struct {
2915             IREndness end;
2916             IRTemp    result;
2917             IRExpr*   addr;
2918             IRExpr*   storedata; /* NULL => LL, non-NULL => SC */
2919          } LLSC;
2920 
2921          /* Call (possibly conditionally) a C function that has side
2922             effects (ie. is "dirty").  See the comments above the
2923             IRDirty type declaration for more information.
2924 
2925             ppIRStmt output:
2926                t<tmp> = DIRTY <guard> <effects>
2927                   ::: <callee>(<args>)
2928             eg.
2929                t1 = DIRTY t27 RdFX-gst(16,4) RdFX-gst(60,4)
2930                      ::: foo{0x380035f4}(t2)
2931          */
2932          struct {
2933             IRDirty* details;
2934          } Dirty;
2935 
2936          /* A memory bus event - a fence, or acquisition/release of the
2937             hardware bus lock.  IR optimisation treats all these as fences
2938             across which no memory references may be moved.
2939             ppIRStmt output: MBusEvent-Fence,
2940                              MBusEvent-BusLock, MBusEvent-BusUnlock.
2941          */
2942          struct {
2943             IRMBusEvent event;
2944          } MBE;
2945 
2946          /* Conditional exit from the middle of an IRSB.
2947             ppIRStmt output: if (<guard>) goto {<jk>} <dst>
2948                          eg. if (t69) goto {Boring} 0x4000AAA:I32
2949             If <guard> is true, the guest state is also updated by
2950             PUT-ing <dst> at <offsIP>.  This is done because a
2951             taken exit must update the guest program counter.
2952          */
2953          struct {
2954             IRExpr*    guard;    /* Conditional expression */
2955             IRConst*   dst;      /* Jump target (constant only) */
2956             IRJumpKind jk;       /* Jump kind */
2957             Int        offsIP;   /* Guest state offset for IP */
2958          } Exit;
2959       } Ist;
2960    }
2961    IRStmt;
2962 
2963 /* Statement constructors. */
2964 extern IRStmt* IRStmt_NoOp    ( void );
2965 extern IRStmt* IRStmt_IMark   ( Addr addr, UInt len, UChar delta );
2966 extern IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia );
2967 extern IRStmt* IRStmt_Put     ( Int off, IRExpr* data );
2968 extern IRStmt* IRStmt_PutI    ( IRPutI* details );
2969 extern IRStmt* IRStmt_WrTmp   ( IRTemp tmp, IRExpr* data );
2970 extern IRStmt* IRStmt_Store   ( IREndness end, IRExpr* addr, IRExpr* data );
2971 extern IRStmt* IRStmt_StoreG  ( IREndness end, IRExpr* addr, IRExpr* data,
2972                                 IRExpr* guard );
2973 extern IRStmt* IRStmt_LoadG   ( IREndness end, IRLoadGOp cvt, IRTemp dst,
2974                                 IRExpr* addr, IRExpr* alt, IRExpr* guard );
2975 extern IRStmt* IRStmt_CAS     ( IRCAS* details );
2976 extern IRStmt* IRStmt_LLSC    ( IREndness end, IRTemp result,
2977                                 IRExpr* addr, IRExpr* storedata );
2978 extern IRStmt* IRStmt_Dirty   ( IRDirty* details );
2979 extern IRStmt* IRStmt_MBE     ( IRMBusEvent event );
2980 extern IRStmt* IRStmt_Exit    ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
2981                                 Int offsIP );
2982 
2983 /* Deep-copy an IRStmt. */
2984 extern IRStmt* deepCopyIRStmt ( const IRStmt* );
2985 
2986 /* Pretty-print an IRStmt. */
2987 extern void ppIRStmt ( const IRStmt* );
2988 
2989 
2990 /* ------------------ Basic Blocks ------------------ */
2991 
2992 /* Type environments: a bunch of statements, expressions, etc, are
2993    incomplete without an environment indicating the type of each
2994    IRTemp.  So this provides one.  IR temporaries are really just
2995    unsigned ints and so this provides an array, 0 .. n_types_used-1 of
2996    them.
2997 */
2998 typedef
2999    struct {
3000       IRType* types;
3001       Int     types_size;
3002       Int     types_used;
3003    }
3004    IRTypeEnv;
3005 
3006 /* Obtain a new IRTemp */
3007 extern IRTemp newIRTemp ( IRTypeEnv*, IRType );
3008 
3009 /* Deep-copy a type environment */
3010 extern IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* );
3011 
3012 /* Pretty-print a type environment */
3013 extern void ppIRTypeEnv ( const IRTypeEnv* );
3014 
3015 
3016 /* Code blocks, which in proper compiler terminology are superblocks
3017    (single entry, multiple exit code sequences) contain:
3018 
3019    - A table giving a type for each temp (the "type environment")
3020    - An expandable array of statements
3021    - An expression of type 32 or 64 bits, depending on the
3022      guest's word size, indicating the next destination if the block
3023      executes all the way to the end, without a side exit
3024    - An indication of any special actions (JumpKind) needed
3025      for this final jump.
3026    - Offset of the IP field in the guest state.  This will be
3027      updated before the final jump is done.
3028 
3029    "IRSB" stands for "IR Super Block".
3030 */
3031 typedef
3032    struct {
3033       IRTypeEnv* tyenv;
3034       IRStmt**   stmts;
3035       Int        stmts_size;
3036       Int        stmts_used;
3037       IRExpr*    next;
3038       IRJumpKind jumpkind;
3039       Int        offsIP;
3040    }
3041    IRSB;
3042 
3043 /* Allocate a new, uninitialised IRSB */
3044 extern IRSB* emptyIRSB ( void );
3045 
3046 /* Deep-copy an IRSB */
3047 extern IRSB* deepCopyIRSB ( const IRSB* );
3048 
3049 /* Deep-copy an IRSB, except for the statements list, which set to be
3050    a new, empty, list of statements. */
3051 extern IRSB* deepCopyIRSBExceptStmts ( const IRSB* );
3052 
3053 /* Pretty-print an IRSB */
3054 extern void ppIRSB ( const IRSB* );
3055 
3056 /* Append an IRStmt to an IRSB */
3057 extern void addStmtToIRSB ( IRSB*, IRStmt* );
3058 
3059 
3060 /*---------------------------------------------------------------*/
3061 /*--- Helper functions for the IR                             ---*/
3062 /*---------------------------------------------------------------*/
3063 
3064 /* For messing with IR type environments */
3065 extern IRTypeEnv* emptyIRTypeEnv  ( void );
3066 
3067 /* What is the type of this expression? */
3068 extern IRType typeOfIRConst ( const IRConst* );
3069 extern IRType typeOfIRTemp  ( const IRTypeEnv*, IRTemp );
3070 extern IRType typeOfIRExpr  ( const IRTypeEnv*, const IRExpr* );
3071 
3072 /* What are the arg and result type for this IRLoadGOp? */
3073 extern void typeOfIRLoadGOp ( IRLoadGOp cvt,
3074                               /*OUT*/IRType* t_res,
3075                               /*OUT*/IRType* t_arg );
3076 
3077 /* Sanity check a BB of IR */
3078 extern void sanityCheckIRSB ( const  IRSB*  bb,
3079                               const  HChar* caller,
3080                               Bool   require_flatness,
3081                               IRType guest_word_size );
3082 extern Bool isFlatIRStmt ( const IRStmt* );
3083 
3084 /* Is this any value actually in the enumeration 'IRType' ? */
3085 extern Bool isPlausibleIRType ( IRType ty );
3086 
3087 
3088 /*---------------------------------------------------------------*/
3089 /*--- IR injection                                            ---*/
3090 /*---------------------------------------------------------------*/
3091 
3092 void vex_inject_ir(IRSB *, IREndness);
3093 
3094 
3095 #endif /* ndef __LIBVEX_IR_H */
3096 
3097 /*---------------------------------------------------------------*/
3098 /*---                                             libvex_ir.h ---*/
3099 /*---------------------------------------------------------------*/
3100