2 /*---------------------------------------------------------------*/
3 /*--- begin libvex_ir.h ---*/
4 /*---------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2004-2013 OpenWorks LLP
11 info@open-works.net
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
26 02110-1301, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29
30 Neither the names of the U.S. Department of Energy nor the
31 University of California nor the names of its contributors may be
32 used to endorse or promote products derived from this software
33 without prior written permission.
34 */
35
36 #ifndef __LIBVEX_IR_H
37 #define __LIBVEX_IR_H
38
39 #include "libvex_basictypes.h"
40
41
42 /*---------------------------------------------------------------*/
43 /*--- High-level IR description ---*/
44 /*---------------------------------------------------------------*/
45
46 /* Vex IR is an architecture-neutral intermediate representation.
47 Unlike some IRs in systems similar to Vex, it is not like assembly
48 language (ie. a list of instructions). Rather, it is more like the
49 IR that might be used in a compiler.
50
51 Code blocks
52 ~~~~~~~~~~~
53 The code is broken into small code blocks ("superblocks", type:
54 'IRSB'). Each code block typically represents from 1 to perhaps 50
55 instructions. IRSBs are single-entry, multiple-exit code blocks.
56 Each IRSB contains three things:
57 - a type environment, which indicates the type of each temporary
58 value present in the IRSB
59 - a list of statements, which represent code
     - a jump that exits from the end of the IRSB
61 Because the blocks are multiple-exit, there can be additional
62 conditional exit statements that cause control to leave the IRSB
63 before the final exit. Also because of this, IRSBs can cover
64 multiple non-consecutive sequences of code (up to 3). These are
65 recorded in the type VexGuestExtents (see libvex.h).
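
   As an illustrative sketch (using the IRSB fields declared later in
   this file), a tool that wants to visit every statement in a
   superblock might write:

      Int i;
      for (i = 0; i < sb->stmts_used; i++) {
         IRStmt* st = sb->stmts[i];
         // inspect or instrument 'st' here
      }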
66
67 Statements and expressions
68 ~~~~~~~~~~~~~~~~~~~~~~~~~~
69 Statements (type 'IRStmt') represent operations with side-effects,
70 eg. guest register writes, stores, and assignments to temporaries.
71 Expressions (type 'IRExpr') represent operations without
72 side-effects, eg. arithmetic operations, loads, constants.
73 Expressions can contain sub-expressions, forming expression trees,
   eg. (3 + (4 * load(addr1))).
75
76 Storage of guest state
77 ~~~~~~~~~~~~~~~~~~~~~~
78 The "guest state" contains the guest registers of the guest machine
79 (ie. the machine that we are simulating). It is stored by default
80 in a block of memory supplied by the user of the VEX library,
81 generally referred to as the guest state (area). To operate on
82 these registers, one must first read ("Get") them from the guest
83 state into a temporary value. Afterwards, one can write ("Put")
84 them back into the guest state.
85
86 Get and Put are characterised by a byte offset into the guest
87 state, a small integer which effectively gives the identity of the
88 referenced guest register, and a type, which indicates the size of
89 the value to be transferred.
90
91 The basic "Get" and "Put" operations are sufficient to model normal
92 fixed registers on the guest. Selected areas of the guest state
93 can be treated as a circular array of registers (type:
94 'IRRegArray'), which can be indexed at run-time. This is done with
95 the "GetI" and "PutI" primitives. This is necessary to describe
96 rotating register files, for example the x87 FPU stack, SPARC
97 register windows, and the Itanium register files.
98
99 Examples, and flattened vs. unflattened code
100 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
101 For example, consider this x86 instruction:
102
     addl %ebx, %eax
104
105 One Vex IR translation for this code would be this:
106
107 ------ IMark(0x24F275, 7, 0) ------
108 t3 = GET:I32(0) # get %eax, a 32-bit integer
109 t2 = GET:I32(12) # get %ebx, a 32-bit integer
110 t1 = Add32(t3,t2) # addl
111 PUT(0) = t1 # put %eax
112
113 (For simplicity, this ignores the effects on the condition codes, and
114 the update of the instruction pointer.)
115
116 The "IMark" is an IR statement that doesn't represent actual code.
117 Instead it indicates the address and length of the original
118 instruction. The numbers 0 and 12 are offsets into the guest state
119 for %eax and %ebx. The full list of offsets for an architecture
120 <ARCH> can be found in the type VexGuest<ARCH>State in the file
121 VEX/pub/libvex_guest_<ARCH>.h.
122
123 The five statements in this example are:
124 - the IMark
125 - three assignments to temporaries
126 - one register write (put)
127
128 The six expressions in this example are:
129 - two register reads (gets)
130 - one arithmetic (add) operation
131 - three temporaries (two nested within the Add32, one in the PUT)
132
133 The above IR is "flattened", ie. all sub-expressions are "atoms",
134 either constants or temporaries. An equivalent, unflattened version
135 would be:
136
137 PUT(0) = Add32(GET:I32(0), GET:I32(12))
138
139 IR is guaranteed to be flattened at instrumentation-time. This makes
140 instrumentation easier. Equivalent flattened and unflattened IR
141 typically results in the same generated code.
142
143 Another example, this one showing loads and stores:
144
145 addl %edx,4(%eax)
146
147 This becomes (again ignoring condition code and instruction pointer
148 updates):
149
150 ------ IMark(0x4000ABA, 3, 0) ------
151 t3 = Add32(GET:I32(0),0x4:I32)
152 t2 = LDle:I32(t3)
153 t1 = GET:I32(8)
154 t0 = Add32(t2,t1)
155 STle(t3) = t0
156
157 The "le" in "LDle" and "STle" is short for "little-endian".
158
159 No need for deallocations
160 ~~~~~~~~~~~~~~~~~~~~~~~~~
161 Although there are allocation functions for various data structures
162 in this file, there are no deallocation functions. This is because
163 Vex uses a memory allocation scheme that automatically reclaims the
164 memory used by allocated structures once translation is completed.
   This makes things easier for tools that instrument or transform code
   blocks.
167
168 SSAness and typing
169 ~~~~~~~~~~~~~~~~~~
170 The IR is fully typed. For every IRSB (IR block) it is possible to
171 say unambiguously whether or not it is correctly typed.
   Incorrectly typed IR has no meaning and VEX will refuse to
173 process it. At various points during processing VEX typechecks the
174 IR and aborts if any violations are found. This seems overkill but
175 makes it a great deal easier to build a reliable JIT.
176
177 IR also has the SSA property. SSA stands for Static Single
178 Assignment, and what it means is that each IR temporary may be
179 assigned to only once. This idea became widely used in compiler
180 construction in the mid to late 90s. It makes many IR-level
181 transformations/code improvements easier, simpler and faster.
   Whenever it typechecks an IR block, VEX also checks that the SSA
   property holds, and aborts if it does not.  So SSAness is
   mechanically and rigidly enforced.
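
   For example, a block containing

      t5 = GET:I32(0)
      t5 = Add32(t5, 0x1:I32)

   is rejected, because t5 is assigned twice; a fresh temporary must be
   used for the second assignment instead.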
185 */
186
187 /*---------------------------------------------------------------*/
188 /*--- Type definitions for the IR ---*/
189 /*---------------------------------------------------------------*/
190
191 /* General comments about naming schemes:
192
   All publicly visible functions contain the name of the primary
194 type on which they operate (IRFoo, IRBar, etc). Hence you should
195 be able to identify these functions by grepping for "IR[A-Z]".
196
197 For some type 'IRFoo':
198
199 - ppIRFoo is the printing method for IRFoo, printing it to the
200 output channel specified in the LibVEX_Initialise call.
201
202 - eqIRFoo is a structural equality predicate for IRFoos.
203
204 - deepCopyIRFoo is a deep copy constructor for IRFoos.
205 It recursively traverses the entire argument tree and
206 produces a complete new tree. All types have a deep copy
207 constructor.
208
209 - shallowCopyIRFoo is the shallow copy constructor for IRFoos.
210 It creates a new top-level copy of the supplied object,
211 but does not copy any sub-objects. Only some types have a
212 shallow copy constructor.
213 */
214
215 /* ------------------ Types ------------------ */
216
217 /* A type indicates the size of a value, and whether it's an integer, a
218 float, or a vector (SIMD) value. */
219 typedef
220 enum {
221 Ity_INVALID=0x1100,
222 Ity_I1,
223 Ity_I8,
224 Ity_I16,
225 Ity_I32,
226 Ity_I64,
227 Ity_I128, /* 128-bit scalar */
228 Ity_F16, /* 16 bit float */
229 Ity_F32, /* IEEE 754 float */
230 Ity_F64, /* IEEE 754 double */
231 Ity_D32, /* 32-bit Decimal floating point */
232 Ity_D64, /* 64-bit Decimal floating point */
233 Ity_D128, /* 128-bit Decimal floating point */
234 Ity_F128, /* 128-bit floating point; implementation defined */
235 Ity_V128, /* 128-bit SIMD */
236 Ity_V256 /* 256-bit SIMD */
237 }
238 IRType;
239
240 /* Pretty-print an IRType */
241 extern void ppIRType ( IRType );
242
243 /* Get the size (in bytes) of an IRType */
244 extern Int sizeofIRType ( IRType );
245
246 /* Translate 1/2/4/8 into Ity_I{8,16,32,64} respectively. Asserts on
247 any other input. */
248 extern IRType integerIRTypeOfSize ( Int szB );
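
/* For instance (a sketch, not part of the API), a tool that needs an
   integer type for a known byte width szB of 1, 2, 4 or 8 might write:

      IRType ty = integerIRTypeOfSize(szB);
      // ty now satisfies sizeofIRType(ty) == szB
*/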
249
250
251 /* ------------------ Endianness ------------------ */
252
253 /* IREndness is used in load IRExprs and store IRStmts. */
254 typedef
255 enum {
256 Iend_LE=0x1200, /* little endian */
257 Iend_BE /* big endian */
258 }
259 IREndness;
260
261
262 /* ------------------ Constants ------------------ */
263
264 /* IRConsts are used within 'Const' and 'Exit' IRExprs. */
265
266 /* The various kinds of constant. */
267 typedef
268 enum {
269 Ico_U1=0x1300,
270 Ico_U8,
271 Ico_U16,
272 Ico_U32,
273 Ico_U64,
274 Ico_F32, /* 32-bit IEEE754 floating */
275 Ico_F32i, /* 32-bit unsigned int to be interpreted literally
                   as an IEEE754 single value. */
277 Ico_F64, /* 64-bit IEEE754 floating */
278 Ico_F64i, /* 64-bit unsigned int to be interpreted literally
                   as an IEEE754 double value. */
280 Ico_V128, /* 128-bit restricted vector constant, with 1 bit
281 (repeated 8 times) for each of the 16 x 1-byte lanes */
282 Ico_V256 /* 256-bit restricted vector constant, with 1 bit
283 (repeated 8 times) for each of the 32 x 1-byte lanes */
284 }
285 IRConstTag;
286
287 /* A constant. Stored as a tagged union. 'tag' indicates what kind of
288 constant this is. 'Ico' is the union that holds the fields. If an
289 IRConst 'c' has c.tag equal to Ico_U32, then it's a 32-bit constant,
290 and its value can be accessed with 'c.Ico.U32'. */
291 typedef
292 struct _IRConst {
293 IRConstTag tag;
294 union {
295 Bool U1;
296 UChar U8;
297 UShort U16;
298 UInt U32;
299 ULong U64;
300 Float F32;
301 UInt F32i;
302 Double F64;
303 ULong F64i;
304 UShort V128; /* 16-bit value; see Ico_V128 comment above */
305 UInt V256; /* 32-bit value; see Ico_V256 comment above */
306 } Ico;
307 }
308 IRConst;
309
310 /* IRConst constructors */
311 extern IRConst* IRConst_U1 ( Bool );
312 extern IRConst* IRConst_U8 ( UChar );
313 extern IRConst* IRConst_U16 ( UShort );
314 extern IRConst* IRConst_U32 ( UInt );
315 extern IRConst* IRConst_U64 ( ULong );
316 extern IRConst* IRConst_F32 ( Float );
317 extern IRConst* IRConst_F32i ( UInt );
318 extern IRConst* IRConst_F64 ( Double );
319 extern IRConst* IRConst_F64i ( ULong );
320 extern IRConst* IRConst_V128 ( UShort );
321 extern IRConst* IRConst_V256 ( UInt );
322
323 /* Deep-copy an IRConst */
324 extern IRConst* deepCopyIRConst ( const IRConst* );
325
326 /* Pretty-print an IRConst */
327 extern void ppIRConst ( const IRConst* );
328
329 /* Compare two IRConsts for equality */
330 extern Bool eqIRConst ( const IRConst*, const IRConst* );
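
/* A sketch of typical use of the above (values are arbitrary):

      IRConst* c1 = IRConst_U32(42);
      IRConst* c2 = deepCopyIRConst(c1);
      // c1 and c2 are distinct heap nodes but structurally equal,
      // so eqIRConst(c1, c2) returns True.
      ppIRConst(c1);   // prints the constant on the output channel
*/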
331
332
333 /* ------------------ Call targets ------------------ */
334
335 /* Describes a helper function to call. The name part is purely for
336 pretty printing and not actually used. regparms=n tells the back
337 end that the callee has been declared
   "__attribute__((regparm(n)))", albeit indirectly, via the
   VEX_REGPARM(n) macro.  On some targets (x86) the back end will need
340 to construct a non-standard sequence to call a function declared
341 like this.
342
343 mcx_mask is a sop to Memcheck. It indicates which args should be
344 considered 'always defined' when lazily computing definedness of
345 the result. Bit 0 of mcx_mask corresponds to args[0], bit 1 to
346 args[1], etc. If a bit is set, the corresponding arg is excluded
347 (hence "x" in "mcx") from definedness checking.
348 */
349
350 typedef
351 struct {
352 Int regparms;
353 const HChar* name;
354 void* addr;
355 UInt mcx_mask;
356 }
357 IRCallee;
358
359 /* Create an IRCallee. */
360 extern IRCallee* mkIRCallee ( Int regparms, const HChar* name, void* addr );
361
362 /* Deep-copy an IRCallee. */
363 extern IRCallee* deepCopyIRCallee ( const IRCallee* );
364
365 /* Pretty-print an IRCallee. */
366 extern void ppIRCallee ( const IRCallee* );
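
/* Sketch: constructing a callee descriptor for a helper call.  'helper'
   is an illustrative function, not something provided by VEX:

      IRCallee* cee = mkIRCallee(0, "helper", (void*)helper);
      cee->mcx_mask = 1 << 1;   // ask Memcheck to treat args[1] as
                                // always defined
*/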
367
368
369 /* ------------------ Guest state arrays ------------------ */
370
371 /* This describes a section of the guest state that we want to
372 be able to index at run time, so as to be able to describe
373 indexed or rotating register files on the guest. */
374 typedef
375 struct {
376 Int base; /* guest state offset of start of indexed area */
377 IRType elemTy; /* type of each element in the indexed area */
378 Int nElems; /* number of elements in the indexed area */
379 }
380 IRRegArray;
381
382 extern IRRegArray* mkIRRegArray ( Int, IRType, Int );
383
384 extern IRRegArray* deepCopyIRRegArray ( const IRRegArray* );
385
386 extern void ppIRRegArray ( const IRRegArray* );
387 extern Bool eqIRRegArray ( const IRRegArray*, const IRRegArray* );
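
/* Sketch: the x87 FPU register stack could be described as an indexed
   area of 8 F64s.  OFFB_FPREGS is illustrative only; the real offset
   comes from the VexGuestX86State layout in libvex_guest_x86.h:

      IRRegArray* descr = mkIRRegArray(OFFB_FPREGS, Ity_F64, 8);
*/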
388
389
390 /* ------------------ Temporaries ------------------ */
391
392 /* This represents a temporary, eg. t1. The IR optimiser relies on the
393 fact that IRTemps are 32-bit ints. Do not change them to be ints of
394 any other size. */
395 typedef UInt IRTemp;
396
397 /* Pretty-print an IRTemp. */
398 extern void ppIRTemp ( IRTemp );
399
400 #define IRTemp_INVALID ((IRTemp)0xFFFFFFFF)
401
402
403 /* --------------- Primops (arity 1,2,3 and 4) --------------- */
404
405 /* Primitive operations that are used in Unop, Binop, Triop and Qop
406 IRExprs. Once we take into account integer, floating point and SIMD
407 operations of all the different sizes, there are quite a lot of them.
408 Most instructions supported by the architectures that Vex supports
409 (x86, PPC, etc) are represented. Some more obscure ones (eg. cpuid)
410 are not; they are instead handled with dirty helpers that emulate
411 their functionality. Such obscure ones are thus not directly visible
412 in the IR, but their effects on guest state (memory and registers)
413 are made visible via the annotations in IRDirty structures.
414 */
415 typedef
416 enum {
417 /* -- Do not change this ordering. The IR generators rely on
         (eg) Iop_Add64 == Iop_Add8 + 3. -- */
419
420 Iop_INVALID=0x1400,
421 Iop_Add8, Iop_Add16, Iop_Add32, Iop_Add64,
422 Iop_Sub8, Iop_Sub16, Iop_Sub32, Iop_Sub64,
423 /* Signless mul. MullS/MullU is elsewhere. */
424 Iop_Mul8, Iop_Mul16, Iop_Mul32, Iop_Mul64,
425 Iop_Or8, Iop_Or16, Iop_Or32, Iop_Or64,
426 Iop_And8, Iop_And16, Iop_And32, Iop_And64,
427 Iop_Xor8, Iop_Xor16, Iop_Xor32, Iop_Xor64,
428 Iop_Shl8, Iop_Shl16, Iop_Shl32, Iop_Shl64,
429 Iop_Shr8, Iop_Shr16, Iop_Shr32, Iop_Shr64,
430 Iop_Sar8, Iop_Sar16, Iop_Sar32, Iop_Sar64,
431 /* Integer comparisons. */
432 Iop_CmpEQ8, Iop_CmpEQ16, Iop_CmpEQ32, Iop_CmpEQ64,
433 Iop_CmpNE8, Iop_CmpNE16, Iop_CmpNE32, Iop_CmpNE64,
434 /* Tags for unary ops */
435 Iop_Not8, Iop_Not16, Iop_Not32, Iop_Not64,
436
437 /* Exactly like CmpEQ8/16/32/64, but carrying the additional
438 hint that these compute the success/failure of a CAS
439 operation, and hence are almost certainly applied to two
440 copies of the same value, which in turn has implications for
441 Memcheck's instrumentation. */
442 Iop_CasCmpEQ8, Iop_CasCmpEQ16, Iop_CasCmpEQ32, Iop_CasCmpEQ64,
443 Iop_CasCmpNE8, Iop_CasCmpNE16, Iop_CasCmpNE32, Iop_CasCmpNE64,
444
445 /* Exactly like CmpNE8/16/32/64, but carrying the additional
         hint that these need expensive definedness tracking. */
447 Iop_ExpCmpNE8, Iop_ExpCmpNE16, Iop_ExpCmpNE32, Iop_ExpCmpNE64,
448
449 /* -- Ordering not important after here. -- */
450
451 /* Widening multiplies */
452 Iop_MullS8, Iop_MullS16, Iop_MullS32, Iop_MullS64,
453 Iop_MullU8, Iop_MullU16, Iop_MullU32, Iop_MullU64,
454
      /* Weirdo integer stuff */
456 Iop_Clz64, Iop_Clz32, /* count leading zeroes */
457 Iop_Ctz64, Iop_Ctz32, /* count trailing zeros */
458 /* Ctz64/Ctz32/Clz64/Clz32 are UNDEFINED when given arguments of
459 zero. You must ensure they are never given a zero argument.
460 */
461
462 /* Standard integer comparisons */
463 Iop_CmpLT32S, Iop_CmpLT64S,
464 Iop_CmpLE32S, Iop_CmpLE64S,
465 Iop_CmpLT32U, Iop_CmpLT64U,
466 Iop_CmpLE32U, Iop_CmpLE64U,
467
468 /* As a sop to Valgrind-Memcheck, the following are useful. */
469 Iop_CmpNEZ8, Iop_CmpNEZ16, Iop_CmpNEZ32, Iop_CmpNEZ64,
      Iop_CmpwNEZ32, Iop_CmpwNEZ64, /* all-0s -> all-0s; other -> all-1s */
471 Iop_Left8, Iop_Left16, Iop_Left32, Iop_Left64, /* \x -> x | -x */
472 Iop_Max32U, /* unsigned max */
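      /* Worked examples (values are arbitrary):
            CmpwNEZ32(0x0)  = 0x00000000
            CmpwNEZ32(0xD)  = 0xFFFFFFFF
            Left32(0x40)    = 0xFFFFFFC0   (== 0x40 | -0x40) */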
473
474 /* PowerPC-style 3-way integer comparisons. Without them it is
475 difficult to simulate PPC efficiently.
476 op(x,y) | x < y = 0x8 else
477 | x > y = 0x4 else
478 | x == y = 0x2
479 */
480 Iop_CmpORD32U, Iop_CmpORD64U,
481 Iop_CmpORD32S, Iop_CmpORD64S,
482
483 /* Division */
484 /* TODO: clarify semantics wrt rounding, negative values, whatever */
485 Iop_DivU32, // :: I32,I32 -> I32 (simple div, no mod)
486 Iop_DivS32, // ditto, signed
487 Iop_DivU64, // :: I64,I64 -> I64 (simple div, no mod)
488 Iop_DivS64, // ditto, signed
489 Iop_DivU64E, // :: I64,I64 -> I64 (dividend is 64-bit arg (hi)
490 // concat with 64 0's (low))
491 Iop_DivS64E, // ditto, signed
492 Iop_DivU32E, // :: I32,I32 -> I32 (dividend is 32-bit arg (hi)
493 // concat with 32 0's (low))
494 Iop_DivS32E, // ditto, signed
495
496 Iop_DivModU64to32, // :: I64,I32 -> I64
497 // of which lo half is div and hi half is mod
498 Iop_DivModS64to32, // ditto, signed
499
500 Iop_DivModU128to64, // :: V128,I64 -> V128
501 // of which lo half is div and hi half is mod
502 Iop_DivModS128to64, // ditto, signed
503
504 Iop_DivModS64to64, // :: I64,I64 -> I128
505 // of which lo half is div and hi half is mod
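      /* For example, DivModU64to32(0x7, 0x3) gives quotient 2 and
         remainder 1, packed as the I64 value 0x0000000100000002
         (remainder in the high half, quotient in the low half). */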
506
507 /* Integer conversions. Some of these are redundant (eg
508 Iop_64to8 is the same as Iop_64to32 and then Iop_32to8), but
509 having a complete set reduces the typical dynamic size of IR
510 and makes the instruction selectors easier to write. */
511
512 /* Widening conversions */
513 Iop_8Uto16, Iop_8Uto32, Iop_8Uto64,
514 Iop_16Uto32, Iop_16Uto64,
515 Iop_32Uto64,
516 Iop_8Sto16, Iop_8Sto32, Iop_8Sto64,
517 Iop_16Sto32, Iop_16Sto64,
518 Iop_32Sto64,
519
520 /* Narrowing conversions */
521 Iop_64to8, Iop_32to8, Iop_64to16,
522 /* 8 <-> 16 bit conversions */
523 Iop_16to8, // :: I16 -> I8, low half
524 Iop_16HIto8, // :: I16 -> I8, high half
525 Iop_8HLto16, // :: (I8,I8) -> I16
526 /* 16 <-> 32 bit conversions */
527 Iop_32to16, // :: I32 -> I16, low half
528 Iop_32HIto16, // :: I32 -> I16, high half
529 Iop_16HLto32, // :: (I16,I16) -> I32
530 /* 32 <-> 64 bit conversions */
531 Iop_64to32, // :: I64 -> I32, low half
532 Iop_64HIto32, // :: I64 -> I32, high half
533 Iop_32HLto64, // :: (I32,I32) -> I64
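      /* For example:
            32HLto64(0xDEADBEEF, 0xCAFEBABE) = 0xDEADBEEFCAFEBABE
            64HIto32 of that value = 0xDEADBEEF
            64to32   of that value = 0xCAFEBABE */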
534 /* 64 <-> 128 bit conversions */
535 Iop_128to64, // :: I128 -> I64, low half
536 Iop_128HIto64, // :: I128 -> I64, high half
537 Iop_64HLto128, // :: (I64,I64) -> I128
538 /* 1-bit stuff */
539 Iop_Not1, /* :: Ity_Bit -> Ity_Bit */
540 Iop_32to1, /* :: Ity_I32 -> Ity_Bit, just select bit[0] */
541 Iop_64to1, /* :: Ity_I64 -> Ity_Bit, just select bit[0] */
542 Iop_1Uto8, /* :: Ity_Bit -> Ity_I8, unsigned widen */
543 Iop_1Uto32, /* :: Ity_Bit -> Ity_I32, unsigned widen */
544 Iop_1Uto64, /* :: Ity_Bit -> Ity_I64, unsigned widen */
545 Iop_1Sto8, /* :: Ity_Bit -> Ity_I8, signed widen */
546 Iop_1Sto16, /* :: Ity_Bit -> Ity_I16, signed widen */
547 Iop_1Sto32, /* :: Ity_Bit -> Ity_I32, signed widen */
548 Iop_1Sto64, /* :: Ity_Bit -> Ity_I64, signed widen */
549
550 /* ------ Floating point. We try to be IEEE754 compliant. ------ */
551
552 /* --- Simple stuff as mandated by 754. --- */
553
554 /* Binary operations, with rounding. */
555 /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */
556 Iop_AddF64, Iop_SubF64, Iop_MulF64, Iop_DivF64,
557
558 /* :: IRRoundingMode(I32) x F32 x F32 -> F32 */
559 Iop_AddF32, Iop_SubF32, Iop_MulF32, Iop_DivF32,
560
561 /* Variants of the above which produce a 64-bit result but which
         round their result to an IEEE float range first. */
563 /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */
564 Iop_AddF64r32, Iop_SubF64r32, Iop_MulF64r32, Iop_DivF64r32,
565
566 /* Unary operations, without rounding. */
567 /* :: F64 -> F64 */
568 Iop_NegF64, Iop_AbsF64,
569
570 /* :: F32 -> F32 */
571 Iop_NegF32, Iop_AbsF32,
572
573 /* Unary operations, with rounding. */
574 /* :: IRRoundingMode(I32) x F64 -> F64 */
575 Iop_SqrtF64,
576
577 /* :: IRRoundingMode(I32) x F32 -> F32 */
578 Iop_SqrtF32,
579
580 /* Comparison, yielding GT/LT/EQ/UN(ordered), as per the following:
581 0x45 Unordered
582 0x01 LT
583 0x00 GT
584 0x40 EQ
585 This just happens to be the Intel encoding. The values
586 are recorded in the type IRCmpF64Result.
587 */
588 /* :: F64 x F64 -> IRCmpF64Result(I32) */
589 Iop_CmpF64,
590 Iop_CmpF32,
591 Iop_CmpF128,
592
593 /* --- Int to/from FP conversions. --- */
594
595 /* For the most part, these take a first argument :: Ity_I32 (as
596 IRRoundingMode) which is an indication of the rounding mode
597 to use, as per the following encoding ("the standard
598 encoding"):
599 00b to nearest (the default)
600 01b to -infinity
601 10b to +infinity
602 11b to zero
603 This just happens to be the Intel encoding. For reference only,
604 the PPC encoding is:
605 00b to nearest (the default)
606 01b to zero
607 10b to +infinity
608 11b to -infinity
609 Any PPC -> IR front end will have to translate these PPC
610 encodings, as encoded in the guest state, to the standard
611 encodings, to pass to the primops.
612 For reference only, the ARM VFP encoding is:
613 00b to nearest
614 01b to +infinity
615 10b to -infinity
616 11b to zero
617 Again, this will have to be converted to the standard encoding
618 to pass to primops.
619
620 If one of these conversions gets an out-of-range condition,
621 or a NaN, as an argument, the result is host-defined. On x86
622 the "integer indefinite" value 0x80..00 is produced. On PPC
623 it is either 0x80..00 or 0x7F..FF depending on the sign of
624 the argument.
625
626 On ARMvfp, when converting to a signed integer result, the
627 overflow result is 0x80..00 for negative args and 0x7F..FF
628 for positive args. For unsigned integer results it is
629 0x00..00 and 0xFF..FF respectively.
630
631 Rounding is required whenever the destination type cannot
632 represent exactly all values of the source type.
633 */
634 Iop_F64toI16S, /* IRRoundingMode(I32) x F64 -> signed I16 */
635 Iop_F64toI32S, /* IRRoundingMode(I32) x F64 -> signed I32 */
636 Iop_F64toI64S, /* IRRoundingMode(I32) x F64 -> signed I64 */
637 Iop_F64toI64U, /* IRRoundingMode(I32) x F64 -> unsigned I64 */
638
639 Iop_F64toI32U, /* IRRoundingMode(I32) x F64 -> unsigned I32 */
640
641 Iop_I32StoF64, /* signed I32 -> F64 */
642 Iop_I64StoF64, /* IRRoundingMode(I32) x signed I64 -> F64 */
643 Iop_I64UtoF64, /* IRRoundingMode(I32) x unsigned I64 -> F64 */
644 Iop_I64UtoF32, /* IRRoundingMode(I32) x unsigned I64 -> F32 */
645
646 Iop_I32UtoF32, /* IRRoundingMode(I32) x unsigned I32 -> F32 */
647 Iop_I32UtoF64, /* unsigned I32 -> F64 */
648
649 Iop_F32toI32S, /* IRRoundingMode(I32) x F32 -> signed I32 */
650 Iop_F32toI64S, /* IRRoundingMode(I32) x F32 -> signed I64 */
651 Iop_F32toI32U, /* IRRoundingMode(I32) x F32 -> unsigned I32 */
652 Iop_F32toI64U, /* IRRoundingMode(I32) x F32 -> unsigned I64 */
653
654 Iop_I32StoF32, /* IRRoundingMode(I32) x signed I32 -> F32 */
655 Iop_I64StoF32, /* IRRoundingMode(I32) x signed I64 -> F32 */
656
657 /* Conversion between floating point formats */
658 Iop_F32toF64, /* F32 -> F64 */
659 Iop_F64toF32, /* IRRoundingMode(I32) x F64 -> F32 */
660
661 /* Reinterpretation. Take an F64 and produce an I64 with
662 the same bit pattern, or vice versa. */
663 Iop_ReinterpF64asI64, Iop_ReinterpI64asF64,
664 Iop_ReinterpF32asI32, Iop_ReinterpI32asF32,
665
666 /* Support for 128-bit floating point */
667 Iop_F64HLtoF128,/* (high half of F128,low half of F128) -> F128 */
668 Iop_F128HItoF64,/* F128 -> high half of F128 into a F64 register */
669 Iop_F128LOtoF64,/* F128 -> low half of F128 into a F64 register */
670
671 /* :: IRRoundingMode(I32) x F128 x F128 -> F128 */
672 Iop_AddF128, Iop_SubF128, Iop_MulF128, Iop_DivF128,
673
674 /* :: F128 -> F128 */
675 Iop_NegF128, Iop_AbsF128,
676
677 /* :: IRRoundingMode(I32) x F128 -> F128 */
678 Iop_SqrtF128,
679
680 Iop_I32StoF128, /* signed I32 -> F128 */
681 Iop_I64StoF128, /* signed I64 -> F128 */
682 Iop_I32UtoF128, /* unsigned I32 -> F128 */
683 Iop_I64UtoF128, /* unsigned I64 -> F128 */
684 Iop_F32toF128, /* F32 -> F128 */
685 Iop_F64toF128, /* F64 -> F128 */
686
687 Iop_F128toI32S, /* IRRoundingMode(I32) x F128 -> signed I32 */
688 Iop_F128toI64S, /* IRRoundingMode(I32) x F128 -> signed I64 */
689 Iop_F128toI32U, /* IRRoundingMode(I32) x F128 -> unsigned I32 */
690 Iop_F128toI64U, /* IRRoundingMode(I32) x F128 -> unsigned I64 */
691 Iop_F128toF64, /* IRRoundingMode(I32) x F128 -> F64 */
692 Iop_F128toF32, /* IRRoundingMode(I32) x F128 -> F32 */
693
694 /* --- guest x86/amd64 specifics, not mandated by 754. --- */
695
696 /* Binary ops, with rounding. */
697 /* :: IRRoundingMode(I32) x F64 x F64 -> F64 */
698 Iop_AtanF64, /* FPATAN, arctan(arg1/arg2) */
699 Iop_Yl2xF64, /* FYL2X, arg1 * log2(arg2) */
700 Iop_Yl2xp1F64, /* FYL2XP1, arg1 * log2(arg2+1.0) */
701 Iop_PRemF64, /* FPREM, non-IEEE remainder(arg1/arg2) */
702 Iop_PRemC3210F64, /* C3210 flags resulting from FPREM, :: I32 */
703 Iop_PRem1F64, /* FPREM1, IEEE remainder(arg1/arg2) */
704 Iop_PRem1C3210F64, /* C3210 flags resulting from FPREM1, :: I32 */
705 Iop_ScaleF64, /* FSCALE, arg1 * (2^RoundTowardsZero(arg2)) */
706 /* Note that on x86 guest, PRem1{C3210} has the same behaviour
707 as the IEEE mandated RemF64, except it is limited in the
708 range of its operand. Hence the partialness. */
709
710 /* Unary ops, with rounding. */
711 /* :: IRRoundingMode(I32) x F64 -> F64 */
712 Iop_SinF64, /* FSIN */
713 Iop_CosF64, /* FCOS */
714 Iop_TanF64, /* FTAN */
715 Iop_2xm1F64, /* (2^arg - 1.0) */
716 Iop_RoundF64toInt, /* F64 value to nearest integral value (still
717 as F64) */
718 Iop_RoundF32toInt, /* F32 value to nearest integral value (still
719 as F32) */
720
721 /* --- guest s390 specifics, not mandated by 754. --- */
722
723 /* Fused multiply-add/sub */
724 /* :: IRRoundingMode(I32) x F32 x F32 x F32 -> F32
725 (computes arg2 * arg3 +/- arg4) */
726 Iop_MAddF32, Iop_MSubF32,
727
728 /* --- guest ppc32/64 specifics, not mandated by 754. --- */
729
730 /* Ternary operations, with rounding. */
731 /* Fused multiply-add/sub, with 112-bit intermediate
732 precision for ppc.
733 Also used to implement fused multiply-add/sub for s390. */
734 /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64
735 (computes arg2 * arg3 +/- arg4) */
736 Iop_MAddF64, Iop_MSubF64,
737
738 /* Variants of the above which produce a 64-bit result but which
         round their result to an IEEE float range first. */
740 /* :: IRRoundingMode(I32) x F64 x F64 x F64 -> F64 */
741 Iop_MAddF64r32, Iop_MSubF64r32,
742
743 /* :: F64 -> F64 */
744 Iop_RSqrtEst5GoodF64, /* reciprocal square root estimate, 5 good bits */
745 Iop_RoundF64toF64_NEAREST, /* frin */
746 Iop_RoundF64toF64_NegINF, /* frim */
747 Iop_RoundF64toF64_PosINF, /* frip */
748 Iop_RoundF64toF64_ZERO, /* friz */
749
750 /* :: F64 -> F32 */
751 Iop_TruncF64asF32, /* do F64->F32 truncation as per 'fsts' */
752
753 /* :: IRRoundingMode(I32) x F64 -> F64 */
754 Iop_RoundF64toF32, /* round F64 to nearest F32 value (still as F64) */
755 /* NB: pretty much the same as Iop_F64toF32, except no change
756 of type. */
757
758 /* --- guest arm64 specifics, not mandated by 754. --- */
759
760 Iop_RecpExpF64, /* FRECPX d :: IRRoundingMode(I32) x F64 -> F64 */
761 Iop_RecpExpF32, /* FRECPX s :: IRRoundingMode(I32) x F32 -> F32 */
762
763 /* ------------------ 16-bit scalar FP ------------------ */
764
765 Iop_F16toF64, /* F16 -> F64 */
766 Iop_F64toF16, /* IRRoundingMode(I32) x F64 -> F16 */
767
768 Iop_F16toF32, /* F16 -> F32 */
769 Iop_F32toF16, /* IRRoundingMode(I32) x F32 -> F16 */
770
771 /* ------------------ 32-bit SIMD Integer ------------------ */
772
773 /* 32x1 saturating add/sub (ok, well, not really SIMD :) */
774 Iop_QAdd32S,
775 Iop_QSub32S,
776
777 /* 16x2 add/sub, also signed/unsigned saturating variants */
778 Iop_Add16x2, Iop_Sub16x2,
779 Iop_QAdd16Sx2, Iop_QAdd16Ux2,
780 Iop_QSub16Sx2, Iop_QSub16Ux2,
781
782 /* 16x2 signed/unsigned halving add/sub. For each lane, these
783 compute bits 16:1 of (eg) sx(argL) + sx(argR),
784 or zx(argL) - zx(argR) etc. */
785 Iop_HAdd16Ux2, Iop_HAdd16Sx2,
786 Iop_HSub16Ux2, Iop_HSub16Sx2,
787
788 /* 8x4 add/sub, also signed/unsigned saturating variants */
789 Iop_Add8x4, Iop_Sub8x4,
790 Iop_QAdd8Sx4, Iop_QAdd8Ux4,
791 Iop_QSub8Sx4, Iop_QSub8Ux4,
792
793 /* 8x4 signed/unsigned halving add/sub. For each lane, these
794 compute bits 8:1 of (eg) sx(argL) + sx(argR),
795 or zx(argL) - zx(argR) etc. */
796 Iop_HAdd8Ux4, Iop_HAdd8Sx4,
797 Iop_HSub8Ux4, Iop_HSub8Sx4,
798
799 /* 8x4 sum of absolute unsigned differences. */
800 Iop_Sad8Ux4,
801
802 /* MISC (vector integer cmp != 0) */
803 Iop_CmpNEZ16x2, Iop_CmpNEZ8x4,
804
805 /* ------------------ 64-bit SIMD FP ------------------------ */
806
      /* Conversion to/from int */
      Iop_I32UtoFx2, Iop_I32StoFx2,          /* I32x2 -> F32x2 */
      Iop_FtoI32Ux2_RZ, Iop_FtoI32Sx2_RZ,    /* F32x2 -> I32x2 */
      /* Fixed32 format is a floating-point number with a fixed number of
         fraction bits.  The number of fraction bits is passed as a second
         argument of type I8. */
813 Iop_F32ToFixed32Ux2_RZ, Iop_F32ToFixed32Sx2_RZ, /* fp -> fixed-point */
814 Iop_Fixed32UToF32x2_RN, Iop_Fixed32SToF32x2_RN, /* fixed-point -> fp */
815
816 /* Binary operations */
817 Iop_Max32Fx2, Iop_Min32Fx2,
818 /* Pairwise Min and Max. See integer pairwise operations for more
819 details. */
820 Iop_PwMax32Fx2, Iop_PwMin32Fx2,
821 /* Note: For the following compares, the arm front-end assumes a
822 nan in a lane of either argument returns zero for that lane. */
823 Iop_CmpEQ32Fx2, Iop_CmpGT32Fx2, Iop_CmpGE32Fx2,
824
825 /* Vector Reciprocal Estimate finds an approximate reciprocal of each
826 element in the operand vector, and places the results in the destination
827 vector. */
828 Iop_RecipEst32Fx2,
829
830 /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
         Note that if one of the arguments is zero and the other is an
         infinity of arbitrary sign, the result of the operation is 2.0. */
833 Iop_RecipStep32Fx2,
834
835 /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
836 square root of each element in the operand vector. */
837 Iop_RSqrtEst32Fx2,
838
839 /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
         Note that if one of the arguments is zero and the other is an
         infinity of arbitrary sign, the result of the operation is 1.5. */
842 Iop_RSqrtStep32Fx2,
843
844 /* Unary */
845 Iop_Neg32Fx2, Iop_Abs32Fx2,
846
847 /* ------------------ 64-bit SIMD Integer. ------------------ */
848
849 /* MISC (vector integer cmp != 0) */
850 Iop_CmpNEZ8x8, Iop_CmpNEZ16x4, Iop_CmpNEZ32x2,
851
852 /* ADDITION (normal / unsigned sat / signed sat) */
853 Iop_Add8x8, Iop_Add16x4, Iop_Add32x2,
854 Iop_QAdd8Ux8, Iop_QAdd16Ux4, Iop_QAdd32Ux2, Iop_QAdd64Ux1,
855 Iop_QAdd8Sx8, Iop_QAdd16Sx4, Iop_QAdd32Sx2, Iop_QAdd64Sx1,
856
857 /* PAIRWISE operations */
858 /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
859 [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
860 Iop_PwAdd8x8, Iop_PwAdd16x4, Iop_PwAdd32x2,
861 Iop_PwMax8Sx8, Iop_PwMax16Sx4, Iop_PwMax32Sx2,
862 Iop_PwMax8Ux8, Iop_PwMax16Ux4, Iop_PwMax32Ux2,
863 Iop_PwMin8Sx8, Iop_PwMin16Sx4, Iop_PwMin32Sx2,
864 Iop_PwMin8Ux8, Iop_PwMin16Ux4, Iop_PwMin32Ux2,
      /* The lengthening ("L") variants are unary.  The resulting vector
         contains half as many elements as the operand, but each element
         is twice as wide.
         Example:
            Iop_PwAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
            where a+b and c+d are unsigned 32-bit values. */
870 Iop_PwAddL8Ux8, Iop_PwAddL16Ux4, Iop_PwAddL32Ux2,
871 Iop_PwAddL8Sx8, Iop_PwAddL16Sx4, Iop_PwAddL32Sx2,
872
873 /* SUBTRACTION (normal / unsigned sat / signed sat) */
874 Iop_Sub8x8, Iop_Sub16x4, Iop_Sub32x2,
875 Iop_QSub8Ux8, Iop_QSub16Ux4, Iop_QSub32Ux2, Iop_QSub64Ux1,
876 Iop_QSub8Sx8, Iop_QSub16Sx4, Iop_QSub32Sx2, Iop_QSub64Sx1,
877
878 /* ABSOLUTE VALUE */
879 Iop_Abs8x8, Iop_Abs16x4, Iop_Abs32x2,
880
      /* MULTIPLICATION (normal / high half of signed/unsigned / polynomial) */
882 Iop_Mul8x8, Iop_Mul16x4, Iop_Mul32x2,
883 Iop_Mul32Fx2,
884 Iop_MulHi16Ux4,
885 Iop_MulHi16Sx4,
      /* Polynomial multiplication treats its arguments as coefficients of
         polynomials over {0, 1}. */
888 Iop_PolynomialMul8x8,
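      /* For example, an 8-bit polynomial multiply of lanes holding 0x03
         and 0x03 gives 0x05, since (x+1)*(x+1) = x^2+1 over {0, 1}
         (no carries are propagated). */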
889
890 /* Vector Saturating Doubling Multiply Returning High Half and
891 Vector Saturating Rounding Doubling Multiply Returning High Half */
      /* These IROps multiply corresponding elements in two vectors, double
893 the results, and place the most significant half of the final results
894 in the destination vector. The results are truncated or rounded. If
895 any of the results overflow, they are saturated. */
896 Iop_QDMulHi16Sx4, Iop_QDMulHi32Sx2,
897 Iop_QRDMulHi16Sx4, Iop_QRDMulHi32Sx2,
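      /* Worked example for the 16-bit lanes (values chosen arbitrarily):
            QDMulHi16Sx4:  0x4000 with 0x4000 -> 0x2000
            QDMulHi16Sx4:  0x8000 with 0x8000 -> 0x7FFF (the doubled
                           product overflows, so the result saturates) */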
898
899 /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
900 Iop_Avg8Ux8,
901 Iop_Avg16Ux4,
902
903 /* MIN/MAX */
904 Iop_Max8Sx8, Iop_Max16Sx4, Iop_Max32Sx2,
905 Iop_Max8Ux8, Iop_Max16Ux4, Iop_Max32Ux2,
906 Iop_Min8Sx8, Iop_Min16Sx4, Iop_Min32Sx2,
907 Iop_Min8Ux8, Iop_Min16Ux4, Iop_Min32Ux2,
908
909 /* COMPARISON */
910 Iop_CmpEQ8x8, Iop_CmpEQ16x4, Iop_CmpEQ32x2,
911 Iop_CmpGT8Ux8, Iop_CmpGT16Ux4, Iop_CmpGT32Ux2,
912 Iop_CmpGT8Sx8, Iop_CmpGT16Sx4, Iop_CmpGT32Sx2,
913
914 /* COUNT ones / leading zeroes / leading sign bits (not including topmost
915 bit) */
916 Iop_Cnt8x8,
917 Iop_Clz8x8, Iop_Clz16x4, Iop_Clz32x2,
918 Iop_Cls8x8, Iop_Cls16x4, Iop_Cls32x2,
919 Iop_Clz64x2,
920
921 /* VECTOR x VECTOR SHIFT / ROTATE */
922 Iop_Shl8x8, Iop_Shl16x4, Iop_Shl32x2,
923 Iop_Shr8x8, Iop_Shr16x4, Iop_Shr32x2,
924 Iop_Sar8x8, Iop_Sar16x4, Iop_Sar32x2,
925 Iop_Sal8x8, Iop_Sal16x4, Iop_Sal32x2, Iop_Sal64x1,
926
927 /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
928 Iop_ShlN8x8, Iop_ShlN16x4, Iop_ShlN32x2,
929 Iop_ShrN8x8, Iop_ShrN16x4, Iop_ShrN32x2,
930 Iop_SarN8x8, Iop_SarN16x4, Iop_SarN32x2,
931
932 /* VECTOR x VECTOR SATURATING SHIFT */
933 Iop_QShl8x8, Iop_QShl16x4, Iop_QShl32x2, Iop_QShl64x1,
934 Iop_QSal8x8, Iop_QSal16x4, Iop_QSal32x2, Iop_QSal64x1,
935 /* VECTOR x INTEGER SATURATING SHIFT */
936 Iop_QShlNsatSU8x8, Iop_QShlNsatSU16x4,
937 Iop_QShlNsatSU32x2, Iop_QShlNsatSU64x1,
938 Iop_QShlNsatUU8x8, Iop_QShlNsatUU16x4,
939 Iop_QShlNsatUU32x2, Iop_QShlNsatUU64x1,
940 Iop_QShlNsatSS8x8, Iop_QShlNsatSS16x4,
941 Iop_QShlNsatSS32x2, Iop_QShlNsatSS64x1,
942
943 /* NARROWING (binary)
944 -- narrow 2xI64 into 1xI64, hi half from left arg */
945 /* For saturated narrowing, I believe there are 4 variants of
946 the basic arithmetic operation, depending on the signedness
947 of argument and result. Here are examples that exemplify
948 what I mean:
949
950 QNarrow16Uto8U ( UShort x ) if (x >u 255) x = 255;
951 return x[7:0];
952
953 QNarrow16Sto8S ( Short x ) if (x <s -128) x = -128;
954 if (x >s 127) x = 127;
955 return x[7:0];
956
957 QNarrow16Uto8S ( UShort x ) if (x >u 127) x = 127;
958 return x[7:0];
959
960 QNarrow16Sto8U ( Short x ) if (x <s 0) x = 0;
961 if (x >s 255) x = 255;
962 return x[7:0];
963 */
964 Iop_QNarrowBin16Sto8Ux8,
965 Iop_QNarrowBin16Sto8Sx8, Iop_QNarrowBin32Sto16Sx4,
966 Iop_NarrowBin16to8x8, Iop_NarrowBin32to16x4,
967
968 /* INTERLEAVING */
969 /* Interleave lanes from low or high halves of
970 operands. Most-significant result lane is from the left
971 arg. */
972 Iop_InterleaveHI8x8, Iop_InterleaveHI16x4, Iop_InterleaveHI32x2,
973 Iop_InterleaveLO8x8, Iop_InterleaveLO16x4, Iop_InterleaveLO32x2,
974 /* Interleave odd/even lanes of operands. Most-significant result lane
975 is from the left arg. Note that Interleave{Odd,Even}Lanes32x2 are
976 identical to Interleave{HI,LO}32x2 and so are omitted.*/
977 Iop_InterleaveOddLanes8x8, Iop_InterleaveEvenLanes8x8,
978 Iop_InterleaveOddLanes16x4, Iop_InterleaveEvenLanes16x4,
979
980 /* CONCATENATION -- build a new value by concatenating either
981 the even or odd lanes of both operands. Note that
982 Cat{Odd,Even}Lanes32x2 are identical to Interleave{HI,LO}32x2
983 and so are omitted. */
984 Iop_CatOddLanes8x8, Iop_CatOddLanes16x4,
985 Iop_CatEvenLanes8x8, Iop_CatEvenLanes16x4,
986
987 /* GET / SET elements of VECTOR
988 GET is binop (I64, I8) -> I<elem_size>
989 SET is triop (I64, I8, I<elem_size>) -> I64 */
990 /* Note: the arm back-end handles only constant second argument */
991 Iop_GetElem8x8, Iop_GetElem16x4, Iop_GetElem32x2,
992 Iop_SetElem8x8, Iop_SetElem16x4, Iop_SetElem32x2,
993
994 /* DUPLICATING -- copy value to all lanes */
995 Iop_Dup8x8, Iop_Dup16x4, Iop_Dup32x2,
996
997 /* SLICE -- produces the lowest 64 bits of (arg1:arg2) >> (8 * arg3).
998 arg3 is a shift amount in bytes and may be between 0 and 8
999 inclusive. When 0, the result is arg2; when 8, the result is arg1.
1000 Not all back ends handle all values. The arm32 and arm64 back
1001 ends handle only immediate arg3 values. */
1002 Iop_Slice64, // (I64, I64, I8) -> I64
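      /* For example (arbitrary values):
            Slice64(0x1111111111111111, 0x2222222222222222, 3)
               = 0x1111112222222222
         ie. bits [87:24] of the 128-bit concatenation arg1:arg2. */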
1003
1004 /* REVERSE the order of chunks in vector lanes. Chunks must be
1005 smaller than the vector lanes (obviously) and so may be 8-,
1006 16- and 32-bit in size. */
1007 /* Examples:
1008 Reverse8sIn16_x4([a,b,c,d,e,f,g,h]) = [b,a,d,c,f,e,h,g]
1009 Reverse8sIn32_x2([a,b,c,d,e,f,g,h]) = [d,c,b,a,h,g,f,e]
1010 Reverse8sIn64_x1([a,b,c,d,e,f,g,h]) = [h,g,f,e,d,c,b,a] */
1011 Iop_Reverse8sIn16_x4,
1012 Iop_Reverse8sIn32_x2, Iop_Reverse16sIn32_x2,
1013 Iop_Reverse8sIn64_x1, Iop_Reverse16sIn64_x1, Iop_Reverse32sIn64_x1,
1014
1015 /* PERMUTING -- copy src bytes to dst,
1016 as indexed by control vector bytes:
1017 for i in 0 .. 7 . result[i] = argL[ argR[i] ]
1018 argR[i] values may only be in the range 0 .. 7, else behaviour
1019 is undefined. */
1020 Iop_Perm8x8,
1021
1022 /* MISC CONVERSION -- get high bits of each byte lane, a la
1023 x86/amd64 pmovmskb */
1024 Iop_GetMSBs8x8, /* I64 -> I8 */
1025
1026 /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
1027 See floating-point equivalents for details. */
1028 Iop_RecipEst32Ux2, Iop_RSqrtEst32Ux2,
1029
1030 /* ------------------ Decimal Floating Point ------------------ */
1031
1032 /* ARITHMETIC INSTRUCTIONS 64-bit
1033 ----------------------------------
1034 IRRoundingMode(I32) X D64 X D64 -> D64
1035 */
1036 Iop_AddD64, Iop_SubD64, Iop_MulD64, Iop_DivD64,
1037
1038 /* ARITHMETIC INSTRUCTIONS 128-bit
1039 ----------------------------------
1040 IRRoundingMode(I32) X D128 X D128 -> D128
1041 */
1042 Iop_AddD128, Iop_SubD128, Iop_MulD128, Iop_DivD128,
1043
1044 /* SHIFT SIGNIFICAND INSTRUCTIONS
1045 * The DFP significand is shifted by the number of digits specified
1046 * by the U8 operand. Digits shifted out of the leftmost digit are
1047 * lost. Zeros are supplied to the vacated positions on the right.
1048 * The sign of the result is the same as the sign of the original
1049 * operand.
1050 *
1051 * D64 x U8 -> D64 left shift and right shift respectively */
1052 Iop_ShlD64, Iop_ShrD64,
1053
1054 /* D128 x U8 -> D128 left shift and right shift respectively */
1055 Iop_ShlD128, Iop_ShrD128,
1056
1057
1058 /* FORMAT CONVERSION INSTRUCTIONS
1059 * D32 -> D64
1060 */
1061 Iop_D32toD64,
1062
1063 /* D64 -> D128 */
1064 Iop_D64toD128,
1065
1066 /* I32S -> D128 */
1067 Iop_I32StoD128,
1068
1069 /* I32U -> D128 */
1070 Iop_I32UtoD128,
1071
1072 /* I64S -> D128 */
1073 Iop_I64StoD128,
1074
1075 /* I64U -> D128 */
1076 Iop_I64UtoD128,
1077
1078 /* IRRoundingMode(I32) x D64 -> D32 */
1079 Iop_D64toD32,
1080
1081 /* IRRoundingMode(I32) x D128 -> D64 */
1082 Iop_D128toD64,
1083
1084 /* I32S -> D64 */
1085 Iop_I32StoD64,
1086
1087 /* I32U -> D64 */
1088 Iop_I32UtoD64,
1089
1090 /* IRRoundingMode(I32) x I64 -> D64 */
1091 Iop_I64StoD64,
1092
1093 /* IRRoundingMode(I32) x I64 -> D64 */
1094 Iop_I64UtoD64,
1095
1096 /* IRRoundingMode(I32) x D64 -> I32 */
1097 Iop_D64toI32S,
1098
1099 /* IRRoundingMode(I32) x D64 -> I32 */
1100 Iop_D64toI32U,
1101
1102 /* IRRoundingMode(I32) x D64 -> I64 */
1103 Iop_D64toI64S,
1104
1105 /* IRRoundingMode(I32) x D64 -> I64 */
1106 Iop_D64toI64U,
1107
1108 /* IRRoundingMode(I32) x D128 -> I32 */
1109 Iop_D128toI32S,
1110
1111 /* IRRoundingMode(I32) x D128 -> I32 */
1112 Iop_D128toI32U,
1113
1114 /* IRRoundingMode(I32) x D128 -> I64 */
1115 Iop_D128toI64S,
1116
1117 /* IRRoundingMode(I32) x D128 -> I64 */
1118 Iop_D128toI64U,
1119
1120 /* IRRoundingMode(I32) x F32 -> D32 */
1121 Iop_F32toD32,
1122
1123 /* IRRoundingMode(I32) x F32 -> D64 */
1124 Iop_F32toD64,
1125
1126 /* IRRoundingMode(I32) x F32 -> D128 */
1127 Iop_F32toD128,
1128
1129 /* IRRoundingMode(I32) x F64 -> D32 */
1130 Iop_F64toD32,
1131
1132 /* IRRoundingMode(I32) x F64 -> D64 */
1133 Iop_F64toD64,
1134
1135 /* IRRoundingMode(I32) x F64 -> D128 */
1136 Iop_F64toD128,
1137
1138 /* IRRoundingMode(I32) x F128 -> D32 */
1139 Iop_F128toD32,
1140
1141 /* IRRoundingMode(I32) x F128 -> D64 */
1142 Iop_F128toD64,
1143
1144 /* IRRoundingMode(I32) x F128 -> D128 */
1145 Iop_F128toD128,
1146
1147 /* IRRoundingMode(I32) x D32 -> F32 */
1148 Iop_D32toF32,
1149
1150 /* IRRoundingMode(I32) x D32 -> F64 */
1151 Iop_D32toF64,
1152
1153 /* IRRoundingMode(I32) x D32 -> F128 */
1154 Iop_D32toF128,
1155
1156 /* IRRoundingMode(I32) x D64 -> F32 */
1157 Iop_D64toF32,
1158
1159 /* IRRoundingMode(I32) x D64 -> F64 */
1160 Iop_D64toF64,
1161
1162 /* IRRoundingMode(I32) x D64 -> F128 */
1163 Iop_D64toF128,
1164
1165 /* IRRoundingMode(I32) x D128 -> F32 */
1166 Iop_D128toF32,
1167
1168 /* IRRoundingMode(I32) x D128 -> F64 */
1169 Iop_D128toF64,
1170
1171 /* IRRoundingMode(I32) x D128 -> F128 */
1172 Iop_D128toF128,
1173
1174 /* ROUNDING INSTRUCTIONS
1175 * IRRoundingMode(I32) x D64 -> D64
       * If the D64 operand is a finite number, it is rounded to a
       * floating point integer value, i.e. one with no fractional part.
1178 */
1179 Iop_RoundD64toInt,
1180
1181 /* IRRoundingMode(I32) x D128 -> D128 */
1182 Iop_RoundD128toInt,
1183
1184 /* COMPARE INSTRUCTIONS
1185 * D64 x D64 -> IRCmpD64Result(I32) */
1186 Iop_CmpD64,
1187
1188 /* D128 x D128 -> IRCmpD128Result(I32) */
1189 Iop_CmpD128,
1190
      /* COMPARE BIASED EXPONENT INSTRUCTIONS
1192 * D64 x D64 -> IRCmpD64Result(I32) */
1193 Iop_CmpExpD64,
1194
1195 /* D128 x D128 -> IRCmpD128Result(I32) */
1196 Iop_CmpExpD128,
1197
1198 /* QUANTIZE AND ROUND INSTRUCTIONS
1199 * The source operand is converted and rounded to the form with the
1200 * immediate exponent specified by the rounding and exponent parameter.
1201 *
       * The second operand is converted and rounded to the form of the
       * first operand's exponent, and rounded according to the specified
       * rounding mode parameter.
       *
       * IRRoundingMode(I32) x D64 x D64 -> D64 */
1207 Iop_QuantizeD64,
1208
1209 /* IRRoundingMode(I32) x D128 x D128 -> D128 */
1210 Iop_QuantizeD128,
1211
1212 /* IRRoundingMode(I32) x I8 x D64 -> D64
1213 * The Decimal Floating point operand is rounded to the requested
1214 * significance given by the I8 operand as specified by the rounding
1215 * mode.
1216 */
1217 Iop_SignificanceRoundD64,
1218
1219 /* IRRoundingMode(I32) x I8 x D128 -> D128 */
1220 Iop_SignificanceRoundD128,
1221
1222 /* EXTRACT AND INSERT INSTRUCTIONS
1223 * D64 -> I64
1224 * The exponent of the D32 or D64 operand is extracted. The
1225 * extracted exponent is converted to a 64-bit signed binary integer.
1226 */
1227 Iop_ExtractExpD64,
1228
1229 /* D128 -> I64 */
1230 Iop_ExtractExpD128,
1231
1232 /* D64 -> I64
1233 * The number of significand digits of the D64 operand is extracted.
1234 * The number is stored as a 64-bit signed binary integer.
1235 */
1236 Iop_ExtractSigD64,
1237
1238 /* D128 -> I64 */
1239 Iop_ExtractSigD128,
1240
1241 /* I64 x D64 -> D64
       * The exponent is specified by the first I64 operand and the signed
       * significand is given by the second operand.  The result is a D64
1244 * value consisting of the specified significand and exponent whose
1245 * sign is that of the specified significand.
1246 */
1247 Iop_InsertExpD64,
1248
1249 /* I64 x D128 -> D128 */
1250 Iop_InsertExpD128,
1251
1252 /* Support for 128-bit DFP type */
1253 Iop_D64HLtoD128, Iop_D128HItoD64, Iop_D128LOtoD64,
1254
1255 /* I64 -> I64
       * Convert a 50-bit densely packed BCD string to a 60-bit BCD string
1257 */
1258 Iop_DPBtoBCD,
1259
1260 /* I64 -> I64
       * Convert a 60-bit BCD string to a 50-bit densely packed BCD string
1262 */
1263 Iop_BCDtoDPB,
1264
1265 /* BCD arithmetic instructions, (V128, V128) -> V128
1266 * The BCD format is the same as that used in the BCD<->DPB conversion
1267 * routines, except using 124 digits (vs 60) plus the trailing 4-bit
1268 * signed code. */
1269 Iop_BCDAdd, Iop_BCDSub,
1270
      /* Reinterpret an I64 as a D64 with the same bit pattern */
1272 Iop_ReinterpI64asD64,
1273
      /* Reinterpret a D64 as an I64 with the same bit pattern */
1275 Iop_ReinterpD64asI64,
1276
1277 /* ------------------ 128-bit SIMD FP. ------------------ */
1278
1279 /* --- 32x4 vector FP --- */
1280
1281 /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1282 Iop_Add32Fx4, Iop_Sub32Fx4, Iop_Mul32Fx4, Iop_Div32Fx4,
1283
1284 /* binary */
1285 Iop_Max32Fx4, Iop_Min32Fx4,
1286 Iop_Add32Fx2, Iop_Sub32Fx2,
1287 /* Note: For the following compares, the ppc and arm front-ends assume a
1288 nan in a lane of either argument returns zero for that lane. */
1289 Iop_CmpEQ32Fx4, Iop_CmpLT32Fx4, Iop_CmpLE32Fx4, Iop_CmpUN32Fx4,
1290 Iop_CmpGT32Fx4, Iop_CmpGE32Fx4,
1291
1292 /* Pairwise Max and Min. See integer pairwise operations for details. */
1293 Iop_PwMax32Fx4, Iop_PwMin32Fx4,
1294
1295 /* unary */
1296 Iop_Abs32Fx4,
1297 Iop_Neg32Fx4,
1298
1299 /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1300 Iop_Sqrt32Fx4,
1301
1302 /* Vector Reciprocal Estimate finds an approximate reciprocal of each
1303 element in the operand vector, and places the results in the
1304 destination vector. */
1305 Iop_RecipEst32Fx4,
1306
1307 /* Vector Reciprocal Step computes (2.0 - arg1 * arg2).
         Note that if one of the arguments is zero and the other is an
         infinity of arbitrary sign, the result of the operation is 2.0. */
1310 Iop_RecipStep32Fx4,
1311
1312 /* Vector Reciprocal Square Root Estimate finds an approximate reciprocal
1313 square root of each element in the operand vector. */
1314 Iop_RSqrtEst32Fx4,
1315
1316 /* Vector Reciprocal Square Root Step computes (3.0 - arg1 * arg2) / 2.0.
         Note that if one of the arguments is zero and the other is an
         infinity of arbitrary sign, the result of the operation is 1.5. */
1319 Iop_RSqrtStep32Fx4,
1320
1321 /* --- Int to/from FP conversion --- */
1322 /* Unlike the standard fp conversions, these irops take no
1323 rounding mode argument. Instead the irop trailers _R{M,P,N,Z}
1324 indicate the mode: {-inf, +inf, nearest, zero} respectively. */
1325 Iop_I32UtoFx4, Iop_I32StoFx4, /* I32x4 -> F32x4 */
1326 Iop_FtoI32Ux4_RZ, Iop_FtoI32Sx4_RZ, /* F32x4 -> I32x4 */
1327 Iop_QFtoI32Ux4_RZ, Iop_QFtoI32Sx4_RZ, /* F32x4 -> I32x4 (saturating) */
1328 Iop_RoundF32x4_RM, Iop_RoundF32x4_RP, /* round to fp integer */
1329 Iop_RoundF32x4_RN, Iop_RoundF32x4_RZ, /* round to fp integer */
      /* Fixed32 format is a floating-point number with a fixed number of
         fraction bits.  The number of fraction bits is passed as a second
         argument of type I8. */
1333 Iop_F32ToFixed32Ux4_RZ, Iop_F32ToFixed32Sx4_RZ, /* fp -> fixed-point */
1334 Iop_Fixed32UToF32x4_RN, Iop_Fixed32SToF32x4_RN, /* fixed-point -> fp */
1335
1336 /* --- Single to/from half conversion --- */
1337 /* FIXME: what kind of rounding in F32x4 -> F16x4 case? */
1338 Iop_F32toF16x4, Iop_F16toF32x4, /* F32x4 <-> F16x4 */
1339
1340 /* --- 32x4 lowest-lane-only scalar FP --- */
1341
1342 /* In binary cases, upper 3/4 is copied from first operand. In
1343 unary cases, upper 3/4 is copied from the operand. */
1344
1345 /* binary */
1346 Iop_Add32F0x4, Iop_Sub32F0x4, Iop_Mul32F0x4, Iop_Div32F0x4,
1347 Iop_Max32F0x4, Iop_Min32F0x4,
1348 Iop_CmpEQ32F0x4, Iop_CmpLT32F0x4, Iop_CmpLE32F0x4, Iop_CmpUN32F0x4,
1349
1350 /* unary */
1351 Iop_RecipEst32F0x4, Iop_Sqrt32F0x4, Iop_RSqrtEst32F0x4,
1352
1353 /* --- 64x2 vector FP --- */
1354
1355 /* ternary :: IRRoundingMode(I32) x V128 x V128 -> V128 */
1356 Iop_Add64Fx2, Iop_Sub64Fx2, Iop_Mul64Fx2, Iop_Div64Fx2,
1357
1358 /* binary */
1359 Iop_Max64Fx2, Iop_Min64Fx2,
1360 Iop_CmpEQ64Fx2, Iop_CmpLT64Fx2, Iop_CmpLE64Fx2, Iop_CmpUN64Fx2,
1361
1362 /* unary */
1363 Iop_Abs64Fx2,
1364 Iop_Neg64Fx2,
1365
1366 /* binary :: IRRoundingMode(I32) x V128 -> V128 */
1367 Iop_Sqrt64Fx2,
1368
1369 /* see 32Fx4 variants for description */
1370 Iop_RecipEst64Fx2, // unary
1371 Iop_RecipStep64Fx2, // binary
1372 Iop_RSqrtEst64Fx2, // unary
1373 Iop_RSqrtStep64Fx2, // binary
1374
1375 /* --- 64x2 lowest-lane-only scalar FP --- */
1376
1377 /* In binary cases, upper half is copied from first operand. In
1378 unary cases, upper half is copied from the operand. */
1379
1380 /* binary */
1381 Iop_Add64F0x2, Iop_Sub64F0x2, Iop_Mul64F0x2, Iop_Div64F0x2,
1382 Iop_Max64F0x2, Iop_Min64F0x2,
1383 Iop_CmpEQ64F0x2, Iop_CmpLT64F0x2, Iop_CmpLE64F0x2, Iop_CmpUN64F0x2,
1384
1385 /* unary */
1386 Iop_Sqrt64F0x2,
1387
1388 /* --- pack / unpack --- */
1389
1390 /* 64 <-> 128 bit vector */
1391 Iop_V128to64, // :: V128 -> I64, low half
1392 Iop_V128HIto64, // :: V128 -> I64, high half
1393 Iop_64HLtoV128, // :: (I64,I64) -> V128
1394
1395 Iop_64UtoV128,
1396 Iop_SetV128lo64,
1397
1398 /* Copies lower 64/32/16/8 bits, zeroes out the rest. */
1399 Iop_ZeroHI64ofV128, // :: V128 -> V128
1400 Iop_ZeroHI96ofV128, // :: V128 -> V128
1401 Iop_ZeroHI112ofV128, // :: V128 -> V128
1402 Iop_ZeroHI120ofV128, // :: V128 -> V128
1403
1404 /* 32 <-> 128 bit vector */
1405 Iop_32UtoV128,
1406 Iop_V128to32, // :: V128 -> I32, lowest lane
1407 Iop_SetV128lo32, // :: (V128,I32) -> V128
1408
1409 /* ------------------ 128-bit SIMD Integer. ------------------ */
1410
1411 /* BITWISE OPS */
1412 Iop_NotV128,
1413 Iop_AndV128, Iop_OrV128, Iop_XorV128,
1414
1415 /* VECTOR SHIFT (shift amt :: Ity_I8) */
1416 Iop_ShlV128, Iop_ShrV128,
1417
1418 /* MISC (vector integer cmp != 0) */
1419 Iop_CmpNEZ8x16, Iop_CmpNEZ16x8, Iop_CmpNEZ32x4, Iop_CmpNEZ64x2,
1420
1421 /* ADDITION (normal / U->U sat / S->S sat) */
1422 Iop_Add8x16, Iop_Add16x8, Iop_Add32x4, Iop_Add64x2,
1423 Iop_QAdd8Ux16, Iop_QAdd16Ux8, Iop_QAdd32Ux4, Iop_QAdd64Ux2,
1424 Iop_QAdd8Sx16, Iop_QAdd16Sx8, Iop_QAdd32Sx4, Iop_QAdd64Sx2,
1425
1426 /* ADDITION, ARM64 specific saturating variants. */
1427 /* Unsigned widen left arg, signed widen right arg, add, saturate S->S.
1428 This corresponds to SUQADD. */
1429 Iop_QAddExtUSsatSS8x16, Iop_QAddExtUSsatSS16x8,
1430 Iop_QAddExtUSsatSS32x4, Iop_QAddExtUSsatSS64x2,
1431 /* Signed widen left arg, unsigned widen right arg, add, saturate U->U.
1432 This corresponds to USQADD. */
1433 Iop_QAddExtSUsatUU8x16, Iop_QAddExtSUsatUU16x8,
1434 Iop_QAddExtSUsatUU32x4, Iop_QAddExtSUsatUU64x2,
1435
1436 /* SUBTRACTION (normal / unsigned sat / signed sat) */
1437 Iop_Sub8x16, Iop_Sub16x8, Iop_Sub32x4, Iop_Sub64x2,
1438 Iop_QSub8Ux16, Iop_QSub16Ux8, Iop_QSub32Ux4, Iop_QSub64Ux2,
1439 Iop_QSub8Sx16, Iop_QSub16Sx8, Iop_QSub32Sx4, Iop_QSub64Sx2,
1440
1441 /* MULTIPLICATION (normal / high half of signed/unsigned) */
1442 Iop_Mul8x16, Iop_Mul16x8, Iop_Mul32x4,
1443 Iop_MulHi16Ux8, Iop_MulHi32Ux4,
1444 Iop_MulHi16Sx8, Iop_MulHi32Sx4,
1445 /* (widening signed/unsigned of even lanes, with lowest lane=zero) */
1446 Iop_MullEven8Ux16, Iop_MullEven16Ux8, Iop_MullEven32Ux4,
1447 Iop_MullEven8Sx16, Iop_MullEven16Sx8, Iop_MullEven32Sx4,
1448
1449 /* Widening multiplies, all of the form (I64, I64) -> V128 */
1450 Iop_Mull8Ux8, Iop_Mull8Sx8,
1451 Iop_Mull16Ux4, Iop_Mull16Sx4,
1452 Iop_Mull32Ux2, Iop_Mull32Sx2,
1453
1454 /* Signed doubling saturating widening multiplies, (I64, I64) -> V128 */
1455 Iop_QDMull16Sx4, Iop_QDMull32Sx2,
1456
1457 /* Vector Saturating Doubling Multiply Returning High Half and
1458 Vector Saturating Rounding Doubling Multiply Returning High Half.
1459 These IROps multiply corresponding elements in two vectors, double
1460 the results, and place the most significant half of the final results
1461 in the destination vector. The results are truncated or rounded. If
1462 any of the results overflow, they are saturated. To be more precise,
1463 for each lane, the computed result is:
1464 QDMulHi:
1465 hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2 )
1466 QRDMulHi:
1467 hi-half( sign-extend(laneL) *q sign-extend(laneR) *q 2
1468 +q (1 << (lane-width-in-bits - 1)) )
1469 */
1470 Iop_QDMulHi16Sx8, Iop_QDMulHi32Sx4, /* (V128, V128) -> V128 */
1471 Iop_QRDMulHi16Sx8, Iop_QRDMulHi32Sx4, /* (V128, V128) -> V128 */
1472
1473 /* Polynomial multiplication treats its arguments as
1474 coefficients of polynomials over {0, 1}. */
1475 Iop_PolynomialMul8x16, /* (V128, V128) -> V128 */
1476 Iop_PolynomialMull8x8, /* (I64, I64) -> V128 */
1477
1478 /* Vector Polynomial multiplication add. (V128, V128) -> V128
1479
1480 *** Below is the algorithm for the instructions. These Iops could
1481 be emulated to get this functionality, but the emulation would
1482 be long and messy.
1483
1484 Example for polynomial multiply add for vector of bytes
1485 do i = 0 to 15
1486 prod[i].bit[0:14] <- 0
1487 srcA <- VR[argL].byte[i]
1488 srcB <- VR[argR].byte[i]
1489 do j = 0 to 7
1490 do k = 0 to j
1491 gbit <- srcA.bit[k] & srcB.bit[j-k]
1492 prod[i].bit[j] <- prod[i].bit[j] ^ gbit
1493 end
1494 end
1495
1496 do j = 8 to 14
1497 do k = j-7 to 7
1498 gbit <- (srcA.bit[k] & srcB.bit[j-k])
1499 prod[i].bit[j] <- prod[i].bit[j] ^ gbit
1500 end
1501 end
1502 end
1503
1504 do i = 0 to 7
1505 VR[dst].hword[i] <- 0b0 || (prod[2×i] ^ prod[2×i+1])
1506 end
1507 */
1508 Iop_PolynomialMulAdd8x16, Iop_PolynomialMulAdd16x8,
1509 Iop_PolynomialMulAdd32x4, Iop_PolynomialMulAdd64x2,
1510
1511 /* PAIRWISE operations */
1512 /* Iop_PwFoo16x4( [a,b,c,d], [e,f,g,h] ) =
1513 [Foo16(a,b), Foo16(c,d), Foo16(e,f), Foo16(g,h)] */
1514 Iop_PwAdd8x16, Iop_PwAdd16x8, Iop_PwAdd32x4,
1515 Iop_PwAdd32Fx2,
      /* The lengthening ("L") variants are unary.  The resulting vector
         contains half as many elements as the operand, but each element
         is twice as wide.
1518 Example:
1519 Iop_PwAddL16Ux4( [a,b,c,d] ) = [a+b,c+d]
1520 where a+b and c+d are unsigned 32-bit values. */
1521 Iop_PwAddL8Ux16, Iop_PwAddL16Ux8, Iop_PwAddL32Ux4,
1522 Iop_PwAddL8Sx16, Iop_PwAddL16Sx8, Iop_PwAddL32Sx4,
1523
1524 /* Other unary pairwise ops */
1525
1526 /* Vector bit matrix transpose. (V128) -> V128 */
1527 /* For each doubleword element of the source vector, an 8-bit x 8-bit
1528 * matrix transpose is performed. */
1529 Iop_PwBitMtxXpose64x2,
1530
1531 /* ABSOLUTE VALUE */
1532 Iop_Abs8x16, Iop_Abs16x8, Iop_Abs32x4, Iop_Abs64x2,
1533
1534 /* AVERAGING: note: (arg1 + arg2 + 1) >>u 1 */
1535 Iop_Avg8Ux16, Iop_Avg16Ux8, Iop_Avg32Ux4,
1536 Iop_Avg8Sx16, Iop_Avg16Sx8, Iop_Avg32Sx4,
1537
1538 /* MIN/MAX */
1539 Iop_Max8Sx16, Iop_Max16Sx8, Iop_Max32Sx4, Iop_Max64Sx2,
1540 Iop_Max8Ux16, Iop_Max16Ux8, Iop_Max32Ux4, Iop_Max64Ux2,
1541 Iop_Min8Sx16, Iop_Min16Sx8, Iop_Min32Sx4, Iop_Min64Sx2,
1542 Iop_Min8Ux16, Iop_Min16Ux8, Iop_Min32Ux4, Iop_Min64Ux2,
1543
1544 /* COMPARISON */
1545 Iop_CmpEQ8x16, Iop_CmpEQ16x8, Iop_CmpEQ32x4, Iop_CmpEQ64x2,
1546 Iop_CmpGT8Sx16, Iop_CmpGT16Sx8, Iop_CmpGT32Sx4, Iop_CmpGT64Sx2,
1547 Iop_CmpGT8Ux16, Iop_CmpGT16Ux8, Iop_CmpGT32Ux4, Iop_CmpGT64Ux2,
1548
1549 /* COUNT ones / leading zeroes / leading sign bits (not including topmost
1550 bit) */
1551 Iop_Cnt8x16,
1552 Iop_Clz8x16, Iop_Clz16x8, Iop_Clz32x4,
1553 Iop_Cls8x16, Iop_Cls16x8, Iop_Cls32x4,
1554
1555 /* VECTOR x SCALAR SHIFT (shift amt :: Ity_I8) */
1556 Iop_ShlN8x16, Iop_ShlN16x8, Iop_ShlN32x4, Iop_ShlN64x2,
1557 Iop_ShrN8x16, Iop_ShrN16x8, Iop_ShrN32x4, Iop_ShrN64x2,
1558 Iop_SarN8x16, Iop_SarN16x8, Iop_SarN32x4, Iop_SarN64x2,
1559
1560 /* VECTOR x VECTOR SHIFT / ROTATE */
1561 /* FIXME: I'm pretty sure the ARM32 front/back ends interpret these
1562 differently from all other targets. The intention is that
1563 the shift amount (2nd arg) is interpreted as unsigned and
1564 only the lowest log2(lane-bits) bits are relevant. But the
1565 ARM32 versions treat the shift amount as an 8 bit signed
1566 number. The ARM32 uses should be replaced by the relevant
1567 vector x vector bidirectional shifts instead. */
1568 Iop_Shl8x16, Iop_Shl16x8, Iop_Shl32x4, Iop_Shl64x2,
1569 Iop_Shr8x16, Iop_Shr16x8, Iop_Shr32x4, Iop_Shr64x2,
1570 Iop_Sar8x16, Iop_Sar16x8, Iop_Sar32x4, Iop_Sar64x2,
1571 Iop_Sal8x16, Iop_Sal16x8, Iop_Sal32x4, Iop_Sal64x2,
1572 Iop_Rol8x16, Iop_Rol16x8, Iop_Rol32x4, Iop_Rol64x2,
1573
1574 /* VECTOR x VECTOR SATURATING SHIFT */
1575 Iop_QShl8x16, Iop_QShl16x8, Iop_QShl32x4, Iop_QShl64x2,
1576 Iop_QSal8x16, Iop_QSal16x8, Iop_QSal32x4, Iop_QSal64x2,
1577 /* VECTOR x INTEGER SATURATING SHIFT */
1578 Iop_QShlNsatSU8x16, Iop_QShlNsatSU16x8,
1579 Iop_QShlNsatSU32x4, Iop_QShlNsatSU64x2,
1580 Iop_QShlNsatUU8x16, Iop_QShlNsatUU16x8,
1581 Iop_QShlNsatUU32x4, Iop_QShlNsatUU64x2,
1582 Iop_QShlNsatSS8x16, Iop_QShlNsatSS16x8,
1583 Iop_QShlNsatSS32x4, Iop_QShlNsatSS64x2,
1584
1585 /* VECTOR x VECTOR BIDIRECTIONAL SATURATING (& MAYBE ROUNDING) SHIFT */
1586 /* All of type (V128, V128) -> V256. */
1587 /* The least significant 8 bits of each lane of the second
1588 operand are used as the shift amount, and interpreted signedly.
1589 Positive values mean a shift left, negative a shift right. The
1590 result is signedly or unsignedly saturated. There are also
1591 rounding variants, which add 2^(shift_amount-1) to the value before
1592 shifting, but only in the shift-right case. Vacated positions
1593 are filled with zeroes. IOW, it's either SHR or SHL, but not SAR.
1594
1595 These operations return 129 bits: one bit ("Q") indicating whether
1596 saturation occurred, and the shift result. The result type is V256,
1597 of which the lower V128 is the shift result, and Q occupies the
1598 least significant bit of the upper V128. All other bits of the
1599 upper V128 are zero. */
1600 // Unsigned saturation, no rounding
1601 Iop_QandUQsh8x16, Iop_QandUQsh16x8,
1602 Iop_QandUQsh32x4, Iop_QandUQsh64x2,
1603 // Signed saturation, no rounding
1604 Iop_QandSQsh8x16, Iop_QandSQsh16x8,
1605 Iop_QandSQsh32x4, Iop_QandSQsh64x2,
1606
1607 // Unsigned saturation, rounding
1608 Iop_QandUQRsh8x16, Iop_QandUQRsh16x8,
1609 Iop_QandUQRsh32x4, Iop_QandUQRsh64x2,
1610 // Signed saturation, rounding
1611 Iop_QandSQRsh8x16, Iop_QandSQRsh16x8,
1612 Iop_QandSQRsh32x4, Iop_QandSQRsh64x2,
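
/* Example (an illustrative sketch, not part of the IR definition): the
   129-bit result produced by the Iop_Qand*Qsh / Iop_Qand*QRsh families
   above can be taken apart with the 256-bit pack/unpack ops declared
   further down this file.  't256' is assumed to be an IRTemp of type
   Ity_V256 holding such a result:

      // lower V128: the actual shift result
      IRExpr* shiftRes = IRExpr_Unop(Iop_V256toV128_0, IRExpr_RdTmp(t256));
      // bit 0 of this I64 is the "Q" (saturation occurred) flag
      IRExpr* qFlag    = IRExpr_Unop(Iop_V256to64_2,   IRExpr_RdTmp(t256));
*/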
1613
1614 /* VECTOR x VECTOR BIDIRECTIONAL (& MAYBE ROUNDING) SHIFT */
1615 /* All of type (V128, V128) -> V128 */
1616 /* The least significant 8 bits of each lane of the second
1617 operand are used as the shift amount, and interpreted signedly.
1618 Positive values mean a shift left, negative a shift right.
1619 There are also rounding variants, which add 2^(shift_amount-1)
1620 to the value before shifting, but only in the shift-right case.
1621
1622 For left shifts, the vacated places are filled with zeroes.
1623 For right shifts, the vacated places are filled with zeroes
1624 for the U variants and sign bits for the S variants. */
1625 // Signed and unsigned, non-rounding
1626 Iop_Sh8Sx16, Iop_Sh16Sx8, Iop_Sh32Sx4, Iop_Sh64Sx2,
1627 Iop_Sh8Ux16, Iop_Sh16Ux8, Iop_Sh32Ux4, Iop_Sh64Ux2,
1628
1629 // Signed and unsigned, rounding
1630 Iop_Rsh8Sx16, Iop_Rsh16Sx8, Iop_Rsh32Sx4, Iop_Rsh64Sx2,
1631 Iop_Rsh8Ux16, Iop_Rsh16Ux8, Iop_Rsh32Ux4, Iop_Rsh64Ux2,
1632
1642 /* VECTOR x SCALAR SATURATING (& MAYBE ROUNDING) NARROWING SHIFT RIGHT */
1643 /* All of type (V128, I8) -> V128 */
1644 /* The first argument is shifted right, then narrowed to half the width
1645 by saturating it. The second argument is a scalar shift amount that
1646 applies to all lanes, and must be a value in the range 1 to lane_width.
1647 The shift may be done signedly (Sar variants) or unsignedly (Shr
1648 variants). The saturation is done according to the two signedness
1649 indicators at the end of the name. For example 64Sto32U means a
1650 signed 64 bit value is saturated into an unsigned 32 bit value.
1651 Additionally, the QRS variants do rounding, that is, they add the
1652 value (1 << (shift_amount-1)) to each source lane before shifting.
1653
1654 These operations return 65 bits: one bit ("Q") indicating whether
1655 saturation occurred, and the shift result. The result type is V128,
1656 of which the lower half is the shift result, and Q occupies the
1657 least significant bit of the upper half. All other bits of the
1658 upper half are zero. */
1659 // No rounding, sat U->U
1660 Iop_QandQShrNnarrow16Uto8Ux8,
1661 Iop_QandQShrNnarrow32Uto16Ux4, Iop_QandQShrNnarrow64Uto32Ux2,
1662 // No rounding, sat S->S
1663 Iop_QandQSarNnarrow16Sto8Sx8,
1664 Iop_QandQSarNnarrow32Sto16Sx4, Iop_QandQSarNnarrow64Sto32Sx2,
1665 // No rounding, sat S->U
1666 Iop_QandQSarNnarrow16Sto8Ux8,
1667 Iop_QandQSarNnarrow32Sto16Ux4, Iop_QandQSarNnarrow64Sto32Ux2,
1668
1669 // Rounding, sat U->U
1670 Iop_QandQRShrNnarrow16Uto8Ux8,
1671 Iop_QandQRShrNnarrow32Uto16Ux4, Iop_QandQRShrNnarrow64Uto32Ux2,
1672 // Rounding, sat S->S
1673 Iop_QandQRSarNnarrow16Sto8Sx8,
1674 Iop_QandQRSarNnarrow32Sto16Sx4, Iop_QandQRSarNnarrow64Sto32Sx2,
1675 // Rounding, sat S->U
1676 Iop_QandQRSarNnarrow16Sto8Ux8,
1677 Iop_QandQRSarNnarrow32Sto16Ux4, Iop_QandQRSarNnarrow64Sto32Ux2,
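
/* Example (an illustrative sketch): the 65-bit result produced by the
   narrowing shift ops above can likewise be taken apart using the
   128-bit pack/unpack ops declared earlier in this file.  't128' is
   assumed to be an IRTemp of type Ity_V128 holding such a result:

      // lower I64: the narrowed, shifted lanes
      IRExpr* narrowed = IRExpr_Unop(Iop_V128to64,   IRExpr_RdTmp(t128));
      // bit 0 of the upper I64 is the "Q" (saturation occurred) flag
      IRExpr* qFlag    = IRExpr_Unop(Iop_V128HIto64, IRExpr_RdTmp(t128));
*/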
1678
1679 /* NARROWING (binary)
1680 -- narrow 2xV128 into 1xV128, hi half from left arg */
1681 /* See comments above w.r.t. U vs S issues in saturated narrowing. */
1682 Iop_QNarrowBin16Sto8Ux16, Iop_QNarrowBin32Sto16Ux8,
1683 Iop_QNarrowBin16Sto8Sx16, Iop_QNarrowBin32Sto16Sx8,
1684 Iop_QNarrowBin16Uto8Ux16, Iop_QNarrowBin32Uto16Ux8,
1685 Iop_NarrowBin16to8x16, Iop_NarrowBin32to16x8,
1686 Iop_QNarrowBin64Sto32Sx4, Iop_QNarrowBin64Uto32Ux4,
1687 Iop_NarrowBin64to32x4,
1688
1689 /* NARROWING (unary) -- narrow V128 into I64 */
1690 Iop_NarrowUn16to8x8, Iop_NarrowUn32to16x4, Iop_NarrowUn64to32x2,
1691 /* Saturating narrowing from signed source to signed/unsigned
1692 destination */
1693 Iop_QNarrowUn16Sto8Sx8, Iop_QNarrowUn32Sto16Sx4, Iop_QNarrowUn64Sto32Sx2,
1694 Iop_QNarrowUn16Sto8Ux8, Iop_QNarrowUn32Sto16Ux4, Iop_QNarrowUn64Sto32Ux2,
1695 /* Saturating narrowing from unsigned source to unsigned destination */
1696 Iop_QNarrowUn16Uto8Ux8, Iop_QNarrowUn32Uto16Ux4, Iop_QNarrowUn64Uto32Ux2,
1697
1698 /* WIDENING -- sign or zero extend each element of the argument
1699 vector to twice its original size. The resulting vector consists of
1700 the same number of elements but each element and the vector itself
1701 are twice as wide.
1702 All operations are I64->V128.
1703 Example
1704 Iop_Widen32Sto64x2( [a, b] ) = [c, d]
1705 where c = Iop_32Sto64(a) and d = Iop_32Sto64(b) */
1706 Iop_Widen8Uto16x8, Iop_Widen16Uto32x4, Iop_Widen32Uto64x2,
1707 Iop_Widen8Sto16x8, Iop_Widen16Sto32x4, Iop_Widen32Sto64x2,
1708
1709 /* INTERLEAVING */
1710 /* Interleave lanes from low or high halves of
1711 operands. Most-significant result lane is from the left
1712 arg. */
1713 Iop_InterleaveHI8x16, Iop_InterleaveHI16x8,
1714 Iop_InterleaveHI32x4, Iop_InterleaveHI64x2,
1715 Iop_InterleaveLO8x16, Iop_InterleaveLO16x8,
1716 Iop_InterleaveLO32x4, Iop_InterleaveLO64x2,
1717 /* Interleave odd/even lanes of operands. Most-significant result lane
1718 is from the left arg. */
1719 Iop_InterleaveOddLanes8x16, Iop_InterleaveEvenLanes8x16,
1720 Iop_InterleaveOddLanes16x8, Iop_InterleaveEvenLanes16x8,
1721 Iop_InterleaveOddLanes32x4, Iop_InterleaveEvenLanes32x4,
1722
1723 /* CONCATENATION -- build a new value by concatenating either
1724 the even or odd lanes of both operands. Note that
1725 Cat{Odd,Even}Lanes64x2 are identical to Interleave{HI,LO}64x2
1726 and so are omitted. */
1727 Iop_CatOddLanes8x16, Iop_CatOddLanes16x8, Iop_CatOddLanes32x4,
1728 Iop_CatEvenLanes8x16, Iop_CatEvenLanes16x8, Iop_CatEvenLanes32x4,
1729
1730 /* GET elements of VECTOR
1731 GET is binop (V128, I8) -> I<elem_size> */
1732 /* Note: the arm back-end handles only constant second argument. */
1733 Iop_GetElem8x16, Iop_GetElem16x8, Iop_GetElem32x4, Iop_GetElem64x2,
1734
1735 /* DUPLICATING -- copy value to all lanes */
1736 Iop_Dup8x16, Iop_Dup16x8, Iop_Dup32x4,
1737
1738 /* SLICE -- produces the lowest 128 bits of (arg1:arg2) >> (8 * arg3).
1739 arg3 is a shift amount in bytes and may be between 0 and 16
1740 inclusive. When 0, the result is arg2; when 16, the result is arg1.
1741 Not all back ends handle all values. The arm64 back
1742 end handles only immediate arg3 values. */
1743 Iop_SliceV128, // (V128, V128, I8) -> V128
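
/* Worked example, following the definition above: number the bytes of
   arg1 as a15..a0 and the bytes of arg2 as b15..b0 (most significant
   first).  Then Iop_SliceV128(arg1, arg2, 3) = [a2,a1,a0,b15,b14,...,b3],
   ie. the low 3 bytes of arg1 concatenated above the top 13 bytes of
   arg2. */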
1744
1745 /* REVERSE the order of chunks in vector lanes. Chunks must be
1746 smaller than the vector lanes (obviously) and so may be 8-,
1747 16- and 32-bit in size. See definitions of 64-bit SIMD
1748 versions above for examples. */
1749 Iop_Reverse8sIn16_x8,
1750 Iop_Reverse8sIn32_x4, Iop_Reverse16sIn32_x4,
1751 Iop_Reverse8sIn64_x2, Iop_Reverse16sIn64_x2, Iop_Reverse32sIn64_x2,
1752 Iop_Reverse1sIn8_x16, /* Reverse bits in each byte lane. */
1753
1754 /* PERMUTING -- copy src bytes to dst,
1755 as indexed by control vector bytes:
1756 for i in 0 .. 15 . result[i] = argL[ argR[i] ]
1757 argR[i] values may only be in the range 0 .. 15, else behaviour
1758 is undefined. */
1759 Iop_Perm8x16,
1760 Iop_Perm32x4, /* ditto, except argR values are restricted to 0 .. 3 */
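
/* Worked example, following the definition above: if argR[i] == i for
   all i then the result equals argL, and if argR[i] == 15-i for all i
   then the result is argL with its bytes reversed. */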
1761
1762 /* MISC CONVERSION -- get high bits of each byte lane, a la
1763 x86/amd64 pmovmskb */
1764 Iop_GetMSBs8x16, /* V128 -> I16 */
1765
1766 /* Vector Reciprocal Estimate and Vector Reciprocal Square Root Estimate
1767 See floating-point equivalents for details. */
1768 Iop_RecipEst32Ux4, Iop_RSqrtEst32Ux4,
1769
1770 /* ------------------ 256-bit SIMD Integer. ------------------ */
1771
1772 /* Pack/unpack */
1773 Iop_V256to64_0, // V256 -> I64, extract least significant lane
1774 Iop_V256to64_1,
1775 Iop_V256to64_2,
1776 Iop_V256to64_3, // V256 -> I64, extract most significant lane
1777
1778 Iop_64x4toV256, // (I64,I64,I64,I64)->V256
1779 // first arg is most significant lane
1780
1781 Iop_V256toV128_0, // V256 -> V128, less significant lane
1782 Iop_V256toV128_1, // V256 -> V128, more significant lane
1783 Iop_V128HLtoV256, // (V128,V128)->V256, first arg is most signif
1784
1785 Iop_AndV256,
1786 Iop_OrV256,
1787 Iop_XorV256,
1788 Iop_NotV256,
1789
1790 /* MISC (vector integer cmp != 0) */
1791 Iop_CmpNEZ8x32, Iop_CmpNEZ16x16, Iop_CmpNEZ32x8, Iop_CmpNEZ64x4,
1792
1793 Iop_Add8x32, Iop_Add16x16, Iop_Add32x8, Iop_Add64x4,
1794 Iop_Sub8x32, Iop_Sub16x16, Iop_Sub32x8, Iop_Sub64x4,
1795
1796 Iop_CmpEQ8x32, Iop_CmpEQ16x16, Iop_CmpEQ32x8, Iop_CmpEQ64x4,
1797 Iop_CmpGT8Sx32, Iop_CmpGT16Sx16, Iop_CmpGT32Sx8, Iop_CmpGT64Sx4,
1798
1799 Iop_ShlN16x16, Iop_ShlN32x8, Iop_ShlN64x4,
1800 Iop_ShrN16x16, Iop_ShrN32x8, Iop_ShrN64x4,
1801 Iop_SarN16x16, Iop_SarN32x8,
1802
1803 Iop_Max8Sx32, Iop_Max16Sx16, Iop_Max32Sx8,
1804 Iop_Max8Ux32, Iop_Max16Ux16, Iop_Max32Ux8,
1805 Iop_Min8Sx32, Iop_Min16Sx16, Iop_Min32Sx8,
1806 Iop_Min8Ux32, Iop_Min16Ux16, Iop_Min32Ux8,
1807
1808 Iop_Mul16x16, Iop_Mul32x8,
1809 Iop_MulHi16Ux16, Iop_MulHi16Sx16,
1810
1811 Iop_QAdd8Ux32, Iop_QAdd16Ux16,
1812 Iop_QAdd8Sx32, Iop_QAdd16Sx16,
1813 Iop_QSub8Ux32, Iop_QSub16Ux16,
1814 Iop_QSub8Sx32, Iop_QSub16Sx16,
1815
1816 Iop_Avg8Ux32, Iop_Avg16Ux16,
1817
1818 Iop_Perm32x8,
1819
1820 /* (V128, V128) -> V128 */
1821 Iop_CipherV128, Iop_CipherLV128, Iop_CipherSV128,
1822 Iop_NCipherV128, Iop_NCipherLV128,
1823
1824 /* Hash instructions, Federal Information Processing Standards
1825 * Publication 180-3 Secure Hash Standard. */
1826 /* (V128, I8) -> V128; The I8 input arg is (ST | SIX), where ST and
1827 * SIX are fields from the insn. See ISA 2.07 description of
1828 * vshasigmad and vshasigmaw insns.*/
1829 Iop_SHA512, Iop_SHA256,
1830
1831 /* ------------------ 256-bit SIMD FP. ------------------ */
1832
1833 /* ternary :: IRRoundingMode(I32) x V256 x V256 -> V256 */
1834 Iop_Add64Fx4, Iop_Sub64Fx4, Iop_Mul64Fx4, Iop_Div64Fx4,
1835 Iop_Add32Fx8, Iop_Sub32Fx8, Iop_Mul32Fx8, Iop_Div32Fx8,
1836
1837 Iop_Sqrt32Fx8,
1838 Iop_Sqrt64Fx4,
1839 Iop_RSqrtEst32Fx8,
1840 Iop_RecipEst32Fx8,
1841
1842 Iop_Max32Fx8, Iop_Min32Fx8,
1843 Iop_Max64Fx4, Iop_Min64Fx4,
1844 Iop_LAST /* must be the last enumerator */
1845 }
1846 IROp;
1847
1848 /* Pretty-print an op. */
1849 extern void ppIROp ( IROp );
1850
1851
1852 /* Encoding of IEEE754-specified rounding modes.
1853 Note, various front and back ends rely on the actual numerical
1854 values of these, so do not change them. */
1855 typedef
1856 enum {
1857 Irrm_NEAREST = 0, // Round to nearest, ties to even
1858 Irrm_NegINF = 1, // Round to negative infinity
1859 Irrm_PosINF = 2, // Round to positive infinity
1860 Irrm_ZERO = 3, // Round toward zero
1861 Irrm_NEAREST_TIE_AWAY_0 = 4, // Round to nearest, ties away from 0
1862 Irrm_PREPARE_SHORTER = 5, // Round to prepare for shorter
1863 // precision
1864 Irrm_AWAY_FROM_ZERO = 6, // Round away from 0
1865 Irrm_NEAREST_TIE_TOWARD_0 = 7 // Round to nearest, ties towards 0
1866 }
1867 IRRoundingMode;
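
/* Worked example of the first five modes, applied to 2.5 and -2.5
   (values exactly halfway between two integers):
      Irrm_NEAREST (ties to even):   2.5 ->  2,   -2.5 -> -2
      Irrm_NegINF:                   2.5 ->  2,   -2.5 -> -3
      Irrm_PosINF:                   2.5 ->  3,   -2.5 -> -2
      Irrm_ZERO:                     2.5 ->  2,   -2.5 -> -2
      Irrm_NEAREST_TIE_AWAY_0:       2.5 ->  3,   -2.5 -> -3
*/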
1868
1869 /* Binary floating point comparison result values.
1870 This is also derived from what IA32 does. */
1871 typedef
1872 enum {
1873 Ircr_UN = 0x45,
1874 Ircr_LT = 0x01,
1875 Ircr_GT = 0x00,
1876 Ircr_EQ = 0x40
1877 }
1878 IRCmpFResult;
1879
1880 typedef IRCmpFResult IRCmpF32Result;
1881 typedef IRCmpFResult IRCmpF64Result;
1882 typedef IRCmpFResult IRCmpF128Result;
1883
1884 /* Decimal floating point result values. */
1885 typedef IRCmpFResult IRCmpDResult;
1886 typedef IRCmpDResult IRCmpD64Result;
1887 typedef IRCmpDResult IRCmpD128Result;
1888
1889 /* ------------------ Expressions ------------------ */
1890
1891 typedef struct _IRQop IRQop; /* forward declaration */
1892 typedef struct _IRTriop IRTriop; /* forward declaration */
1893
1894
1895 /* The different kinds of expressions. Their meaning is explained below
1896 in the comments for IRExpr. */
1897 typedef
1898 enum {
1899 Iex_Binder=0x1900,
1900 Iex_Get,
1901 Iex_GetI,
1902 Iex_RdTmp,
1903 Iex_Qop,
1904 Iex_Triop,
1905 Iex_Binop,
1906 Iex_Unop,
1907 Iex_Load,
1908 Iex_Const,
1909 Iex_ITE,
1910 Iex_CCall,
1911 Iex_VECRET,
1912 Iex_BBPTR
1913 }
1914 IRExprTag;
1915
1916 /* An expression. Stored as a tagged union. 'tag' indicates what kind
1917 of expression this is. 'Iex' is the union that holds the fields. If
1918 an IRExpr 'e' has e.tag equal to Iex_Load, then it's a load
1919 expression, and the fields can be accessed with
1920 'e.Iex.Load.<fieldname>'.
1921
1922 For each kind of expression, we show what it looks like when
1923 pretty-printed with ppIRExpr().
1924 */
1925 typedef
1926 struct _IRExpr
1927 IRExpr;
1928
1929 struct _IRExpr {
1930 IRExprTag tag;
1931 union {
1932 /* Used only in pattern matching within Vex. Should not be seen
1933 outside of Vex. */
1934 struct {
1935 Int binder;
1936 } Binder;
1937
1938 /* Read a guest register, at a fixed offset in the guest state.
1939 ppIRExpr output: GET:<ty>(<offset>), eg. GET:I32(0)
1940 */
1941 struct {
1942 Int offset; /* Offset into the guest state */
1943 IRType ty; /* Type of the value being read */
1944 } Get;
1945
1946 /* Read a guest register at a non-fixed offset in the guest
1947 state. This allows circular indexing into parts of the guest
1948 state, which is essential for modelling situations where the
1949 identity of guest registers is not known until run time. One
1950 example is the x87 FP register stack.
1951
1952 The part of the guest state to be treated as a circular array
1953 is described in the IRRegArray 'descr' field. It holds the
1954 offset of the first element in the array, the type of each
1955 element, and the number of elements.
1956
1957 The array index is indicated rather indirectly, in a way
1958 which makes optimisation easy: as the sum of variable part
1959 (the 'ix' field) and a constant offset (the 'bias' field).
1960
1961 Since the indexing is circular, the actual array index to use
1962 is computed as (ix + bias) % num-of-elems-in-the-array.
1963
1964 Here's an example. The description
1965
1966 (96:8xF64)[t39,-7]
1967
1968 describes an array of 8 F64-typed values, the
1969 guest-state-offset of the first being 96. This array is
1970 being indexed at (t39 - 7) % 8.
1971
1972 It is important to get the array size/type exactly correct
1973 since IR optimisation looks closely at such info in order to
1974 establish aliasing/non-aliasing between separate GetI and
1975 PutI events, which is used to establish when they can be
1976 reordered, etc. Putting incorrect info in will lead to
1977 obscure IR optimisation bugs.
1978
1979 ppIRExpr output: GETI<descr>[<ix>,<bias>]
1980 eg. GETI(128:8xI8)[t1,0]
1981 */
1982 struct {
1983 IRRegArray* descr; /* Part of guest state treated as circular */
1984 IRExpr* ix; /* Variable part of index into array */
1985 Int bias; /* Constant offset part of index into array */
1986 } GetI;
1987
1988 /* The value held by a temporary.
1989 ppIRExpr output: t<tmp>, eg. t1
1990 */
1991 struct {
1992 IRTemp tmp; /* The temporary number */
1993 } RdTmp;
1994
1995 /* A quaternary operation.
1996 ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>, <arg4>),
1997 eg. MAddF64r32(t1, t2, t3, t4)
1998 */
1999 struct {
2000 IRQop* details;
2001 } Qop;
2002
2003 /* A ternary operation.
2004 ppIRExpr output: <op>(<arg1>, <arg2>, <arg3>),
2005 eg. MulF64(1, 2.0, 3.0)
2006 */
2007 struct {
2008 IRTriop* details;
2009 } Triop;
2010
2011 /* A binary operation.
2012 ppIRExpr output: <op>(<arg1>, <arg2>), eg. Add32(t1,t2)
2013 */
2014 struct {
2015 IROp op; /* op-code */
2016 IRExpr* arg1; /* operand 1 */
2017 IRExpr* arg2; /* operand 2 */
2018 } Binop;
2019
2020 /* A unary operation.
2021 ppIRExpr output: <op>(<arg>), eg. Neg8(t1)
2022 */
2023 struct {
2024 IROp op; /* op-code */
2025 IRExpr* arg; /* operand */
2026 } Unop;
2027
2028 /* A load from memory -- a normal load, not a load-linked.
2029 Load-Linkeds (and Store-Conditionals) are instead represented
2030 by IRStmt.LLSC since Load-Linkeds have side effects and so
2031 are not semantically valid IRExpr's.
2032 ppIRExpr output: LD<end>:<ty>(<addr>), eg. LDle:I32(t1)
2033 */
2034 struct {
2035 IREndness end; /* Endian-ness of the load */
2036 IRType ty; /* Type of the loaded value */
2037 IRExpr* addr; /* Address being loaded from */
2038 } Load;
2039
2040 /* A constant-valued expression.
2041 ppIRExpr output: <con>, eg. 0x4:I32
2042 */
2043 struct {
2044 IRConst* con; /* The constant itself */
2045 } Const;
2046
2047 /* A call to a pure (no side-effects) helper C function.
2048
2049 With the 'cee' field, 'name' is the function's name. It is
2050 only used for pretty-printing purposes. The address to call
2051 (host address, of course) is stored in the 'addr' field
2052 inside 'cee'.
2053
2054 The 'args' field is a NULL-terminated array of arguments.
2055 The stated return IRType, and the implied argument types,
2056 must match that of the function being called well enough so
2057 that the back end can actually generate correct code for the
2058 call.
2059
2060 The called function **must** satisfy the following:
2061
2062 * no side effects -- must be a pure function, the result of
2063 which depends only on the passed parameters.
2064
2065 * it may not look at, nor modify, any of the guest state
2066 since that would hide guest state transitions from
2067 instrumenters
2068
2069 * it may not access guest memory, since that would hide
2070 guest memory transactions from the instrumenters
2071
2072 * it must not assume that arguments are being evaluated in a
2073 particular order. The order of evaluation is unspecified.
2074
2075 This is restrictive, but makes the semantics clean, and does
2076 not interfere with IR optimisation.
2077
2078 If you want to call a helper which can mess with guest state
2079 and/or memory, instead use Ist_Dirty. This is a lot more
2080 flexible, but you have to give a bunch of details about what
2081 the helper does (and you better be telling the truth,
2082 otherwise any derived instrumentation will be wrong). Also
2083 Ist_Dirty inhibits various IR optimisations and so can cause
2084 quite poor code to be generated. Try to avoid it.
2085
2086 In principle it would be allowable to have the arg vector
2087 contain an IRExpr_VECRET(), although not IRExpr_BBPTR(). However,
2088 at the moment there is no requirement for clean helper calls to
2089 be able to return V128 or V256 values. Hence this is not allowed.
2090
2091 ppIRExpr output: <cee>(<args>):<retty>
2092 eg. foo{0x80489304}(t1, t2):I32
2093 */
2094 struct {
2095 IRCallee* cee; /* Function to call. */
2096 IRType retty; /* Type of return value. */
2097 IRExpr** args; /* Vector of argument expressions. */
2098 } CCall;
2099
2100 /* A ternary if-then-else operator. It returns iftrue if cond is
2101 nonzero, iffalse otherwise. Note that it is STRICT, ie. both
2102 iftrue and iffalse are evaluated in all cases.
2103
2104 ppIRExpr output: ITE(<cond>,<iftrue>,<iffalse>),
2105 eg. ITE(t6,t7,t8)
2106 */
2107 struct {
2108 IRExpr* cond; /* Condition */
2109 IRExpr* iftrue; /* True expression */
2110 IRExpr* iffalse; /* False expression */
2111 } ITE;
2112 } Iex;
2113 };
2114
2115 /* Expression auxiliaries: a ternary expression. */
2116 struct _IRTriop {
2117 IROp op; /* op-code */
2118 IRExpr* arg1; /* operand 1 */
2119 IRExpr* arg2; /* operand 2 */
2120 IRExpr* arg3; /* operand 3 */
2121 };
2122
2123 /* Expression auxiliaries: a quarternary expression. */
2124 struct _IRQop {
2125 IROp op; /* op-code */
2126 IRExpr* arg1; /* operand 1 */
2127 IRExpr* arg2; /* operand 2 */
2128 IRExpr* arg3; /* operand 3 */
2129 IRExpr* arg4; /* operand 4 */
2130 };
2131
2132
2133 /* Two special kinds of IRExpr, which can ONLY be used in
2134 argument lists for dirty helper calls (IRDirty.args) and in NO
2135 OTHER PLACES. And then only in very limited ways. */
2136
2137 /* Denotes an argument which (in the helper) takes a pointer to a
2138 (naturally aligned) V128 or V256, into which the helper is expected
2139 to write its result. Use of IRExpr_VECRET() is strictly
2140 controlled. If the helper returns a V128 or V256 value then
2141 IRExpr_VECRET() must appear exactly once in the arg list, although
2142 it can appear anywhere, and the helper must have a C 'void' return
2143 type. If the helper returns any other type, IRExpr_VECRET() may
2144 not appear in the argument list. */
2145
2146 /* Denotes a void* argument which is passed to the helper, and which at
2147 run time will point to the thread's guest state area. It may appear
2148 at most once in an argument list, and may not appear at all in
2149 argument lists for clean helper calls. */
2150
2151 static inline Bool is_IRExpr_VECRET_or_BBPTR ( const IRExpr* e ) {
2152 return e->tag == Iex_VECRET || e->tag == Iex_BBPTR;
2153 }
2154
2155
2156 /* Expression constructors. */
2157 extern IRExpr* IRExpr_Binder ( Int binder );
2158 extern IRExpr* IRExpr_Get ( Int off, IRType ty );
2159 extern IRExpr* IRExpr_GetI ( IRRegArray* descr, IRExpr* ix, Int bias );
2160 extern IRExpr* IRExpr_RdTmp ( IRTemp tmp );
2161 extern IRExpr* IRExpr_Qop ( IROp op, IRExpr* arg1, IRExpr* arg2,
2162 IRExpr* arg3, IRExpr* arg4 );
2163 extern IRExpr* IRExpr_Triop ( IROp op, IRExpr* arg1,
2164 IRExpr* arg2, IRExpr* arg3 );
2165 extern IRExpr* IRExpr_Binop ( IROp op, IRExpr* arg1, IRExpr* arg2 );
2166 extern IRExpr* IRExpr_Unop ( IROp op, IRExpr* arg );
2167 extern IRExpr* IRExpr_Load ( IREndness end, IRType ty, IRExpr* addr );
2168 extern IRExpr* IRExpr_Const ( IRConst* con );
2169 extern IRExpr* IRExpr_CCall ( IRCallee* cee, IRType retty, IRExpr** args );
2170 extern IRExpr* IRExpr_ITE ( IRExpr* cond, IRExpr* iftrue, IRExpr* iffalse );
2171 extern IRExpr* IRExpr_VECRET ( void );
2172 extern IRExpr* IRExpr_BBPTR ( void );
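
/* Example (an illustrative sketch): building the expression tree for
   "add the I32 guest register at state offset 16 to a 32-bit
   little-endian load from the address in t1".  The offset 16 and the
   temporary t1 are made-up values:

      IRExpr* e
         = IRExpr_Binop(Iop_Add32,
                        IRExpr_Get(16, Ity_I32),
                        IRExpr_Load(Iend_LE, Ity_I32, IRExpr_RdTmp(t1)));

   which ppIRExpr would show along the lines of
   Add32(GET:I32(16),LDle:I32(t1)). */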
2173
2174 /* Deep-copy an IRExpr. */
2175 extern IRExpr* deepCopyIRExpr ( const IRExpr* );
2176
2177 /* Pretty-print an IRExpr. */
2178 extern void ppIRExpr ( const IRExpr* );
2179
2180 /* NULL-terminated IRExpr vector constructors, suitable for
2181 use as arg lists in clean/dirty helper calls. */
2182 extern IRExpr** mkIRExprVec_0 ( void );
2183 extern IRExpr** mkIRExprVec_1 ( IRExpr* );
2184 extern IRExpr** mkIRExprVec_2 ( IRExpr*, IRExpr* );
2185 extern IRExpr** mkIRExprVec_3 ( IRExpr*, IRExpr*, IRExpr* );
2186 extern IRExpr** mkIRExprVec_4 ( IRExpr*, IRExpr*, IRExpr*, IRExpr* );
2187 extern IRExpr** mkIRExprVec_5 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2188 IRExpr* );
2189 extern IRExpr** mkIRExprVec_6 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2190 IRExpr*, IRExpr* );
2191 extern IRExpr** mkIRExprVec_7 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2192 IRExpr*, IRExpr*, IRExpr* );
2193 extern IRExpr** mkIRExprVec_8 ( IRExpr*, IRExpr*, IRExpr*, IRExpr*,
2194 IRExpr*, IRExpr*, IRExpr*, IRExpr*);
2195
2196 /* IRExpr copiers:
2197 - shallowCopy: shallow-copy (ie. create a new vector that shares the
2198 elements with the original).
2199 - deepCopy: deep-copy (ie. create a completely new vector). */
2200 extern IRExpr** shallowCopyIRExprVec ( IRExpr** );
2201 extern IRExpr** deepCopyIRExprVec ( IRExpr *const * );
2202
2203 /* Make a constant expression from the given host word taking into
2204 account (of course) the host word size. */
2205 extern IRExpr* mkIRExpr_HWord ( HWord );
2206
2207 /* Convenience function for constructing clean helper calls. */
2208 extern
2209 IRExpr* mkIRExprCCall ( IRType retty,
2210 Int regparms, const HChar* name, void* addr,
2211 IRExpr** args );
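
/* Example (an illustrative sketch): building a call to a pure helper.
   'calc_parity' is a made-up function taking one I32 argument and
   returning an I32; the regparms value 0 and the temporary t5 are
   likewise invented for illustration:

      IRExpr* call
         = mkIRExprCCall(Ity_I32, 0/*regparms*/, "calc_parity",
                         (void*)&calc_parity,
                         mkIRExprVec_1(IRExpr_RdTmp(t5)));
*/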
2212
2213
2214 /* Convenience functions for atoms (IRExprs which are either Iex_RdTmp or
2215 * Iex_Const). */
2216 static inline Bool isIRAtom ( const IRExpr* e ) {
2217 return toBool(e->tag == Iex_RdTmp || e->tag == Iex_Const);
2218 }
2219
2220 /* Are these two IR atoms identical? Causes an assertion
2221 failure if they are passed non-atoms. */
2222 extern Bool eqIRAtom ( const IRExpr*, const IRExpr* );
2223
2224
2225 /* ------------------ Jump kinds ------------------ */
2226
2227 /* This describes hints which can be passed to the dispatcher at guest
2228 control-flow transfer points.
2229
2230 Re Ijk_InvalICache and Ijk_FlushDCache: the guest state _must_ have
2231 two pseudo-registers, guest_CMSTART and guest_CMLEN, which specify
2232 the start and length of the region to be invalidated. CM stands
2233 for "Cache Management". These are both the size of a guest word.
2234 It is the responsibility of the relevant toIR.c to ensure that
2235 these are filled in with suitable values before issuing a jump of
2236 kind Ijk_InvalICache or Ijk_FlushDCache.
2237
2238 Ijk_InvalICache requests invalidation of translations taken from
2239 the requested range. Ijk_FlushDCache requests flushing of the D
2240 cache for the specified range.
2241
2242 Re Ijk_EmWarn and Ijk_EmFail: the guest state must have a
2243 pseudo-register guest_EMNOTE, which is 32-bits regardless of the
2244 host or guest word size. That register should be made to hold a
2245 VexEmNote value to indicate the reason for the exit.
2246
2247 In the case of Ijk_EmFail, the exit is fatal (Vex-generated code
2248 cannot continue) and so the jump destination can be anything.
2249
2250 Re Ijk_Sys_ (syscall jumps): the guest state must have a
2251 pseudo-register guest_IP_AT_SYSCALL, which is the size of a guest
2252 word. Front ends should set this to be the IP at the most recently
2253 executed kernel-entering (system call) instruction. This makes it
2254 very much easier (viz, actually possible at all) to back up the
2255 guest to restart a syscall that has been interrupted by a signal.
2256 */
2257 typedef
2258 enum {
2259 Ijk_INVALID=0x1A00,
2260 Ijk_Boring, /* not interesting; just goto next */
2261 Ijk_Call, /* guest is doing a call */
2262 Ijk_Ret, /* guest is doing a return */
2263 Ijk_ClientReq, /* do guest client req before continuing */
2264 Ijk_Yield, /* client is yielding to thread scheduler */
2265 Ijk_EmWarn, /* report emulation warning before continuing */
2266 Ijk_EmFail, /* emulation critical (FATAL) error; give up */
2267 Ijk_NoDecode, /* current instruction cannot be decoded */
2268 Ijk_MapFail, /* Vex-provided address translation failed */
2269 Ijk_InvalICache, /* Inval icache for range [CMSTART, +CMLEN) */
2270 Ijk_FlushDCache, /* Flush dcache for range [CMSTART, +CMLEN) */
2271 Ijk_NoRedir, /* Jump to un-redirected guest addr */
2272 Ijk_SigILL, /* current instruction synths SIGILL */
2273 Ijk_SigTRAP, /* current instruction synths SIGTRAP */
2274 Ijk_SigSEGV, /* current instruction synths SIGSEGV */
2275 Ijk_SigBUS, /* current instruction synths SIGBUS */
2276 Ijk_SigFPE_IntDiv, /* current instruction synths SIGFPE - IntDiv */
2277 Ijk_SigFPE_IntOvf, /* current instruction synths SIGFPE - IntOvf */
2278 /* Unfortunately, various guest-dependent syscall kinds. They
2279 all mean: do a syscall before continuing. */
2280 Ijk_Sys_syscall, /* amd64/x86 'syscall', ppc 'sc', arm 'svc #0' */
2281 Ijk_Sys_int32, /* amd64/x86 'int $0x20' */
2282 Ijk_Sys_int128, /* amd64/x86 'int $0x80' */
2283 Ijk_Sys_int129, /* amd64/x86 'int $0x81' */
2284 Ijk_Sys_int130, /* amd64/x86 'int $0x82' */
2285 Ijk_Sys_sysenter /* x86 'sysenter'. guest_EIP becomes
2286 invalid at the point this happens. */
2287 }
2288 IRJumpKind;
2289
2290 extern void ppIRJumpKind ( IRJumpKind );
2291
2292
2293 /* ------------------ Dirty helper calls ------------------ */
2294
2295 /* A dirty call is a flexible mechanism for calling (possibly
2296 conditionally) a helper function or procedure. The helper function
2297 may read, write or modify client memory, and may read, write or
2298 modify client state. It can take arguments and optionally return a
2299 value. It may return different results and/or do different things
2300 when called repeatedly with the same arguments, by means of storing
2301 private state.
2302
2303 If a value is returned, it is assigned to the nominated return
2304 temporary.
2305
2306 Dirty calls are statements rather than expressions for obvious
2307 reasons. If a dirty call is marked as writing guest state, any
2308 pre-existing values derived from the written parts of the guest
2309 state are invalid. Similarly, if the dirty call is stated as
2310 writing memory, any pre-existing loaded values are invalidated by
2311 it.
2312
2313 In order that instrumentation is possible, the call must state, and
2314 state correctly:
2315
2316 * Whether it reads, writes or modifies memory, and if so where.
2317
2318 * Whether it reads, writes or modifies guest state, and if so which
2319 pieces. Several pieces may be stated, and their extents must be
2320 known at translation-time. Each piece is allowed to repeat some
2321 number of times at a fixed interval, if required.
2322
2323 Normally, code is generated to pass just the args to the helper.
2324 However, if IRExpr_BBPTR() is present in the argument list (at most
2325 one instance is allowed), then the baseblock pointer is passed for
2326 that arg, so that the callee can access the guest state. It is
2327 invalid for .nFxState to be zero but IRExpr_BBPTR() to be present,
2328 since .nFxState==0 is a claim that the call does not access guest
2329 state.
2330
2331 IMPORTANT NOTE re GUARDS: Dirty calls are strict, very strict. The
2332 arguments and 'mFx' are evaluated REGARDLESS of the guard value.
2333 The order of argument evaluation is unspecified. The guard
2334 expression is evaluated AFTER the arguments and 'mFx' have been
2335 evaluated. 'mFx' is expected (by Memcheck) to be a defined value
2336 even if the guard evaluates to false.
2337 */
2338
2339 #define VEX_N_FXSTATE 7 /* enough for FXSAVE/FXRSTOR on x86 */
2340
2341 /* Effects on resources (eg. registers, memory locations) */
2342 typedef
2343 enum {
2344 Ifx_None=0x1B00, /* no effect */
2345 Ifx_Read, /* reads the resource */
2346 Ifx_Write, /* writes the resource */
2347 Ifx_Modify, /* modifies the resource */
2348 }
2349 IREffect;
2350
2351 /* Pretty-print an IREffect */
2352 extern void ppIREffect ( IREffect );
2353
2354 typedef
2355 struct _IRDirty {
2356 /* What to call, and details of args/results. .guard must be
2357 non-NULL. If .tmp is not IRTemp_INVALID, then the call
2358 returns a result which is placed in .tmp. If at runtime the
2359 guard evaluates to false, .tmp has a 0x555..555 bit pattern
2360 written to it. Hence conditional calls that assign .tmp are
2361 allowed. */
2362 IRCallee* cee; /* where to call */
2363 IRExpr* guard; /* :: Ity_Bit. Controls whether call happens */
2364 /* The args vector may contain IRExpr_BBPTR() and/or
2365 IRExpr_VECRET(), in both cases, at most once. */
2366 IRExpr** args; /* arg vector, ends in NULL. */
2367 IRTemp tmp; /* to assign result to, or IRTemp_INVALID if none */
2368
2369 /* Mem effects; we allow only one R/W/M region to be stated */
2370 IREffect mFx; /* indicates memory effects, if any */
2371 IRExpr* mAddr; /* of access, or NULL if mFx==Ifx_None */
2372 Int mSize; /* of access, or zero if mFx==Ifx_None */
2373
2374 /* Guest state effects; up to N allowed */
2375 Int nFxState; /* must be 0 .. VEX_N_FXSTATE */
2376 struct {
2377 IREffect fx:16; /* read, write or modify? Ifx_None is invalid. */
2378 UShort offset;
2379 UShort size;
2380 UChar nRepeats;
2381 UChar repeatLen;
2382 } fxState[VEX_N_FXSTATE];
2383 /* The access can be repeated, as specified by nRepeats and
2384 repeatLen. To describe only a single access, nRepeats and
2385 repeatLen should be zero. Otherwise, repeatLen must be a
2386 multiple of size and greater than size. */
2387 /* Overall, the parts of the guest state denoted by (offset,
2388 size, nRepeats, repeatLen) are
2389 [offset, +size)
2390 and, if nRepeats > 0,
2391 for (i = 1; i <= nRepeats; i++)
2392 [offset + i * repeatLen, +size)
2393 A convenient way to enumerate all segments is therefore
2394 for (i = 0; i < 1 + nRepeats; i++)
2395 [offset + i * repeatLen, +size)
2396 */
2397 }
2398 IRDirty;
2399
2400 /* Pretty-print a dirty call */
2401 extern void ppIRDirty ( const IRDirty* );
2402
2403 /* Allocate an uninitialised dirty call */
2404 extern IRDirty* emptyIRDirty ( void );
2405
2406 /* Deep-copy a dirty call */
2407 extern IRDirty* deepCopyIRDirty ( const IRDirty* );
2408
2409 /* A handy function which takes some of the tedium out of constructing
2410 dirty helper calls. The called function implicitly does not return
2411 any value and has a constant-True guard. The call is marked as
2412 accessing neither guest state nor memory (hence the "unsafe"
2413 designation) -- you can change this marking later if need be. A
2414 suitable IRCallee is constructed from the supplied bits. */
2415 extern
2416 IRDirty* unsafeIRDirty_0_N ( Int regparms, const HChar* name, void* addr,
2417 IRExpr** args );
2418
2419 /* Similarly, make a zero-annotation dirty call which returns a value,
2420 and assign that to the given temp. */
2421 extern
2422 IRDirty* unsafeIRDirty_1_N ( IRTemp dst,
2423 Int regparms, const HChar* name, void* addr,
2424 IRExpr** args );
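
/* Example (an illustrative sketch): a dirty call to a made-up helper
   'helper_probe' which reads 8 bytes of guest memory at the address in
   t2 and also reads 4 bytes of guest state at offset 64.  All names,
   offsets and sizes here are invented for illustration:

      IRDirty* d = unsafeIRDirty_0_N(0/*regparms*/, "helper_probe",
                                     (void*)&helper_probe,
                                     mkIRExprVec_1(IRExpr_RdTmp(t2)));
      d->mFx   = Ifx_Read;
      d->mAddr = IRExpr_RdTmp(t2);
      d->mSize = 8;
      d->nFxState = 1;
      d->fxState[0].fx        = Ifx_Read;
      d->fxState[0].offset    = 64;
      d->fxState[0].size      = 4;
      d->fxState[0].nRepeats  = 0;
      d->fxState[0].repeatLen = 0;
      addStmtToIRSB(irsb, IRStmt_Dirty(d));   // 'irsb' assumed to exist
*/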
2425
2426
2427 /* --------------- Memory Bus Events --------------- */
2428
2429 typedef
2430 enum {
2431 Imbe_Fence=0x1C00,
2432 /* Needed only on ARM. It cancels a reservation made by a
2433 preceding Linked-Load, and needs to be handed through to the
2434 back end, just as LL and SC themselves are. */
2435 Imbe_CancelReservation
2436 }
2437 IRMBusEvent;
2438
2439 extern void ppIRMBusEvent ( IRMBusEvent );
2440
2441
2442 /* --------------- Compare and Swap --------------- */
2443
2444 /* This denotes an atomic compare and swap operation, either
2445 a single-element one or a double-element one.
2446
2447 In the single-element case:
2448
2449 .addr is the memory address.
2450 .end is the endianness with which memory is accessed
2451
2452 If .addr contains the same value as .expdLo, then .dataLo is
2453 written there, else there is no write. In both cases, the
2454 original value at .addr is copied into .oldLo.
2455
2456 Types: .expdLo, .dataLo and .oldLo must all have the same type.
2457 It may be any integral type, viz: I8, I16, I32 or, for 64-bit
2458 guests, I64.
2459
2460 .oldHi must be IRTemp_INVALID, and .expdHi and .dataHi must
2461 be NULL.
2462
2463 In the double-element case:
2464
2465 .addr is the memory address.
2466 .end is the endianness with which memory is accessed
2467
2468 The operation is the same:
2469
2470 If .addr contains the same value as .expdHi:.expdLo, then
2471 .dataHi:.dataLo is written there, else there is no write. In
2472 both cases the original value at .addr is copied into
2473 .oldHi:.oldLo.
2474
2475 Types: .expdHi, .expdLo, .dataHi, .dataLo, .oldHi, .oldLo must
2476 all have the same type, which may be any integral type, viz: I8,
2477 I16, I32 or, for 64-bit guests, I64.
2478
2479 The double-element case is complicated by the issue of
2480 endianness. In all cases, the two elements are understood to be
2481 located adjacently in memory, starting at the address .addr.
2482
2483 If .end is Iend_LE, then the .xxxLo component is at the lower
2484 address and the .xxxHi component is at the higher address, and
2485 each component is itself stored little-endianly.
2486
2487 If .end is Iend_BE, then the .xxxHi component is at the lower
2488 address and the .xxxLo component is at the higher address, and
2489 each component is itself stored big-endianly.
2490
2491 This allows representing more cases than most architectures can
2492 handle. For example, x86 cannot do DCAS on 8- or 16-bit elements.
2493
2494 How to know if the CAS succeeded?
2495
2496 * if .oldLo == .expdLo (resp. .oldHi:.oldLo == .expdHi:.expdLo),
2497 then the CAS succeeded, .dataLo (resp. .dataHi:.dataLo) is now
2498 stored at .addr, and the original value there was .oldLo (resp
2499 .oldHi:.oldLo).
2500
2501 * if .oldLo != .expdLo (resp. .oldHi:.oldLo != .expdHi:.expdLo),
2502 then the CAS failed, and the original value at .addr was .oldLo
2503 (resp. .oldHi:.oldLo).
2504
2505 Hence it is easy to know whether or not the CAS succeeded.
2506 */
2507 typedef
2508 struct {
2509 IRTemp oldHi; /* old value of *addr is written here */
2510 IRTemp oldLo;
2511 IREndness end; /* endianness of the data in memory */
2512 IRExpr* addr; /* store address */
2513 IRExpr* expdHi; /* expected old value at *addr */
2514 IRExpr* expdLo;
2515 IRExpr* dataHi; /* new value for *addr */
2516 IRExpr* dataLo;
2517 }
2518 IRCAS;
2519
2520 extern void ppIRCAS ( const IRCAS* cas );
2521
2522 extern IRCAS* mkIRCAS ( IRTemp oldHi, IRTemp oldLo,
2523 IREndness end, IRExpr* addr,
2524 IRExpr* expdHi, IRExpr* expdLo,
2525 IRExpr* dataHi, IRExpr* dataLo );
2526
2527 extern IRCAS* deepCopyIRCAS ( const IRCAS* );
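
/* Example (an illustrative sketch): a single-element little-endian
   32-bit CAS.  The value at the address in t2 is replaced by t4 if the
   old value equals t3; the old value is written to t1 in either case.
   All temporaries are assumed to have been created with newIRTemp:

      IRCAS* cas = mkIRCAS(IRTemp_INVALID, t1,       // oldHi unused, oldLo
                           Iend_LE, IRExpr_RdTmp(t2),
                           NULL, IRExpr_RdTmp(t3),   // expdHi unused, expdLo
                           NULL, IRExpr_RdTmp(t4));  // dataHi unused, dataLo
      addStmtToIRSB(irsb, IRStmt_CAS(cas));          // 'irsb' assumed to exist
*/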
2528
2529
2530 /* ------------------ Circular Array Put ------------------ */
2531
2532 typedef
2533 struct {
2534 IRRegArray* descr; /* Part of guest state treated as circular */
2535 IRExpr* ix; /* Variable part of index into array */
2536 Int bias; /* Constant offset part of index into array */
2537 IRExpr* data; /* The value to write */
2538 } IRPutI;
2539
2540 extern void ppIRPutI ( const IRPutI* puti );
2541
2542 extern IRPutI* mkIRPutI ( IRRegArray* descr, IRExpr* ix,
2543 Int bias, IRExpr* data );
2544
2545 extern IRPutI* deepCopyIRPutI ( const IRPutI* );
2546
2547
2548 /* --------------- Guarded loads and stores --------------- */
2549
2550 /* Conditional stores are straightforward. They are the same as
2551 normal stores, with an extra 'guard' field :: Ity_I1 that
2552 determines whether or not the store actually happens. If not,
2553 memory is unmodified.
2554
2555 The semantics of this is that 'addr' and 'data' are fully evaluated
2556 even in the case where 'guard' evaluates to zero (false).
2557 */
2558 typedef
2559 struct {
2560 IREndness end; /* Endianness of the store */
2561 IRExpr* addr; /* store address */
2562 IRExpr* data; /* value to write */
2563 IRExpr* guard; /* Guarding value */
2564 }
2565 IRStoreG;
2566
2567 /* Conditional loads are a little more complex. 'addr' is the
2568 address, 'guard' is the guarding condition. If the load takes
2569 place, the loaded value is placed in 'dst'. If it does not take
2570 place, 'alt' is copied to 'dst'. However, the loaded value is not
2571 placed directly in 'dst' -- it is first subjected to the conversion
2572 specified by 'cvt'.
2573
2574 For example, imagine doing a conditional 8-bit load, in which the
2575 loaded value is zero extended to 32 bits. Hence:
2576 * 'dst' and 'alt' must have type I32
2577 * 'cvt' must be a unary op which converts I8 to I32. In this
2578 example, it would be ILGop_8Uto32.
2579
2580 There is no explicit indication of the type at which the load is
2581 done, since that is inferrable from the arg type of 'cvt'. Note
2582 that the types of 'alt' and 'dst' and the result type of 'cvt' must
2583 all be the same.
2584
2585 Semantically, 'addr' is evaluated even in the case where 'guard'
2586 evaluates to zero (false), and 'alt' is evaluated even when 'guard'
2587 evaluates to one (true). That is, 'addr' and 'alt' are always
2588 evaluated.
2589 */
2590 typedef
2591 enum {
2592 ILGop_INVALID=0x1D00,
2593 ILGop_Ident64, /* 64 bit, no conversion */
2594 ILGop_Ident32, /* 32 bit, no conversion */
2595 ILGop_16Uto32, /* 16 bit load, Z-widen to 32 */
2596 ILGop_16Sto32, /* 16 bit load, S-widen to 32 */
2597 ILGop_8Uto32, /* 8 bit load, Z-widen to 32 */
2598 ILGop_8Sto32 /* 8 bit load, S-widen to 32 */
2599 }
2600 IRLoadGOp;
2601
2602 typedef
2603 struct {
2604 IREndness end; /* Endianness of the load */
2605 IRLoadGOp cvt; /* Conversion to apply to the loaded value */
2606 IRTemp dst; /* Destination (LHS) of assignment */
2607 IRExpr* addr; /* Address being loaded from */
2608 IRExpr* alt; /* Value if load is not done. */
2609 IRExpr* guard; /* Guarding value */
2610 }
2611 IRLoadG;
2612
2613 extern void ppIRStoreG ( const IRStoreG* sg );
2614
2615 extern void ppIRLoadGOp ( IRLoadGOp cvt );
2616
2617 extern void ppIRLoadG ( const IRLoadG* lg );
2618
2619 extern IRStoreG* mkIRStoreG ( IREndness end,
2620 IRExpr* addr, IRExpr* data,
2621 IRExpr* guard );
2622
2623 extern IRLoadG* mkIRLoadG ( IREndness end, IRLoadGOp cvt,
2624 IRTemp dst, IRExpr* addr, IRExpr* alt,
2625 IRExpr* guard );
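
/* Example (an illustrative sketch): the conditional 8-bit load described
   above, expressed with the IRStmt_LoadG constructor declared in the
   Statements section below.  t0 (an Ity_I1 guard), t2 (the address) and
   t1, t3 (both Ity_I32) are assumed to exist already:

      addStmtToIRSB(irsb,
         IRStmt_LoadG(Iend_LE, ILGop_8Uto32,
                      t1,                  // dst
                      IRExpr_RdTmp(t2),    // addr
                      IRExpr_RdTmp(t3),    // alt, used if the guard is false
                      IRExpr_RdTmp(t0)));  // guard
*/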
2626
2627
2628 /* ------------------ Statements ------------------ */
2629
2630 /* The different kinds of statements. Their meaning is explained
2631 below in the comments for IRStmt.
2632
2633 Those marked META do not represent code, but rather extra
2634 information about the code. These statements can be removed
2635 without affecting the functional behaviour of the code, however
2636 they are required by some IR consumers such as tools that
2637 instrument the code.
2638 */
2639
2640 typedef
2641 enum {
2642 Ist_NoOp=0x1E00,
2643 Ist_IMark, /* META */
2644 Ist_AbiHint, /* META */
2645 Ist_Put,
2646 Ist_PutI,
2647 Ist_WrTmp,
2648 Ist_Store,
2649 Ist_LoadG,
2650 Ist_StoreG,
2651 Ist_CAS,
2652 Ist_LLSC,
2653 Ist_Dirty,
2654 Ist_MBE,
2655 Ist_Exit
2656 }
2657 IRStmtTag;
2658
2659 /* A statement. Stored as a tagged union. 'tag' indicates what kind
2660 of expression this is. 'Ist' is the union that holds the fields.
2661 If an IRStmt 'st' has st.tag equal to Ist_Store, then it's a store
2662 statement, and the fields can be accessed with
2663 'st.Ist.Store.<fieldname>'.
2664
2665 For each kind of statement, we show what it looks like when
2666 pretty-printed with ppIRStmt().
2667 */
2668 typedef
2669 struct _IRStmt {
2670 IRStmtTag tag;
2671 union {
2672 /* A no-op (usually resulting from IR optimisation). Can be
2673 omitted without any effect.
2674
2675 ppIRStmt output: IR-NoOp
2676 */
2677 struct {
2678 } NoOp;
2679
2680 /* META: instruction mark. Marks the start of the statements
2681 that represent a single machine instruction (the end of
2682 those statements is marked by the next IMark or the end of
2683 the IRSB). Contains the address and length of the
2684 instruction.
2685
2686 It also contains a delta value. The delta must be
2687 subtracted from a guest program counter value before
2688 attempting to establish, by comparison with the address
2689 and length values, whether or not that program counter
2690 value refers to this instruction. For x86, amd64, ppc32,
2691 ppc64 and arm, the delta value is zero. For Thumb
2692 instructions, the delta value is one. This is because, on
2693 Thumb, guest PC values (guest_R15T) are encoded using the
2694 top 31 bits of the instruction address and a 1 in the lsb;
2695 hence they appear to be (numerically) 1 past the start of
2696 the instruction they refer to. IOW, guest_R15T on ARM
2697 holds a standard ARM interworking address.
2698
2699 ppIRStmt output: ------ IMark(<addr>, <len>, <delta>) ------,
2700 eg. ------ IMark(0x4000792, 5, 0) ------,
2701 */
2702 struct {
2703 Addr addr; /* instruction address */
2704 UInt len; /* instruction length */
2705 UChar delta; /* addr = program counter as encoded in guest state
2706 - delta */
2707 } IMark;
2708
2709 /* META: An ABI hint, which says something about this
2710 platform's ABI.
2711
2712 At the moment, the only AbiHint is one which indicates
2713 that a given chunk of address space, [base .. base+len-1],
2714 has become undefined. This is used on amd64-linux and
2715 some ppc variants to pass stack-redzoning hints to whoever
2716 wants to see them. It also indicates the address of the
2717 next (dynamic) instruction that will be executed. This is
2718 to help Memcheck with origin tracking.
2719
2720 ppIRStmt output: ====== AbiHint(<base>, <len>, <nia>) ======
2721 eg. ====== AbiHint(t1, 16, t2) ======
2722 */
2723 struct {
2724 IRExpr* base; /* Start of undefined chunk */
2725 Int len; /* Length of undefined chunk */
2726 IRExpr* nia; /* Address of next (guest) insn */
2727 } AbiHint;
2728
2729 /* Write a guest register, at a fixed offset in the guest state.
2730 ppIRStmt output: PUT(<offset>) = <data>, eg. PUT(60) = t1
2731 */
2732 struct {
2733 Int offset; /* Offset into the guest state */
2734 IRExpr* data; /* The value to write */
2735 } Put;
2736
2737 /* Write a guest register, at a non-fixed offset in the guest
2738 state. See the comment for GetI expressions for more
2739 information.
2740
2741 ppIRStmt output: PUTI<descr>[<ix>,<bias>] = <data>,
2742 eg. PUTI(64:8xF64)[t5,0] = t1
2743 */
2744 struct {
2745 IRPutI* details;
2746 } PutI;
2747
2748 /* Assign a value to a temporary. Note that SSA rules require
2749 that each tmp be assigned to only once. IR sanity checking will
2750 reject any block containing a temporary which is not assigned
2751 to exactly once.
2752
2753 ppIRStmt output: t<tmp> = <data>, eg. t1 = 3
2754 */
2755 struct {
2756 IRTemp tmp; /* Temporary (LHS of assignment) */
2757 IRExpr* data; /* Expression (RHS of assignment) */
2758 } WrTmp;
2759
2760 /* Write a value to memory. This is a normal store, not a
2761 Store-Conditional. To represent a Store-Conditional,
2762 instead use IRStmt.LLSC.
2763 ppIRStmt output: ST<end>(<addr>) = <data>, eg. STle(t1) = t2
2764 */
2765 struct {
2766 IREndness end; /* Endianness of the store */
2767 IRExpr* addr; /* store address */
2768 IRExpr* data; /* value to write */
2769 } Store;
2770
2771 /* Guarded store. Note that this is defined to evaluate all
2772 expression fields (addr, data) even if the guard evaluates
2773 to false.
2774 ppIRStmt output:
2775 if (<guard>) ST<end>(<addr>) = <data> */
2776 struct {
2777 IRStoreG* details;
2778 } StoreG;
2779
2780 /* Guarded load. Note that this is defined to evaluate all
2781 expression fields (addr, alt) even if the guard evaluates
2782 to false.
2783 ppIRStmt output:
2784 t<tmp> = if (<guard>) <cvt>(LD<end>(<addr>)) else <alt> */
2785 struct {
2786 IRLoadG* details;
2787 } LoadG;
2788
2789 /* Do an atomic compare-and-swap operation. Semantics are
2790 described above on a comment at the definition of IRCAS.
2791
2792 ppIRStmt output:
2793 t<tmp> = CAS<end>(<addr> :: <expected> -> <new>)
2794 eg
2795 t1 = CASle(t2 :: t3->Add32(t3,1))
2796 which denotes a 32-bit atomic increment
2797 of a value at address t2
2798
2799 A double-element CAS may also be denoted, in which case <tmp>,
2800 <expected> and <new> are all pairs of items, separated by
2801 commas.
2802 */
2803 struct {
2804 IRCAS* details;
2805 } CAS;
2806
2807 /* Either Load-Linked or Store-Conditional, depending on
2808 STOREDATA.
2809
2810 If STOREDATA is NULL then this is a Load-Linked, meaning
2811 that data is loaded from memory as normal, but a
2812 'reservation' for the address is also lodged in the
2813 hardware.
2814
2815 result = Load-Linked(addr, end)
2816
2817 The data transfer type is the type of RESULT (I32, I64,
2818 etc). ppIRStmt output:
2819
2820 result = LD<end>-Linked(<addr>), eg. LDbe-Linked(t1)
2821
2822 If STOREDATA is not NULL then this is a Store-Conditional,
2823 hence:
2824
2825 result = Store-Conditional(addr, storedata, end)
2826
2827 The data transfer type is the type of STOREDATA and RESULT
2828 has type Ity_I1. The store may fail or succeed depending
2829 on the state of a previously lodged reservation on this
2830 address. RESULT is written 1 if the store succeeds and 0
2831 if it fails. eg ppIRStmt output:
2832
2833 result = ( ST<end>-Cond(<addr>) = <storedata> )
2834 eg t3 = ( STbe-Cond(t1) = t2 )
2835
2836 In all cases, the address must be naturally aligned for
2837 the transfer type -- any misaligned addresses should be
2838 caught by a dominating IR check and side exit. This
2839 alignment restriction exists because on at least some
2840 LL/SC platforms (ppc), stwcx. etc will trap w/ SIGBUS on
2841 misaligned addresses, and we have to actually generate
2842 stwcx. on the host, and we don't want it trapping on the
2843 host.
2844
2845 Summary of rules for transfer type:
2846 STOREDATA == NULL (LL):
2847 transfer type = type of RESULT
2848 STOREDATA != NULL (SC):
2849 transfer type = type of STOREDATA, and RESULT :: Ity_I1
2850 */
2851 struct {
2852 IREndness end;
2853 IRTemp result;
2854 IRExpr* addr;
2855 IRExpr* storedata; /* NULL => LL, non-NULL => SC */
2856 } LLSC;
2857
2858 /* Call (possibly conditionally) a C function that has side
2859 effects (ie. is "dirty"). See the comments above the
2860 IRDirty type declaration for more information.
2861
2862 ppIRStmt output:
2863 t<tmp> = DIRTY <guard> <effects>
2864 ::: <callee>(<args>)
2865 eg.
2866 t1 = DIRTY t27 RdFX-gst(16,4) RdFX-gst(60,4)
2867 ::: foo{0x380035f4}(t2)
2868 */
2869 struct {
2870 IRDirty* details;
2871 } Dirty;
2872
2873 /* A memory bus event - a fence, or acquisition/release of the
2874 hardware bus lock. IR optimisation treats all these as fences
2875 across which no memory references may be moved.
2876 ppIRStmt output: MBusEvent-Fence,
2877 MBusEvent-BusLock, MBusEvent-BusUnlock.
2878 */
2879 struct {
2880 IRMBusEvent event;
2881 } MBE;
2882
2883 /* Conditional exit from the middle of an IRSB.
2884 ppIRStmt output: if (<guard>) goto {<jk>} <dst>
2885 eg. if (t69) goto {Boring} 0x4000AAA:I32
2886 If <guard> is true, the guest state is also updated by
2887 PUT-ing <dst> at <offsIP>. This is done because a
2888 taken exit must update the guest program counter.
2889 */
2890 struct {
2891 IRExpr* guard; /* Conditional expression */
2892 IRConst* dst; /* Jump target (constant only) */
2893 IRJumpKind jk; /* Jump kind */
2894 Int offsIP; /* Guest state offset for IP */
2895 } Exit;
2896 } Ist;
2897 }
2898 IRStmt;
2899
2900 /* Statement constructors. */
2901 extern IRStmt* IRStmt_NoOp ( void );
2902 extern IRStmt* IRStmt_IMark ( Addr addr, UInt len, UChar delta );
2903 extern IRStmt* IRStmt_AbiHint ( IRExpr* base, Int len, IRExpr* nia );
2904 extern IRStmt* IRStmt_Put ( Int off, IRExpr* data );
2905 extern IRStmt* IRStmt_PutI ( IRPutI* details );
2906 extern IRStmt* IRStmt_WrTmp ( IRTemp tmp, IRExpr* data );
2907 extern IRStmt* IRStmt_Store ( IREndness end, IRExpr* addr, IRExpr* data );
2908 extern IRStmt* IRStmt_StoreG ( IREndness end, IRExpr* addr, IRExpr* data,
2909 IRExpr* guard );
2910 extern IRStmt* IRStmt_LoadG ( IREndness end, IRLoadGOp cvt, IRTemp dst,
2911 IRExpr* addr, IRExpr* alt, IRExpr* guard );
2912 extern IRStmt* IRStmt_CAS ( IRCAS* details );
2913 extern IRStmt* IRStmt_LLSC ( IREndness end, IRTemp result,
2914 IRExpr* addr, IRExpr* storedata );
2915 extern IRStmt* IRStmt_Dirty ( IRDirty* details );
2916 extern IRStmt* IRStmt_MBE ( IRMBusEvent event );
2917 extern IRStmt* IRStmt_Exit ( IRExpr* guard, IRJumpKind jk, IRConst* dst,
2918 Int offsIP );
2919
2920 /* Deep-copy an IRStmt. */
2921 extern IRStmt* deepCopyIRStmt ( const IRStmt* );
2922
2923 /* Pretty-print an IRStmt. */
2924 extern void ppIRStmt ( const IRStmt* );
2925
2926
2927 /* ------------------ Basic Blocks ------------------ */
2928
2929 /* Type environments: a bunch of statements, expressions, etc, are
2930 incomplete without an environment indicating the type of each
2931 IRTemp. So this provides one. IR temporaries are really just
2932 unsigned ints and so this provides an array, 0 .. n_types_used-1 of
2933 them.
2934 */
2935 typedef
2936 struct {
2937 IRType* types;
2938 Int types_size;
2939 Int types_used;
2940 }
2941 IRTypeEnv;
2942
2943 /* Obtain a new IRTemp */
2944 extern IRTemp newIRTemp ( IRTypeEnv*, IRType );
2945
2946 /* Deep-copy a type environment */
2947 extern IRTypeEnv* deepCopyIRTypeEnv ( const IRTypeEnv* );
2948
2949 /* Pretty-print a type environment */
2950 extern void ppIRTypeEnv ( const IRTypeEnv* );
2951
2952
2953 /* Code blocks, which in proper compiler terminology are superblocks
2954 (single entry, multiple exit code sequences) contain:
2955
2956 - A table giving a type for each temp (the "type environment")
2957 - An expandable array of statements
2958 - An expression of type 32 or 64 bits, depending on the
2959 guest's word size, indicating the next destination if the block
2960 executes all the way to the end, without a side exit
2961 - An indication of any special actions (JumpKind) needed
2962 for this final jump.
2963 - Offset of the IP field in the guest state. This will be
2964 updated before the final jump is done.
2965
2966 "IRSB" stands for "IR Super Block".
2967 */
2968 typedef
2969 struct {
2970 IRTypeEnv* tyenv;
2971 IRStmt** stmts;
2972 Int stmts_size;
2973 Int stmts_used;
2974 IRExpr* next;
2975 IRJumpKind jumpkind;
2976 Int offsIP;
2977 }
2978 IRSB;
2979
2980 /* Allocate a new, uninitialised IRSB */
2981 extern IRSB* emptyIRSB ( void );
2982
2983 /* Deep-copy an IRSB */
2984 extern IRSB* deepCopyIRSB ( const IRSB* );
2985
2986 /* Deep-copy an IRSB, except for the statements list, which is set to
2987 be a new, empty list of statements. */
2988 extern IRSB* deepCopyIRSBExceptStmts ( const IRSB* );
2989
2990 /* Pretty-print an IRSB */
2991 extern void ppIRSB ( const IRSB* );
2992
2993 /* Append an IRStmt to an IRSB */
2994 extern void addStmtToIRSB ( IRSB*, IRStmt* );
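
/* Example (an illustrative sketch): assembling a minimal superblock by
   hand.  The guest state offsets used here (16 for a data register, 184
   for the IP) are invented; real offsets come from the guest layout in
   the relevant libvex_guest_*.h header:

      IRSB* irsb = emptyIRSB();
      IRTemp t1  = newIRTemp(irsb->tyenv, Ity_I32);

      // t1 = GET:I32(16) ; PUT(16) = Add32(t1, 0x1:I32)
      addStmtToIRSB(irsb, IRStmt_WrTmp(t1, IRExpr_Get(16, Ity_I32)));
      addStmtToIRSB(irsb, IRStmt_Put(16,
                       IRExpr_Binop(Iop_Add32, IRExpr_RdTmp(t1),
                                    IRExpr_Const(IRConst_U32(1)))));

      // unconditional fall-through to guest address 0x4000 (32-bit guest)
      irsb->next     = IRExpr_Const(IRConst_U32(0x4000));
      irsb->jumpkind = Ijk_Boring;
      irsb->offsIP   = 184;
*/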
2995
2996
2997 /*---------------------------------------------------------------*/
2998 /*--- Helper functions for the IR ---*/
2999 /*---------------------------------------------------------------*/
3000
3001 /* For messing with IR type environments */
3002 extern IRTypeEnv* emptyIRTypeEnv ( void );
3003
3004 /* What is the type of this expression? */
3005 extern IRType typeOfIRConst ( const IRConst* );
3006 extern IRType typeOfIRTemp ( const IRTypeEnv*, IRTemp );
3007 extern IRType typeOfIRExpr ( const IRTypeEnv*, const IRExpr* );
3008
3009 /* What are the arg and result type for this IRLoadGOp? */
3010 extern void typeOfIRLoadGOp ( IRLoadGOp cvt,
3011 /*OUT*/IRType* t_res,
3012 /*OUT*/IRType* t_arg );
3013
3014 /* Sanity check a BB of IR */
3015 extern void sanityCheckIRSB ( const IRSB* bb,
3016 const HChar* caller,
3017 Bool require_flatness,
3018 IRType guest_word_size );
3019 extern Bool isFlatIRStmt ( const IRStmt* );
3020
3021 /* Is this any value actually in the enumeration 'IRType' ? */
3022 extern Bool isPlausibleIRType ( IRType ty );
3023
3024
3025 /*---------------------------------------------------------------*/
3026 /*--- IR injection ---*/
3027 /*---------------------------------------------------------------*/
3028
3029 void vex_inject_ir(IRSB *, IREndness);
3030
3031
3032 #endif /* ndef __LIBVEX_IR_H */
3033
3034 /*---------------------------------------------------------------*/
3035 /*--- libvex_ir.h ---*/
3036 /*---------------------------------------------------------------*/
3037