
/*---------------------------------------------------------------*/
/*--- begin                                guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_emnote.h"              // VexEmNote
#include "libvex_guest_amd64.h"         // VexGuestAMD64State
#include "guest_generic_bb_to_IR.h"     // DisResult

/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                            ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   guest_generic_bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         (*resteerOkFn) ( void*, Addr ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           const UChar* guest_code,
                           Long         delta,
                           Addr         guest_IP,
                           VexArch      guest_arch,
                           const VexArchInfo* archinfo,
                           const VexAbiInfo*  abiinfo,
                           VexEndness   host_endness,
                           Bool         sigill_diag );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( const HChar* function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int,
                                                   VexRegisterUpdates );

extern
VexGuestLayout amd64guest_layout;


/*---------------------------------------------------------*/
/*--- amd64 guest helpers                               ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );
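
/* Illustrative only (not part of this API): the three evaluators
   above take the components of the rflags thunk described later in
   this file.  Given a VexGuestAMD64State* st (guest_CC_* fields as
   declared in libvex_guest_amd64.h), a flag query would look like:

      ULong flags = amd64g_calculate_rflags_all( st->guest_CC_OP,
                                                 st->guest_CC_DEP1,
                                                 st->guest_CC_DEP2,
                                                 st->guest_CC_NDEP );
      ULong isZ   = amd64g_calculate_condition( AMD64CondZ,
                                                st->guest_CC_OP,
                                                st->guest_CC_DEP1,
                                                st->guest_CC_DEP2,
                                                st->guest_CC_NDEP );

   AMD64CondZ is defined at the end of this file;
   amd64g_calculate_condition returns nonzero iff its condition
   holds. */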

extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_pclmul(ULong s1, ULong s2, ULong which);

extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );
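
/* Sketch of intended use of the two check_* helpers above.  The
   packing shown is an assumption based on their x86 counterparts and
   should be checked against guest_amd64_helpers.c: the validated
   rounding mode is presumed to come back in the low 32 bits and a
   VexEmNote in the upper 32 bits.

      ULong     res   = amd64g_check_ldmxcsr( mxcsr );
      ULong     rmode = res & 0xFFFFFFFFULL;
      VexEmNote warn  = (VexEmNote)(res >> 32);
*/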

extern VexEmNote amd64g_dirtyhelper_FLDENV  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTOR  ( VexGuestAMD64State*, HWord );
extern VexEmNote amd64g_dirtyhelper_FRSTORS ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVE  ( VexGuestAMD64State*, HWord );
extern void amd64g_dirtyhelper_FNSAVES ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                              UInt seg_selector, UInt virtual_addr );

extern ULong amd64g_calculate_mmx_pmaddwd  ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw   ( ULong, ULong );

extern ULong amd64g_calculate_sse_phminposuw ( ULong sLo, ULong sHi );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
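
/* For reference: the CRC32 instruction family modelled by these
   helpers uses the CRC-32C (Castagnoli) polynomial 0x11EDC6F41,
   whose bit-reflected form is 0x82F63B78.  A minimal byte-at-a-time
   model of the 8-bit variant (a documentation sketch, not the code
   actually used):

      static ULong ref_crc32b ( ULong crcIn, ULong b )
      {
         UInt crc = (UInt)crcIn ^ (UInt)(b & 0xFF);
         Int  i;
         for (i = 0; i < 8; i++)
            crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78U : 0);
         return (ULong)crc;
      }

   The w/l/q variants apply the same per-byte step to 2, 4 and 8
   input bytes respectively, least significant byte first. */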

extern ULong amd64g_calc_mpsadbw ( ULong sHi, ULong sLo,
                                   ULong dHi, ULong dLo,
                                   ULong imm_and_return_control_bit );

extern ULong amd64g_calculate_pext  ( ULong, ULong );
extern ULong amd64g_calculate_pdep  ( ULong, ULong );
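
/* PDEP and PEXT are the BMI2 bit deposit/extract operations: PDEP
   scatters the low-order bits of a source into the bit positions
   selected by a mask, and PEXT gathers the mask-selected bits of a
   source into the low end of the result.  A reference model of the
   deposit direction (a sketch only; no particular argument order for
   the unnamed parameters above is implied):

      static ULong ref_pdep ( ULong src, ULong mask )
      {
         ULong res = 0, srcBit = 1;
         while (mask != 0) {
            ULong lowest = mask & (0ULL - mask);  // lowest set bit of mask
            if (src & srcBit)
               res |= lowest;
            mask &= mask - 1;                     // clear that bit
            srcBit <<= 1;
         }
         return res;
      }
*/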

/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( Addr/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( Addr/*addr*/, ULong/*data*/ );

extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_avx_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_avx2 ( VexGuestAMD64State* st );

extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

extern void amd64g_dirtyhelper_XSAVE_COMPONENT_0
               ( VexGuestAMD64State* gst, HWord addr );
extern void amd64g_dirtyhelper_XSAVE_COMPONENT_1_EXCLUDING_XMMREGS
               ( VexGuestAMD64State* gst, HWord addr );

extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_0
                    ( VexGuestAMD64State* gst, HWord addr );
extern VexEmNote amd64g_dirtyhelper_XRSTOR_COMPONENT_1_EXCLUDING_XMMREGS
                    ( VexGuestAMD64State* gst, HWord addr );

extern ULong amd64g_dirtyhelper_RDTSC ( void );
extern void  amd64g_dirtyhelper_RDTSCP ( VexGuestAMD64State* st );
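
/* Illustrative only: RDTSC produces a 64-bit counter value which the
   instruction splits across %edx:%eax.  A conceptual sketch (the real
   split is emitted as IR; guest_RAX/guest_RDX are declared in
   libvex_guest_amd64.h):

      ULong tsc = amd64g_dirtyhelper_RDTSC();
      st->guest_RAX = tsc & 0xFFFFFFFFULL;   // low half -> %rax (zero-extended)
      st->guest_RDX = tsc >> 32;             // high half -> %rdx
*/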

extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );

/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, makes
   no memory accesses, and is otherwise a pure function.

   opc4_and_imm contains (4th byte of opcode << 8) | the-imm8-byte, so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  The 4th byte of the opcode is in
   the range 0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called only with opc4_and_imm combinations which
   have actually been validated, and will assert otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
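
/* Illustrative unpacking of the result for an xSTRI variant, per the
   description above (a sketch; st is the guest state pointer):

      ULong res     = amd64g_dirtyhelper_PCMPxSTRx( st, opc4_and_imm,
                                                    gstOffL, gstOffR,
                                                    edxIN, eaxIN );
      ULong oszacp  = res & 0xFFFF;          // new OSZACP rflags bits
      ULong new_ecx = (res >> 16) & 0xFFFF;  // new %ecx value
*/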

/* Implementation of the Intel AES instructions as described in
   Intel  Advanced Vector Extensions
          Programming Reference
          MARCH 2008
          319433-002.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 2 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, makes no memory accesses,
   and is otherwise a pure function.

   opc4 contains the 4th byte of the opcode.  The front end should
   only supply opcodes corresponding to
   AESENC/AESENCLAST/AESDEC/AESDECLAST/AESIMC (the helper will assert
   otherwise).

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs, and gstOffD is the guest state offset for the XMM
   register output.  We never have to deal with the memory case since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.
*/
extern void amd64g_dirtyhelper_AES (
          VexGuestAMD64State* gst,
          HWord opc4, HWord gstOffD,
          HWord gstOffL, HWord gstOffR
       );
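
/* For reference, the final opcode bytes concerned (Intel SDM
   encodings):
      AESIMC       66 0F 38 DB
      AESENC       66 0F 38 DC
      AESENCLAST   66 0F 38 DD
      AESDEC       66 0F 38 DE
      AESDECLAST   66 0F 38 DF */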

/* Implementation of AESKEYGENASSIST.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (But not really,
   actually it could be a clean helper, but for the fact that we can't
   pass by value 1 x V128 to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state, makes no memory accesses,
   and is otherwise a pure function.

   imm8 is the Round Key constant.

   gstOffL and gstOffR are the guest state offsets for the XMM
   register input and output respectively.  We never have to deal with
   the memory case since that is handled by pre-loading the relevant
   value into the fake XMM16 register.
*/
extern void amd64g_dirtyhelper_AESKEYGENASSIST (
          VexGuestAMD64State* gst,
          HWord imm8,
          HWord gstOffL, HWord gstOffR
       );

//extern void  amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmNote
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );



/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* rflags masks */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
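
/* These shifts are the architectural rflags bit positions, so the
   masks can be applied directly to a value produced by
   amd64g_calculate_rflags_all, e.g. (a sketch):

      ULong rflags = amd64g_calculate_rflags_all( op, dep1, dep2, ndep );
      Bool  cf     = (rflags & AMD64G_CC_MASK_C) != 0;
      Bool  zf     = (rflags & AMD64G_CC_MASK_Z) != 0;
*/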

/* additional rflags masks */
#define AMD64G_CC_SHIFT_ID  21
#define AMD64G_CC_SHIFT_AC  18
#define AMD64G_CC_SHIFT_D   10

#define AMD64G_CC_MASK_ID   (1ULL << AMD64G_CC_SHIFT_ID)
#define AMD64G_CC_MASK_AC   (1ULL << AMD64G_CC_SHIFT_AC)
#define AMD64G_CC_MASK_D    (1ULL << AMD64G_CC_SHIFT_D)

/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)
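
/* The FC masks give the C3..C0 positions in the x87 status word, as
   produced by amd64g_calculate_FXAM above.  Per the Intel SDM, FXAM
   encodes the operand class in C3:C2:C0 (e.g. 1:0:0 for zero, 0:1:1
   for infinity) and the operand's sign in C1, so a caller might
   decode the result like this (a sketch):

      ULong fsw  = amd64g_calculate_FXAM( tag, dbl );
      Bool  sign = (fsw & AMD64G_FC_MASK_C1) != 0;
      Bool  isZero
         =  (fsw & AMD64G_FC_MASK_C3) != 0
         && (fsw & AMD64G_FC_MASK_C2) == 0
         && (fsw & AMD64G_FC_MASK_C0) == 0;
*/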


/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_rflags_all, calculate_rflags_c)
       the IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGICB/W/L/Q).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (the value shifted one bit less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave A Z S P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_ANDN32,  /* 53 */
    AMD64G_CC_OP_ANDN64,  /* 54 DEP1 = res, DEP2 = 0, NDEP = unused */

    AMD64G_CC_OP_BLSI32,  /* 55 */
    AMD64G_CC_OP_BLSI64,  /* 56 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSMSK32,/* 57 */
    AMD64G_CC_OP_BLSMSK64,/* 58 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_BLSR32,  /* 59 */
    AMD64G_CC_OP_BLSR64,  /* 60 DEP1 = res, DEP2 = arg, NDEP = unused */

    AMD64G_CC_OP_ADCX32,  /* 61 DEP1 = argL, DEP2 = argR ^ oldCarry, .. */
    AMD64G_CC_OP_ADCX64,  /* 62 .. NDEP = old flags */

    AMD64G_CC_OP_ADOX32,  /* 63 DEP1 = argL, DEP2 = argR ^ oldOverflow, .. */
    AMD64G_CC_OP_ADOX64,  /* 64 .. NDEP = old flags */

    AMD64G_CC_OP_NUMBER
};
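
/* To make the scheme concrete: after a 64-bit ADD, the front end
   leaves the thunk in the following state (argL/argR standing for the
   instruction's two operands).  This is written as C over a
   VexGuestAMD64State purely for illustration; the real front end
   (guest_amd64_toIR.c) emits IR Put statements instead.

      st->guest_CC_OP   = AMD64G_CC_OP_ADDQ;
      st->guest_CC_DEP1 = argL;   // first operand
      st->guest_CC_DEP2 = argR;   // second operand
      // CC_NDEP need not be written: ADD does not use it.

   Any later flag or condition query is then answered by the
   evaluation functions declared earlier in this file. */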

typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* less               */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;

#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                  guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/