
/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __VEX_HOST_AMD64_DEFS_H
#define __VEX_HOST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"                      // VexArch
#include "host_generic_regs.h"           // HReg

/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 16 real int regs, 6 real
   float regs, and 16 real vector regs.
*/

extern void ppHRegAMD64 ( HReg );

extern HReg hregAMD64_RAX ( void );
extern HReg hregAMD64_RBX ( void );
extern HReg hregAMD64_RCX ( void );
extern HReg hregAMD64_RDX ( void );
extern HReg hregAMD64_RSP ( void );
extern HReg hregAMD64_RBP ( void );
extern HReg hregAMD64_RSI ( void );
extern HReg hregAMD64_RDI ( void );
extern HReg hregAMD64_R8  ( void );
extern HReg hregAMD64_R9  ( void );
extern HReg hregAMD64_R10 ( void );
extern HReg hregAMD64_R11 ( void );
extern HReg hregAMD64_R12 ( void );
extern HReg hregAMD64_R13 ( void );
extern HReg hregAMD64_R14 ( void );
extern HReg hregAMD64_R15 ( void );

extern HReg hregAMD64_FAKE0 ( void );
extern HReg hregAMD64_FAKE1 ( void );
extern HReg hregAMD64_FAKE2 ( void );
extern HReg hregAMD64_FAKE3 ( void );
extern HReg hregAMD64_FAKE4 ( void );
extern HReg hregAMD64_FAKE5 ( void );

extern HReg hregAMD64_XMM0  ( void );
extern HReg hregAMD64_XMM1  ( void );
extern HReg hregAMD64_XMM3  ( void );
extern HReg hregAMD64_XMM4  ( void );
extern HReg hregAMD64_XMM5  ( void );
extern HReg hregAMD64_XMM6  ( void );
extern HReg hregAMD64_XMM7  ( void );
extern HReg hregAMD64_XMM8  ( void );
extern HReg hregAMD64_XMM9  ( void );
extern HReg hregAMD64_XMM10 ( void );
extern HReg hregAMD64_XMM11 ( void );
extern HReg hregAMD64_XMM12 ( void );
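
/* Illustrative sketch (not part of the interface): fetch a couple of
   host registers and pretty-print them.  Assumes only the declarations
   above.

      HReg rax  = hregAMD64_RAX();
      HReg xmm0 = hregAMD64_XMM0();
      ppHRegAMD64(rax);    // prints the register's name
      ppHRegAMD64(xmm0);
*/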


/* --------- Condition codes, AMD encoding. --------- */

typedef
   enum {
      Acc_O      = 0,  /* overflow           */
      Acc_NO     = 1,  /* no overflow        */

      Acc_B      = 2,  /* below              */
      Acc_NB     = 3,  /* not below          */

      Acc_Z      = 4,  /* zero               */
      Acc_NZ     = 5,  /* not zero           */

      Acc_BE     = 6,  /* below or equal     */
      Acc_NBE    = 7,  /* not below or equal */

      Acc_S      = 8,  /* negative           */
      Acc_NS     = 9,  /* not negative       */

      Acc_P      = 10, /* parity even        */
      Acc_NP     = 11, /* not parity even    */

      Acc_L      = 12, /* less               */
      Acc_NL     = 13, /* not less           */

      Acc_LE     = 14, /* less or equal      */
      Acc_NLE    = 15, /* not less or equal  */

      Acc_ALWAYS = 16  /* the usual hack     */
   }
   AMD64CondCode;

extern const HChar* showAMD64CondCode ( AMD64CondCode );


/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
     Aam_IR,        /* Immediate + Reg */
     Aam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   AMD64AModeTag;

typedef
   struct {
      AMD64AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Aam;
   }
   AMD64AMode;

extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );

extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );

extern void ppAMD64AMode ( AMD64AMode* );
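
/* Illustrative sketch (not part of the interface): building both amode
   forms.  The shift field is a log2 scale factor, so the value 2 below
   means "index * 4"; the two amodes correspond to 16(%rbp) and
   8(%rax,%rbx,4) respectively.

      AMD64AMode* am1 = AMD64AMode_IR(16, hregAMD64_RBP());
      AMD64AMode* am2 = AMD64AMode_IRRS(8, hregAMD64_RAX(),
                                        hregAMD64_RBX(), 2);
      ppAMD64AMode(am2);
*/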


/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Armi_Imm,
      Armi_Reg,
      Armi_Mem
   }
   AMD64RMITag;

typedef
   struct {
      AMD64RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Armi;
   }
   AMD64RMI;

extern AMD64RMI* AMD64RMI_Imm ( UInt );
extern AMD64RMI* AMD64RMI_Reg ( HReg );
extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );

extern void ppAMD64RMI      ( AMD64RMI* );
extern void ppAMD64RMI_lo32 ( AMD64RMI* );
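
/* Illustrative sketch (not part of the interface): the three RMI forms,
   as they might be used for the source operand of an ALU op.

      AMD64RMI* srcI = AMD64RMI_Imm(42);
      AMD64RMI* srcR = AMD64RMI_Reg(hregAMD64_RCX());
      AMD64RMI* srcM = AMD64RMI_Mem(AMD64AMode_IR(0, hregAMD64_RSI()));
*/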


/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Ari_Imm,
      Ari_Reg
   }
   AMD64RITag;

typedef
   struct {
      AMD64RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Ari;
   }
   AMD64RI;

extern AMD64RI* AMD64RI_Imm ( UInt );
extern AMD64RI* AMD64RI_Reg ( HReg );

extern void ppAMD64RI ( AMD64RI* );


/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Arm_Reg,
      Arm_Mem
   }
   AMD64RMTag;

typedef
   struct {
      AMD64RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            AMD64AMode* am;
         } Mem;
      }
      Arm;
   }
   AMD64RM;

extern AMD64RM* AMD64RM_Reg ( HReg );
extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );

extern void ppAMD64RM ( AMD64RM* );
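
/* Illustrative sketch (not part of the interface): RI and RM operands
   are built the same way as RMI above, just with one fewer form each.

      AMD64RI* src = AMD64RI_Imm(1);
      AMD64RM* dst = AMD64RM_Mem(AMD64AMode_IR(8, hregAMD64_RSP()));
*/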


/* --------- Instructions. --------- */

/* --------- */
typedef
   enum {
      Aun_NEG,
      Aun_NOT
   }
   AMD64UnaryOp;

extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );


/* --------- */
typedef
   enum {
      Aalu_INVALID,
      Aalu_MOV,
      Aalu_CMP,
      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
      Aalu_AND, Aalu_OR, Aalu_XOR,
      Aalu_MUL
   }
   AMD64AluOp;

extern const HChar* showAMD64AluOp ( AMD64AluOp );


/* --------- */
typedef
   enum {
      Ash_INVALID,
      Ash_SHL, Ash_SHR, Ash_SAR
   }
   AMD64ShiftOp;

extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );


/* --------- */
typedef
   enum {
      Afp_INVALID,
      /* Binary */
      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
      /* Unary */
      Afp_SQRT,
      Afp_SIN, Afp_COS, Afp_TAN,
      Afp_ROUND, Afp_2XM1
   }
   A87FpOp;

extern const HChar* showA87FpOp ( A87FpOp );


/* --------- */
typedef
   enum {
      Asse_INVALID,
      /* mov */
      Asse_MOV,
      /* Floating point binary */
      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
      Asse_MAXF, Asse_MINF,
      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
      /* Floating point unary */
      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
      /* Bitwise */
      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
      Asse_QADD8U, Asse_QADD16U,
      Asse_QADD8S, Asse_QADD16S,
      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
      Asse_QSUB8U, Asse_QSUB16U,
      Asse_QSUB8S, Asse_QSUB16S,
      Asse_MUL16,
      Asse_MULHI16U,
      Asse_MULHI16S,
      Asse_AVG8U, Asse_AVG16U,
      Asse_MAX16S,
      Asse_MAX8U,
      Asse_MIN16S,
      Asse_MIN8U,
      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
      Asse_SHL16, Asse_SHL32, Asse_SHL64,
      Asse_SHR16, Asse_SHR32, Asse_SHR64,
      Asse_SAR16, Asse_SAR32,
      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ
   }
   AMD64SseOp;

extern const HChar* showAMD64SseOp ( AMD64SseOp );


/* --------- */
typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift/rotate, dst=REG or MEM */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      //uu Ain_AvxLdSt,     /* AVX load/store 256 bits,
      //uu                     no alignment constraints */
      //uu Ain_AvxReRg,     /* AVX binary general reg-reg, Re, Rg */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;

/* Destinations are on the RIGHT (second operand) */

typedef
   struct {
      AMD64InstrTag tag;
      union {
         struct {
            ULong imm64;
            HReg  dst;
         } Imm64;
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu64R;
         struct {
            AMD64AluOp  op;
            AMD64RI*    src;
            AMD64AMode* dst;
         } Alu64M;
         struct {
            AMD64ShiftOp op;
            UInt         src;  /* shift amount, or 0 means %cl */
            HReg         dst;
         } Sh64;
         struct {
            UInt   imm32;
            HReg   dst;
         } Test64;
         /* Not and Neg */
         struct {
            AMD64UnaryOp op;
            HReg         dst;
         } Unary64;
         /* 64-bit compute EA into a reg */
         struct {
            AMD64AMode* am;
            HReg        dst;
         } Lea64;
         /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
         struct {
            AMD64AluOp op;
            AMD64RMI*  src;
            HReg       dst;
         } Alu32R;
         /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
            r/m64 */
         struct {
            Bool     syned;
            AMD64RM* src;
         } MulL;
         /* amd64 div/idiv instruction.  Modifies RDX and RAX and
            reads src. */
         struct {
            Bool     syned;
            Int      sz; /* 4 or 8 only */
            AMD64RM* src;
         } Div;
         struct {
            AMD64RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Acc_ALWAYS). */
         struct {
            AMD64CondCode cond;
            Addr64        target;
            Int           regparms; /* 0 .. 6 */
            RetLoc        rloc;     /* where the return value will be */
         } Call;
         /* Update the guest RIP value, then exit requesting to chain
            to it.  May be conditional. */
         struct {
            Addr64        dstGA;    /* next guest address */
            AMD64AMode*   amRIP;    /* amode in guest state for RIP */
            AMD64CondCode cond;     /* can be Acc_ALWAYS */
            Bool          toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg          dstGA;
            AMD64AMode*   amRIP;
            AMD64CondCode cond; /* can be Acc_ALWAYS */
            IRJumpKind    jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            AMD64RM*      src;
            HReg          dst;
         } CMov64;
         /* reg-reg move, sx-ing/zx-ing top half */
         struct {
            Bool syned;
            HReg src;
            HReg dst;
         } MovxLQ;
         /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         struct {
            UChar       szSmall; /* only 1, 2 or 4 */
            Bool        syned;
            AMD64AMode* src;
            HReg        dst;
         } LoadEX;
         /* 32/16/8 bit stores. */
         struct {
            UChar       sz; /* only 1, 2 or 4 */
            HReg        src;
            AMD64AMode* dst;
         } Store;
         /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
         struct {
            AMD64CondCode cond;
            HReg          dst;
         } Set64;
         /* 64-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr64;
         /* Mem fence.  In short, an insn which flushes all preceding
            loads and stores as much as possible before continuing.
            On AMD64 we emit a real "mfence". */
         struct {
         } MFence;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 1, 2, 4 or 8 */
         } ACAS;
         struct {
            AMD64AMode* addr;
            UChar       sz; /* 4 or 8 only */
         } DACAS;

         /* --- X87 --- */

         /* A very minimal set of x87 insns, that operate exactly in a
            stack-like way so no need to think about x87 registers. */

         /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         struct {
            Int nregs; /* 1 <= nregs <= 7 */
         } A87Free;

         /* Push a 32- or 64-bit FP value from memory onto the stack,
            or move a value from the stack to memory and remove it
            from the stack. */
         struct {
            AMD64AMode* addr;
            Bool        isPush;
            UChar       szB; /* 4 or 8 */
         } A87PushPop;

         /* Do an operation on the top-of-stack.  This can be unary, in
            which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
            %st0, %st1 ). */
         struct {
            A87FpOp op;
         } A87FpOp;

         /* Load the FPU control word. */
         struct {
            AMD64AMode* addr;
         } A87LdCW;

         /* Store the FPU status word (fstsw m16) */
         struct {
            AMD64AMode* addr;
         } A87StSW;

         /* --- SSE --- */

         /* Load 32 bits into %mxcsr. */
         struct {
            AMD64AMode* addr;
         }
         LdMXCSR;
         /* ucomisd/ucomiss, then get %rflags into int register */
         struct {
            UChar   sz;   /* 4 or 8 only */
            HReg    srcL; /* xmm */
            HReg    srcR; /* xmm */
            HReg    dst;  /* int */
         } SseUComIS;
         /* scalar 32/64 int to 32/64 float conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* i class */
            HReg  dst; /* v class */
         } SseSI2SF;
         /* scalar 32/64 float to 32/64 int conversion */
         struct {
            UChar szS; /* 4 or 8 */
            UChar szD; /* 4 or 8 */
            HReg  src; /* v class */
            HReg  dst; /* i class */
         } SseSF2SI;
         /* scalar float32 to/from float64 */
         struct {
            Bool from64; /* True: 64->32; False: 32->64 */
            HReg src;
            HReg dst;
         } SseSDSS;
         struct {
            Bool        isLoad;
            UChar       sz; /* 4, 8 or 16 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdSt;
         struct {
            Int         sz; /* 4 or 8 only */
            HReg        reg;
            AMD64AMode* addr;
         } SseLdzLO;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32Fx4;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse32FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64Fx2;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } Sse64FLo;
         struct {
            AMD64SseOp op;
            HReg       src;
            HReg       dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Acc_ALWAYS. */
         struct {
            AMD64CondCode cond;
            HReg          src;
            HReg          dst;
         } SseCMov;
         struct {
            Int    order; /* 0 <= order <= 0xFF */
            HReg   src;
            HReg   dst;
         } SseShuf;
         //uu struct {
         //uu    Bool        isLoad;
         //uu    HReg        reg;
         //uu    AMD64AMode* addr;
         //uu } AvxLdSt;
         //uu struct {
         //uu    AMD64SseOp op;
         //uu    HReg       src;
         //uu    HReg       dst;
         //uu } AvxReRg;
         struct {
            AMD64AMode* amCounter;
            AMD64AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Ain;
   }
   AMD64Instr;

extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*,  AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int, RetLoc );
extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, AMD64RM* src, HReg dst );
extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );

extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
//uu extern AMD64Instr* AMD64Instr_AvxLdSt    ( Bool isLoad, HReg, AMD64AMode* );
//uu extern AMD64Instr* AMD64Instr_AvxReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
                                           AMD64AMode* amFailAddr );
extern AMD64Instr* AMD64Instr_ProfInc    ( void );
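
/* Illustrative sketch (not part of the interface): a two-instruction
   sequence computing %rax = 0x1122334455667788 + %rbx.  Note that, per
   the comment above, destinations are the rightmost argument.

      AMD64Instr* i1 = AMD64Instr_Imm64(0x1122334455667788ULL,
                                        hregAMD64_RAX());
      AMD64Instr* i2 = AMD64Instr_Alu64R(Aalu_ADD,
                                         AMD64RMI_Reg(hregAMD64_RBX()),
                                         hregAMD64_RAX());
*/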


extern void ppAMD64Instr ( AMD64Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void         getRegUsage_AMD64Instr ( HRegUsage*, AMD64Instr*, Bool );
extern void         mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Bool         isMove_AMD64Instr      ( AMD64Instr*, HReg*, HReg* );
extern Int          emit_AMD64Instr        ( /*MB_MOD*/Bool* is_profInc,
                                             UChar* buf, Int nbuf, AMD64Instr* i,
                                             Bool mode64,
                                             void* disp_cp_chain_me_to_slowEP,
                                             void* disp_cp_chain_me_to_fastEP,
                                             void* disp_cp_xindir,
                                             void* disp_cp_xassisted );

extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );

extern void         getAllocableRegs_AMD64 ( Int*, HReg** );
extern HInstrArray* iselSB_AMD64           ( IRSB*,
                                             VexArch,
                                             VexArchInfo*,
                                             VexAbiInfo*,
                                             Int offs_Host_EvC_Counter,
                                             Int offs_Host_EvC_FailAddr,
                                             Bool chainingAllowed,
                                             Bool addProfInc,
                                             Addr64 max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_AMD64 ( void );

/* Chain and unchain an XDirect jump. */
extern VexInvalRange chainXDirect_AMD64 ( void* place_to_chain,
                                          void* disp_cp_chain_me_EXPECTED,
                                          void* place_to_jump_to );

extern VexInvalRange unchainXDirect_AMD64 ( void* place_to_unchain,
                                            void* place_to_jump_to_EXPECTED,
                                            void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_AMD64 ( void*  place_to_patch,
                                          ULong* location_of_counter );


#endif /* ndef __VEX_HOST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/