
/*---------------------------------------------------------------*/
/*--- begin                                guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2011 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-amd64 directory. */

#ifndef __VEX_GUEST_AMD64_DEFS_H
#define __VEX_GUEST_AMD64_DEFS_H


/*---------------------------------------------------------*/
/*--- amd64 to IR conversion                            ---*/
/*---------------------------------------------------------*/

/* Convert one amd64 insn to IR.  See the type DisOneInstrFn in
   bb_to_IR.h. */
extern
DisResult disInstr_AMD64 ( IRSB*        irbb,
                           Bool         put_IP,
                           Bool         (*resteerOkFn) ( void*, Addr64 ),
                           Bool         resteerCisOk,
                           void*        callback_opaque,
                           UChar*       guest_code,
                           Long         delta,
                           Addr64       guest_IP,
                           VexArch      guest_arch,
                           VexArchInfo* archinfo,
                           VexAbiInfo*  abiinfo,
                           Bool         host_bigendian );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_amd64_spechelper ( HChar*   function_name,
                                 IRExpr** args,
                                 IRStmt** precedingStmts,
                                 Int      n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description. */
extern
Bool guest_amd64_state_requires_precise_mem_exns ( Int, Int );

extern
VexGuestLayout amd64guest_layout;


/*---------------------------------------------------------*/
/*--- amd64 guest helpers                               ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern ULong amd64g_calculate_rflags_all (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_rflags_c (
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_condition (
                ULong/*AMD64Condcode*/ cond,
                ULong cc_op,
                ULong cc_dep1, ULong cc_dep2, ULong cc_ndep
             );

extern ULong amd64g_calculate_FXAM ( ULong tag, ULong dbl );

extern ULong amd64g_calculate_RCR  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_RCL  (
                ULong arg, ULong rot_amt, ULong rflags_in, Long sz
             );

extern ULong amd64g_calculate_pclmul ( ULong s1, ULong s2, ULong which );

extern ULong amd64g_check_fldcw ( ULong fpucw );

extern ULong amd64g_create_fpucw ( ULong fpround );

extern ULong amd64g_check_ldmxcsr ( ULong mxcsr );

extern ULong amd64g_create_mxcsr ( ULong sseround );

extern VexEmWarn amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );

extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_amd64.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
//extern
//ULong amd64g_use_seg_selector ( HWord ldt, HWord gdt,
//                              UInt seg_selector, UInt virtual_addr );
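
/* Illustrative sketch only (note that the declaration above is
   currently commented out, and the variable names here -- ldt, gdt,
   selector, vaddr -- are invented for this example): a caller would
   decode the return value as described above, like so:

      ULong r      = amd64g_use_seg_selector(ldt, gdt, selector, vaddr);
      Bool  failed = (r >> 32) != 0;
      UInt  linear = failed ? 0 : (UInt)r;

   'linear' is then the guest linear address, valid only when 'failed'
   is False.
*/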

extern ULong amd64g_calculate_mmx_pmaddwd  ( ULong, ULong );
extern ULong amd64g_calculate_mmx_psadbw   ( ULong, ULong );
extern ULong amd64g_calculate_mmx_pmovmskb ( ULong );
extern ULong amd64g_calculate_sse_pmovmskb ( ULong w64hi, ULong w64lo );

extern ULong amd64g_calc_crc32b ( ULong crcIn, ULong b );
extern ULong amd64g_calc_crc32w ( ULong crcIn, ULong w );
extern ULong amd64g_calc_crc32l ( ULong crcIn, ULong l );
extern ULong amd64g_calc_crc32q ( ULong crcIn, ULong q );
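
/* Illustrative sketch (not part of the VEX API; crc_init, buf and len
   are invented names supplied by the caller): like the SSE4.2 CRC32
   instruction these helpers are used by chaining, feeding each result
   back in as crcIn, e.g. one call per byte of a buffer:

      ULong crc = crc_init;
      Int   i;
      for (i = 0; i < len; i++)
         crc = amd64g_calc_crc32b(crc, buf[i]);
*/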

/* --- DIRTY HELPERS --- */

extern ULong amd64g_dirtyhelper_loadF80le  ( ULong/*addr*/ );

extern void  amd64g_dirtyhelper_storeF80le ( ULong/*addr*/, ULong/*data*/ );

extern void  amd64g_dirtyhelper_CPUID_baseline ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse3_and_cx16 ( VexGuestAMD64State* st );
extern void  amd64g_dirtyhelper_CPUID_sse42_and_cx16 ( VexGuestAMD64State* st );

extern void  amd64g_dirtyhelper_FINIT ( VexGuestAMD64State* );

extern void      amd64g_dirtyhelper_FXSAVE  ( VexGuestAMD64State*, HWord );
extern VexEmWarn amd64g_dirtyhelper_FXRSTOR ( VexGuestAMD64State*, HWord );

extern ULong amd64g_dirtyhelper_RDTSC ( void );

extern ULong amd64g_dirtyhelper_IN  ( ULong portno, ULong sz/*1,2 or 4*/ );
extern void  amd64g_dirtyhelper_OUT ( ULong portno, ULong data,
                                      ULong sz/*1,2 or 4*/ );

extern void amd64g_dirtyhelper_SxDT ( void* address,
                                      ULong op /* 0 or 1 */ );

/* Helps with PCMP{I,E}STR{I,M}.

   CALLED FROM GENERATED CODE: DIRTY HELPER(s).  (Not really -- it
   could be a clean helper, were it not for the fact that we can't
   pass two V128s by value to a clean helper, nor have one returned.)
   Reads guest state, writes to guest state for the xSTRM cases, makes
   no memory accesses, and is otherwise a pure function.

   opc4_and_imm contains (4th byte of opcode << 8) | the-imm8-byte, so
   the callee knows which I/E and I/M variant it is dealing with and
   what the specific operation is.  The 4th byte of the opcode is in
   the range 0x60 to 0x63:
       istri  66 0F 3A 63
       istrm  66 0F 3A 62
       estri  66 0F 3A 61
       estrm  66 0F 3A 60

   gstOffL and gstOffR are the guest state offsets for the two XMM
   register inputs.  We never have to deal with the memory case, since
   that is handled by pre-loading the relevant value into the fake
   XMM16 register.

   For ESTRx variants, edxIN and eaxIN hold the values of those two
   registers.

   In all cases, the bottom 16 bits of the result contain the new
   OSZACP %rflags values.  For xSTRI variants, bits[31:16] of the
   result hold the new %ecx value.  For xSTRM variants, the helper
   writes the result directly to the guest XMM0.

   Declarable side effects: in all cases, reads guest state at
   [gstOffL, +16) and [gstOffR, +16).  For xSTRM variants, also writes
   guest_XMM0.

   Is expected to be called only with opc4_and_imm combinations which
   have actually been validated, and will assert otherwise.  The front
   end should ensure we're only called with verified values.
*/
extern ULong amd64g_dirtyhelper_PCMPxSTRx (
          VexGuestAMD64State*,
          HWord opc4_and_imm,
          HWord gstOffL, HWord gstOffR,
          HWord edxIN, HWord eaxIN
       );
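
/* Illustrative sketch (not part of the VEX API; st, opc4_and_imm,
   offL, offR, edx and eax are assumed to be set up by the caller):
   unpacking the result for an xSTRI variant, following the layout
   described above.

      ULong res   = amd64g_dirtyhelper_PCMPxSTRx(st, opc4_and_imm,
                                                 offL, offR, edx, eax);
      ULong flags = res & 0xFFFFULL;
      ULong ecx   = (res >> 16) & 0xFFFFULL;

   Here 'flags' holds the new OSZACP bits and 'ecx' the new %ecx
   value.
*/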


//extern void  amd64g_dirtyhelper_CPUID_sse0 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse1 ( VexGuestAMD64State* );
//extern void  amd64g_dirtyhelper_CPUID_sse2 ( VexGuestAMD64State* );

//extern void  amd64g_dirtyhelper_FSAVE ( VexGuestAMD64State*, HWord );

//extern VexEmWarn
//            amd64g_dirtyhelper_FRSTOR ( VexGuestAMD64State*, HWord );

//extern void amd64g_dirtyhelper_FSTENV ( VexGuestAMD64State*, HWord );

//extern VexEmWarn
//            amd64g_dirtyhelper_FLDENV ( VexGuestAMD64State*, HWord );



/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* rflags masks */
#define AMD64G_CC_SHIFT_O   11
#define AMD64G_CC_SHIFT_S   7
#define AMD64G_CC_SHIFT_Z   6
#define AMD64G_CC_SHIFT_A   4
#define AMD64G_CC_SHIFT_C   0
#define AMD64G_CC_SHIFT_P   2

#define AMD64G_CC_MASK_O    (1ULL << AMD64G_CC_SHIFT_O)
#define AMD64G_CC_MASK_S    (1ULL << AMD64G_CC_SHIFT_S)
#define AMD64G_CC_MASK_Z    (1ULL << AMD64G_CC_SHIFT_Z)
#define AMD64G_CC_MASK_A    (1ULL << AMD64G_CC_SHIFT_A)
#define AMD64G_CC_MASK_C    (1ULL << AMD64G_CC_SHIFT_C)
#define AMD64G_CC_MASK_P    (1ULL << AMD64G_CC_SHIFT_P)
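
/* Illustrative sketch (the function name is invented, not part of the
   VEX API): individual flags can be tested in a value returned by
   amd64g_calculate_rflags_all() using the masks above, e.g.

      static Bool example_is_below_or_equal ( ULong cc_op,  ULong dep1,
                                              ULong dep2,   ULong ndep )
      {
         ULong rf = amd64g_calculate_rflags_all(cc_op, dep1, dep2, ndep);
         Bool  cf = (rf & AMD64G_CC_MASK_C) != 0;
         Bool  zf = (rf & AMD64G_CC_MASK_Z) != 0;
         return (cf || zf) ? True : False;
      }

   which matches the "below or equal" test denoted by AMD64CondBE
   below.
*/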

/* FPU flag masks */
#define AMD64G_FC_SHIFT_C3   14
#define AMD64G_FC_SHIFT_C2   10
#define AMD64G_FC_SHIFT_C1   9
#define AMD64G_FC_SHIFT_C0   8

#define AMD64G_FC_MASK_C3    (1ULL << AMD64G_FC_SHIFT_C3)
#define AMD64G_FC_MASK_C2    (1ULL << AMD64G_FC_SHIFT_C2)
#define AMD64G_FC_MASK_C1    (1ULL << AMD64G_FC_SHIFT_C1)
#define AMD64G_FC_MASK_C0    (1ULL << AMD64G_FC_SHIFT_C0)


/* %RFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (amd64guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (amd64g_calculate_condition, amd64g_calculate_rflags_all,
       amd64g_calculate_rflags_c) the IRCallee's mcx_mask must be set
       so as to exclude from consideration all passed args except
       CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency: since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not bother to
   track their definedness at all.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is one of the
   AMD64G_CC_OP_LOGIC{B,W,L,Q} operations).  This is important because
   otherwise Memcheck could give false positives, as it does not
   understand the relationship between the CC_OP field and CC_DEP1 and
   CC_DEP2, and so believes that the definedness of the stored flags
   always depends on both CC_DEP1 and CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave S Z A P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
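
/* Worked example (illustrative only; argL and argR stand for the
   operand values the front end has to hand).  After translating
   "subq %rbx, %rax" the thunk would be left as

      CC_OP   = AMD64G_CC_OP_SUBQ
      CC_DEP1 = argL   (the original value of %rax)
      CC_DEP2 = argR   (the value of %rbx)
      CC_NDEP = unused

   and a following "jz" can then be evaluated lazily as

      ULong taken = amd64g_calculate_condition(AMD64CondZ,
                                               AMD64G_CC_OP_SUBQ,
                                               argL, argR, 0);

   which yields 1 exactly when argL == argR, i.e. when the subtraction
   would have set Z.
*/
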
enum {
    AMD64G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                          /* just copy DEP1 to output */

    AMD64G_CC_OP_ADDB,    /* 1 */
    AMD64G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_ADDL,    /* 3 */
    AMD64G_CC_OP_ADDQ,    /* 4 */

    AMD64G_CC_OP_SUBB,    /* 5 */
    AMD64G_CC_OP_SUBW,    /* 6 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SUBL,    /* 7 */
    AMD64G_CC_OP_SUBQ,    /* 8 */

    AMD64G_CC_OP_ADCB,    /* 9 */
    AMD64G_CC_OP_ADCW,    /* 10 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_ADCL,    /* 11 */
    AMD64G_CC_OP_ADCQ,    /* 12 */

    AMD64G_CC_OP_SBBB,    /* 13 */
    AMD64G_CC_OP_SBBW,    /* 14 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
    AMD64G_CC_OP_SBBL,    /* 15 */
    AMD64G_CC_OP_SBBQ,    /* 16 */

    AMD64G_CC_OP_LOGICB,  /* 17 */
    AMD64G_CC_OP_LOGICW,  /* 18 DEP1 = result, DEP2 = 0, NDEP = unused */
    AMD64G_CC_OP_LOGICL,  /* 19 */
    AMD64G_CC_OP_LOGICQ,  /* 20 */

    AMD64G_CC_OP_INCB,    /* 21 */
    AMD64G_CC_OP_INCW,    /* 22 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_INCL,    /* 23 */
    AMD64G_CC_OP_INCQ,    /* 24 */

    AMD64G_CC_OP_DECB,    /* 25 */
    AMD64G_CC_OP_DECW,    /* 26 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
    AMD64G_CC_OP_DECL,    /* 27 */
    AMD64G_CC_OP_DECQ,    /* 28 */

    AMD64G_CC_OP_SHLB,    /* 29 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHLW,    /* 30 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHLL,    /* 31 */
    AMD64G_CC_OP_SHLQ,    /* 32 */

    AMD64G_CC_OP_SHRB,    /* 33 DEP1 = res, DEP2 = res', NDEP = unused */
    AMD64G_CC_OP_SHRW,    /* 34 where res' is like res but shifted one bit less */
    AMD64G_CC_OP_SHRL,    /* 35 */
    AMD64G_CC_OP_SHRQ,    /* 36 */

    AMD64G_CC_OP_ROLB,    /* 37 */
    AMD64G_CC_OP_ROLW,    /* 38 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_ROLL,    /* 39 */
    AMD64G_CC_OP_ROLQ,    /* 40 */

    AMD64G_CC_OP_RORB,    /* 41 */
    AMD64G_CC_OP_RORW,    /* 42 DEP1 = res, DEP2 = 0, NDEP = old flags */
    AMD64G_CC_OP_RORL,    /* 43 */
    AMD64G_CC_OP_RORQ,    /* 44 */

    AMD64G_CC_OP_UMULB,   /* 45 */
    AMD64G_CC_OP_UMULW,   /* 46 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_UMULL,   /* 47 */
    AMD64G_CC_OP_UMULQ,   /* 48 */

    AMD64G_CC_OP_SMULB,   /* 49 */
    AMD64G_CC_OP_SMULW,   /* 50 DEP1 = argL, DEP2 = argR, NDEP = unused */
    AMD64G_CC_OP_SMULL,   /* 51 */
    AMD64G_CC_OP_SMULQ,   /* 52 */

    AMD64G_CC_OP_NUMBER
};

typedef
   enum {
      AMD64CondO      = 0,  /* overflow           */
      AMD64CondNO     = 1,  /* no overflow        */

      AMD64CondB      = 2,  /* below              */
      AMD64CondNB     = 3,  /* not below          */

      AMD64CondZ      = 4,  /* zero               */
      AMD64CondNZ     = 5,  /* not zero           */

      AMD64CondBE     = 6,  /* below or equal     */
      AMD64CondNBE    = 7,  /* not below or equal */

      AMD64CondS      = 8,  /* negative           */
      AMD64CondNS     = 9,  /* not negative       */

      AMD64CondP      = 10, /* parity even        */
      AMD64CondNP     = 11, /* not parity even    */

      AMD64CondL      = 12, /* jump less          */
      AMD64CondNL     = 13, /* not less           */

      AMD64CondLE     = 14, /* less or equal      */
      AMD64CondNLE    = 15, /* not less or equal  */

      AMD64CondAlways = 16  /* HACK */
   }
   AMD64Condcode;
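
/* Illustrative note (the helper name is invented, not part of the VEX
   API): the encoding pairs each condition with its negation,
   differing only in bit 0, so a condition can be inverted by flipping
   that bit, e.g. AMD64CondZ (4) becomes AMD64CondNZ (5).
   AMD64CondAlways has no negated partner and is excluded.

      static AMD64Condcode example_invert_cond ( AMD64Condcode c )
      {
         return (AMD64Condcode)(c ^ 1);
      }
*/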

#endif /* ndef __VEX_GUEST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                  guest_amd64_defs.h ---*/
/*---------------------------------------------------------------*/