/*---------------------------------------------------------------*/
/*--- begin                                 guest_x86_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2013 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

/* Only to be used within the guest-x86 directory. */

#ifndef __VEX_GUEST_X86_DEFS_H
#define __VEX_GUEST_X86_DEFS_H

#include "libvex_basictypes.h"
#include "libvex_guest_x86.h"            // VexGuestX86State
#include "libvex_emnote.h"               // VexEmNote
#include "guest_generic_bb_to_IR.h"      // DisResult

/*---------------------------------------------------------*/
/*--- x86 to IR conversion                              ---*/
/*---------------------------------------------------------*/

/* Convert one x86 insn to IR.  See the type DisOneInstrFn in
   bb_to_IR.h. */
extern
DisResult disInstr_X86 ( IRSB*        irbb,
                         Bool         (*resteerOkFn) ( void*, Addr64 ),
                         Bool         resteerCisOk,
                         void*        callback_opaque,
                         UChar*       guest_code,
                         Long         delta,
                         Addr64       guest_IP,
                         VexArch      guest_arch,
                         VexArchInfo* archinfo,
                         VexAbiInfo*  abiinfo,
                         Bool         host_bigendian,
                         Bool         sigill_diag );

/* Used by the optimiser to specialise calls to helpers. */
extern
IRExpr* guest_x86_spechelper ( const HChar* function_name,
                               IRExpr**     args,
                               IRStmt**     precedingStmts,
                               Int          n_precedingStmts );

/* Describes to the optimiser which parts of the guest state require
   precise memory exceptions.  This is logically part of the guest
   state description.
*/
extern
Bool guest_x86_state_requires_precise_mem_exns ( Int, Int );

extern
VexGuestLayout x86guest_layout;


/*---------------------------------------------------------*/
/*--- x86 guest helpers                                 ---*/
/*---------------------------------------------------------*/

/* --- CLEAN HELPERS --- */

extern UInt  x86g_calculate_eflags_all (
                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep
             );

VEX_REGPARM(3)
extern UInt  x86g_calculate_eflags_c (
                UInt cc_op, UInt cc_dep1, UInt cc_dep2, UInt cc_ndep
             );

extern UInt  x86g_calculate_condition (
                UInt/*X86Condcode*/ cond,
                UInt cc_op,
                UInt cc_dep1, UInt cc_dep2, UInt cc_ndep
             );

extern UInt  x86g_calculate_FXAM ( UInt tag, ULong dbl );

extern ULong x86g_calculate_RCR (
                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz
             );
extern ULong x86g_calculate_RCL (
                UInt arg, UInt rot_amt, UInt eflags_in, UInt sz
             );

extern UInt  x86g_calculate_daa_das_aaa_aas ( UInt AX_and_flags, UInt opcode );

extern UInt  x86g_calculate_aad_aam ( UInt AX_and_flags, UInt opcode );

extern ULong x86g_check_fldcw ( UInt fpucw );

extern UInt  x86g_create_fpucw ( UInt fpround );

extern ULong x86g_check_ldmxcsr ( UInt mxcsr );

extern UInt  x86g_create_mxcsr ( UInt sseround );


/* Translate a guest virtual_addr into a guest linear address by
   consulting the supplied LDT/GDT structures.  Their representation
   must be as specified in pub/libvex_guest_x86.h.  To indicate a
   translation failure, 1<<32 is returned.  On success, the lower 32
   bits of the returned result indicate the linear address.
*/
extern
ULong x86g_use_seg_selector ( HWord ldt, HWord gdt,
                              UInt seg_selector, UInt virtual_addr );
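
/* Illustrative sketch only, not part of the VEX API: one way a caller
   might consume the return encoding described above.  Failure is
   signalled by a non-zero upper half of the result; on success the low
   32 bits hold the linear address.  The function name and the
   out-parameter are hypothetical. */
static inline Bool sketch_resolve_linear_addr ( HWord ldt, HWord gdt,
                                                UInt seg_selector,
                                                UInt virtual_addr,
                                                /*OUT*/UInt* linear )
{
   ULong r = x86g_use_seg_selector(ldt, gdt, seg_selector, virtual_addr);
   if ((r >> 32) != 0)
      return False;          /* translation failed */
   *linear = (UInt)r;        /* low 32 bits are the linear address */
   return True;
}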

extern ULong x86g_calculate_mmx_pmaddwd ( ULong, ULong );
extern ULong x86g_calculate_mmx_psadbw  ( ULong, ULong );


/* --- DIRTY HELPERS --- */

extern ULong x86g_dirtyhelper_loadF80le  ( UInt );

extern void  x86g_dirtyhelper_storeF80le ( UInt, ULong );

extern void  x86g_dirtyhelper_CPUID_sse0   ( VexGuestX86State* );
extern void  x86g_dirtyhelper_CPUID_mmxext ( VexGuestX86State* );
extern void  x86g_dirtyhelper_CPUID_sse1   ( VexGuestX86State* );
extern void  x86g_dirtyhelper_CPUID_sse2   ( VexGuestX86State* );

extern void  x86g_dirtyhelper_FINIT ( VexGuestX86State* );

extern void  x86g_dirtyhelper_FXSAVE ( VexGuestX86State*, HWord );
extern void  x86g_dirtyhelper_FSAVE  ( VexGuestX86State*, HWord );
extern void  x86g_dirtyhelper_FSTENV ( VexGuestX86State*, HWord );

extern ULong x86g_dirtyhelper_RDTSC ( void );

extern UInt  x86g_dirtyhelper_IN  ( UInt portno, UInt sz/*1,2 or 4*/ );
extern void  x86g_dirtyhelper_OUT ( UInt portno, UInt data,
                                    UInt sz/*1,2 or 4*/ );

extern void  x86g_dirtyhelper_SxDT ( void* address,
                                     UInt op /* 0 or 1 */ );

extern VexEmNote
             x86g_dirtyhelper_FXRSTOR ( VexGuestX86State*, HWord );

extern VexEmNote
             x86g_dirtyhelper_FRSTOR ( VexGuestX86State*, HWord );

extern VexEmNote
             x86g_dirtyhelper_FLDENV ( VexGuestX86State*, HWord );


/*---------------------------------------------------------*/
/*--- Condition code stuff                              ---*/
/*---------------------------------------------------------*/

/* eflags masks */
#define X86G_CC_SHIFT_O   11
#define X86G_CC_SHIFT_S   7
#define X86G_CC_SHIFT_Z   6
#define X86G_CC_SHIFT_A   4
#define X86G_CC_SHIFT_C   0
#define X86G_CC_SHIFT_P   2

#define X86G_CC_MASK_O    (1 << X86G_CC_SHIFT_O)
#define X86G_CC_MASK_S    (1 << X86G_CC_SHIFT_S)
#define X86G_CC_MASK_Z    (1 << X86G_CC_SHIFT_Z)
#define X86G_CC_MASK_A    (1 << X86G_CC_SHIFT_A)
#define X86G_CC_MASK_C    (1 << X86G_CC_SHIFT_C)
#define X86G_CC_MASK_P    (1 << X86G_CC_SHIFT_P)

/* FPU flag masks */
#define X86G_FC_SHIFT_C3   14
#define X86G_FC_SHIFT_C2   10
#define X86G_FC_SHIFT_C1   9
#define X86G_FC_SHIFT_C0   8

#define X86G_FC_MASK_C3   (1 << X86G_FC_SHIFT_C3)
#define X86G_FC_MASK_C2   (1 << X86G_FC_SHIFT_C2)
#define X86G_FC_MASK_C1   (1 << X86G_FC_SHIFT_C1)
#define X86G_FC_MASK_C0   (1 << X86G_FC_SHIFT_C0)


/* %EFLAGS thunk descriptors.  A four-word thunk is used to record
   details of the most recent flag-setting operation, so the flags can
   be computed later if needed.  It is possible to do this a little
   more efficiently using a 3-word thunk, but that makes it impossible
   to describe the flag data dependencies sufficiently accurately for
   Memcheck.  Hence 4 words are used, with minimal loss of efficiency.

   The four words are:

      CC_OP, which describes the operation.

      CC_DEP1 and CC_DEP2.  These are arguments to the operation.
         We want Memcheck to believe that the resulting flags are
         data-dependent on both CC_DEP1 and CC_DEP2, hence the
         name DEP.

      CC_NDEP.  This is a 3rd argument to the operation which is
         sometimes needed.  We arrange things so that Memcheck does
         not believe the resulting flags are data-dependent on CC_NDEP
         ("not dependent").

   To make Memcheck believe that (the definedness of) the encoded
   flags depends only on (the definedness of) CC_DEP1 and CC_DEP2
   requires two things:

   (1) In the guest state layout info (x86guest_layout), CC_OP and
       CC_NDEP are marked as always defined.

   (2) When passing the thunk components to an evaluation function
       (calculate_condition, calculate_eflags, calculate_eflags_c) the
       IRCallee's mcx_mask must be set so as to exclude from
       consideration all passed args except CC_DEP1 and CC_DEP2.

   Strictly speaking only (2) is necessary for correctness.  However,
   (1) helps efficiency in that since (2) means we never ask about the
   definedness of CC_OP or CC_NDEP, we may as well not even bother to
   track their definedness.

   When building the thunk, it is always necessary to write words into
   CC_DEP1 and CC_DEP2, even if those args are not used given the
   CC_OP field (eg, CC_DEP2 is not used if CC_OP is CC_LOGIC1/2/4).
   This is important because otherwise Memcheck could give false
   positives as it does not understand the relationship between the
   CC_OP field and CC_DEP1 and CC_DEP2, and so believes that the
   definedness of the stored flags always depends on both CC_DEP1 and
   CC_DEP2.

   However, it is only necessary to set CC_NDEP when the CC_OP value
   requires it, because Memcheck ignores CC_NDEP, and the evaluation
   functions do understand the CC_OP fields and will only examine
   CC_NDEP for suitable values of CC_OP.

   A summary of the field usages is:

   Operation          DEP1               DEP2               NDEP
   ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

   add/sub/mul        first arg          second arg         unused

   adc/sbb            first arg          (second arg)
                                         XOR old_carry      old_carry

   and/or/xor         result             zero               unused

   inc/dec            result             zero               old_carry

   shl/shr/sar        result             subshifted-        unused
                                         result

   rol/ror            result             zero               old_flags

   copy               old_flags          zero               unused.


   Therefore Memcheck will believe the following:

   * add/sub/mul -- definedness of result flags depends on definedness
     of both args.

   * adc/sbb -- definedness of result flags depends on definedness of
     both args and definedness of the old C flag.  Because only two
     DEP fields are available, the old C flag is XOR'd into the second
     arg so that Memcheck sees the data dependency on it.  That means
     the NDEP field must contain a second copy of the old C flag
     so that the evaluation functions can correctly recover the second
     arg.

   * and/or/xor are straightforward -- definedness of result flags
     depends on definedness of result value.

   * inc/dec -- definedness of result flags depends only on
     definedness of result.  This isn't really true -- it also depends
     on the old C flag.  However, we don't want Memcheck to see that,
     and so the old C flag must be passed in NDEP and not in DEP2.
     It's inconceivable that a compiler would generate code that puts
     the C flag in an undefined state, then does an inc/dec, which
     leaves C unchanged, and then makes a conditional jump/move based
     on C.  So our fiction seems a good approximation.

   * shl/shr/sar -- straightforward, again, definedness of result
     flags depends on definedness of result value.  The subshifted
     value (value shifted one less) is also needed, but its
     definedness is the same as the definedness of the shifted value.

   * rol/ror -- these only set O and C, and leave A Z S P alone.
     However it seems prudent (as per inc/dec) to say the definedness
     of all resulting flags depends on the definedness of the result,
     hence the old flags must go in as NDEP and not DEP2.

   * rcl/rcr are too difficult to do in-line, and so are done by a
     helper function.  They are not part of this scheme.  The helper
     function takes the value to be rotated, the rotate amount and the
     old flags, and returns the new flags and the rotated value.
     Since the helper's mcx_mask does not have any set bits, Memcheck
     will lazily propagate undefinedness from any of the 3 args into
     both results (flags and actual value).
*/
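
/* Illustrative sketch only, not part of the VEX sources: it shows how
   an evaluation function can recover the real second operand and the
   old carry for the 32-bit adc case from the encoding described above,
   where DEP1 = argL, DEP2 = argR ^ oldCarry and NDEP = oldCarry.  The
   function name is hypothetical. */
static inline UInt sketch_adc32_carry_out ( UInt cc_dep1, UInt cc_dep2,
                                            UInt cc_ndep )
{
   UInt oldC = cc_ndep & X86G_CC_MASK_C;   /* old carry, 0 or 1 */
   UInt argR = cc_dep2 ^ oldC;             /* undo the XOR encoding */
   UInt argL = cc_dep1;
   UInt res  = argL + argR + oldC;         /* the adc result */
   /* carry out iff the 32-bit addition wrapped around */
   return oldC ? (res <= argL) : (res < argL);
}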

enum {
   X86G_CC_OP_COPY=0,  /* DEP1 = current flags, DEP2 = 0, NDEP = unused */
                       /* just copy DEP1 to output */

   X86G_CC_OP_ADDB,    /* 1 */
   X86G_CC_OP_ADDW,    /* 2 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_ADDL,    /* 3 */

   X86G_CC_OP_SUBB,    /* 4 */
   X86G_CC_OP_SUBW,    /* 5 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_SUBL,    /* 6 */

   X86G_CC_OP_ADCB,    /* 7 */
   X86G_CC_OP_ADCW,    /* 8 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
   X86G_CC_OP_ADCL,    /* 9 */

   X86G_CC_OP_SBBB,    /* 10 */
   X86G_CC_OP_SBBW,    /* 11 DEP1 = argL, DEP2 = argR ^ oldCarry, NDEP = oldCarry */
   X86G_CC_OP_SBBL,    /* 12 */

   X86G_CC_OP_LOGICB,  /* 13 */
   X86G_CC_OP_LOGICW,  /* 14 DEP1 = result, DEP2 = 0, NDEP = unused */
   X86G_CC_OP_LOGICL,  /* 15 */

   X86G_CC_OP_INCB,    /* 16 */
   X86G_CC_OP_INCW,    /* 17 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
   X86G_CC_OP_INCL,    /* 18 */

   X86G_CC_OP_DECB,    /* 19 */
   X86G_CC_OP_DECW,    /* 20 DEP1 = result, DEP2 = 0, NDEP = oldCarry (0 or 1) */
   X86G_CC_OP_DECL,    /* 21 */

   X86G_CC_OP_SHLB,    /* 22 DEP1 = res, DEP2 = res', NDEP = unused */
   X86G_CC_OP_SHLW,    /* 23 where res' is like res but shifted one bit less */
   X86G_CC_OP_SHLL,    /* 24 */

   X86G_CC_OP_SHRB,    /* 25 DEP1 = res, DEP2 = res', NDEP = unused */
   X86G_CC_OP_SHRW,    /* 26 where res' is like res but shifted one bit less */
   X86G_CC_OP_SHRL,    /* 27 */

   X86G_CC_OP_ROLB,    /* 28 */
   X86G_CC_OP_ROLW,    /* 29 DEP1 = res, DEP2 = 0, NDEP = old flags */
   X86G_CC_OP_ROLL,    /* 30 */

   X86G_CC_OP_RORB,    /* 31 */
   X86G_CC_OP_RORW,    /* 32 DEP1 = res, DEP2 = 0, NDEP = old flags */
   X86G_CC_OP_RORL,    /* 33 */

   X86G_CC_OP_UMULB,   /* 34 */
   X86G_CC_OP_UMULW,   /* 35 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_UMULL,   /* 36 */

   X86G_CC_OP_SMULB,   /* 37 */
   X86G_CC_OP_SMULW,   /* 38 DEP1 = argL, DEP2 = argR, NDEP = unused */
   X86G_CC_OP_SMULL,   /* 39 */

   X86G_CC_OP_NUMBER
};

typedef
   enum {
      X86CondO      = 0,  /* overflow           */
      X86CondNO     = 1,  /* no overflow        */

      X86CondB      = 2,  /* below              */
      X86CondNB     = 3,  /* not below          */

      X86CondZ      = 4,  /* zero               */
      X86CondNZ     = 5,  /* not zero           */

      X86CondBE     = 6,  /* below or equal     */
      X86CondNBE    = 7,  /* not below or equal */

      X86CondS      = 8,  /* negative           */
      X86CondNS     = 9,  /* not negative       */

      X86CondP      = 10, /* parity even        */
      X86CondNP     = 11, /* not parity even    */

      X86CondL      = 12, /* less               */
      X86CondNL     = 13, /* not less           */

      X86CondLE     = 14, /* less or equal      */
      X86CondNLE    = 15, /* not less or equal  */

      X86CondAlways = 16  /* HACK */
   }
   X86Condcode;

#endif /* ndef __VEX_GUEST_X86_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   guest_x86_defs.h ---*/
/*---------------------------------------------------------------*/