/*---------------------------------------------------------------*/
/*--- begin                                   host_x86_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2012 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#ifndef __VEX_HOST_X86_DEFS_H
#define __VEX_HOST_X86_DEFS_H


/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 8 real int regs,
   6 real float regs, and 8 real vector regs.
*/

extern void ppHRegX86 ( HReg );

extern HReg hregX86_EAX ( void );
extern HReg hregX86_EBX ( void );
extern HReg hregX86_ECX ( void );
extern HReg hregX86_EDX ( void );
extern HReg hregX86_ESP ( void );
extern HReg hregX86_EBP ( void );
extern HReg hregX86_ESI ( void );
extern HReg hregX86_EDI ( void );

extern HReg hregX86_FAKE0 ( void );
extern HReg hregX86_FAKE1 ( void );
extern HReg hregX86_FAKE2 ( void );
extern HReg hregX86_FAKE3 ( void );
extern HReg hregX86_FAKE4 ( void );
extern HReg hregX86_FAKE5 ( void );

extern HReg hregX86_XMM0 ( void );
extern HReg hregX86_XMM1 ( void );
extern HReg hregX86_XMM2 ( void );
extern HReg hregX86_XMM3 ( void );
extern HReg hregX86_XMM4 ( void );
extern HReg hregX86_XMM5 ( void );
extern HReg hregX86_XMM6 ( void );
extern HReg hregX86_XMM7 ( void );


/* --------- Condition codes, Intel encoding. --------- */

typedef
   enum {
      Xcc_O      = 0,  /* overflow           */
      Xcc_NO     = 1,  /* no overflow        */

      Xcc_B      = 2,  /* below              */
      Xcc_NB     = 3,  /* not below          */

      Xcc_Z      = 4,  /* zero               */
      Xcc_NZ     = 5,  /* not zero           */

      Xcc_BE     = 6,  /* below or equal     */
      Xcc_NBE    = 7,  /* not below or equal */

      Xcc_S      = 8,  /* negative           */
      Xcc_NS     = 9,  /* not negative       */

      Xcc_P      = 10, /* parity even        */
      Xcc_NP     = 11, /* not parity even    */

      Xcc_L      = 12, /* less               */
      Xcc_NL     = 13, /* not less           */

      Xcc_LE     = 14, /* less or equal      */
      Xcc_NLE    = 15, /* not less or equal  */

      Xcc_ALWAYS = 16  /* the usual hack     */
   }
   X86CondCode;

extern HChar* showX86CondCode ( X86CondCode );


/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
      Xam_IR,   /* Immediate + Reg */
      Xam_IRRS  /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   X86AModeTag;

typedef
   struct {
      X86AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Xam;
   }
   X86AMode;

extern X86AMode* X86AMode_IR   ( UInt, HReg );
extern X86AMode* X86AMode_IRRS ( UInt, HReg, HReg, Int );

extern X86AMode* dopyX86AMode ( X86AMode* );

extern void ppX86AMode ( X86AMode* );
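
/* Illustrative sketch, not part of the original header: in the
   instruction selector, the x86 address 4(%ebp) could be built as

      X86AMode* am1 = X86AMode_IR(4, hregX86_EBP());

   and 8(%esi,%edi,4) -- immediate 8, base %esi, index %edi, scale 4,
   hence shift 2 -- as

      X86AMode* am2 = X86AMode_IRRS(8, hregX86_ESI(), hregX86_EDI(), 2);

   (am1/am2 are hypothetical names used only in this example.) */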


/* --------- Operand, which can be reg, immediate or memory. --------- */

typedef
   enum {
      Xrmi_Imm,
      Xrmi_Reg,
      Xrmi_Mem
   }
   X86RMITag;

typedef
   struct {
      X86RMITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
         struct {
            X86AMode* am;
         } Mem;
      }
      Xrmi;
   }
   X86RMI;

extern X86RMI* X86RMI_Imm ( UInt );
extern X86RMI* X86RMI_Reg ( HReg );
extern X86RMI* X86RMI_Mem ( X86AMode* );

extern void ppX86RMI ( X86RMI* );


/* --------- Operand, which can be reg or immediate only. --------- */

typedef
   enum {
      Xri_Imm,
      Xri_Reg
   }
   X86RITag;

typedef
   struct {
      X86RITag tag;
      union {
         struct {
            UInt imm32;
         } Imm;
         struct {
            HReg reg;
         } Reg;
      }
      Xri;
   }
   X86RI;

extern X86RI* X86RI_Imm ( UInt );
extern X86RI* X86RI_Reg ( HReg );

extern void ppX86RI ( X86RI* );


/* --------- Operand, which can be reg or memory only. --------- */

typedef
   enum {
      Xrm_Reg,
      Xrm_Mem
   }
   X86RMTag;

typedef
   struct {
      X86RMTag tag;
      union {
         struct {
            HReg reg;
         } Reg;
         struct {
            X86AMode* am;
         } Mem;
      }
      Xrm;
   }
   X86RM;

extern X86RM* X86RM_Reg ( HReg );
extern X86RM* X86RM_Mem ( X86AMode* );

extern void ppX86RM ( X86RM* );
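
/* Illustrative sketch, not part of the original header: the three X86RMI
   forms correspond to the usual x86 operand kinds, e.g.

      X86RMI_Imm(0x2A)                             $42
      X86RMI_Reg(hregX86_ECX())                    %ecx
      X86RMI_Mem(X86AMode_IR(4, hregX86_EBP()))    4(%ebp)

   X86RI and X86RM restrict the choice to reg-or-immediate and
   reg-or-memory respectively. */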


/* --------- Instructions. --------- */

/* --------- */
typedef
   enum {
      Xun_NEG,
      Xun_NOT
   }
   X86UnaryOp;

extern HChar* showX86UnaryOp ( X86UnaryOp );


/* --------- */
typedef
   enum {
      Xalu_INVALID,
      Xalu_MOV,
      Xalu_CMP,
      Xalu_ADD, Xalu_SUB, Xalu_ADC, Xalu_SBB,
      Xalu_AND, Xalu_OR, Xalu_XOR,
      Xalu_MUL
   }
   X86AluOp;

extern HChar* showX86AluOp ( X86AluOp );


/* --------- */
typedef
   enum {
      Xsh_INVALID,
      Xsh_SHL, Xsh_SHR, Xsh_SAR
   }
   X86ShiftOp;

extern HChar* showX86ShiftOp ( X86ShiftOp );


/* --------- */
typedef
   enum {
      Xfp_INVALID,
      /* Binary */
      Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV,
      Xfp_SCALE, Xfp_ATAN, Xfp_YL2X, Xfp_YL2XP1, Xfp_PREM, Xfp_PREM1,
      /* Unary */
      Xfp_SQRT, Xfp_ABS, Xfp_NEG, Xfp_MOV, Xfp_SIN, Xfp_COS, Xfp_TAN,
      Xfp_ROUND, Xfp_2XM1
   }
   X86FpOp;

extern HChar* showX86FpOp ( X86FpOp );


/* --------- */
typedef
   enum {
      Xsse_INVALID,
      /* mov */
      Xsse_MOV,
      /* Floating point binary */
      Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
      Xsse_MAXF, Xsse_MINF,
      Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
      /* Floating point unary */
      Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF,
      /* Bitwise */
      Xsse_AND, Xsse_OR, Xsse_XOR, Xsse_ANDN,
      /* Integer binary */
      Xsse_ADD8, Xsse_ADD16, Xsse_ADD32, Xsse_ADD64,
      Xsse_QADD8U, Xsse_QADD16U,
      Xsse_QADD8S, Xsse_QADD16S,
      Xsse_SUB8, Xsse_SUB16, Xsse_SUB32, Xsse_SUB64,
      Xsse_QSUB8U, Xsse_QSUB16U,
      Xsse_QSUB8S, Xsse_QSUB16S,
      Xsse_MUL16,
      Xsse_MULHI16U,
      Xsse_MULHI16S,
      Xsse_AVG8U, Xsse_AVG16U,
      Xsse_MAX16S,
      Xsse_MAX8U,
      Xsse_MIN16S,
      Xsse_MIN8U,
      Xsse_CMPEQ8, Xsse_CMPEQ16, Xsse_CMPEQ32,
      Xsse_CMPGT8S, Xsse_CMPGT16S, Xsse_CMPGT32S,
      Xsse_SHL16, Xsse_SHL32, Xsse_SHL64,
      Xsse_SHR16, Xsse_SHR32, Xsse_SHR64,
      Xsse_SAR16, Xsse_SAR32,
      Xsse_PACKSSD, Xsse_PACKSSW, Xsse_PACKUSW,
      Xsse_UNPCKHB, Xsse_UNPCKHW, Xsse_UNPCKHD, Xsse_UNPCKHQ,
      Xsse_UNPCKLB, Xsse_UNPCKLW, Xsse_UNPCKLD, Xsse_UNPCKLQ
   }
   X86SseOp;

extern HChar* showX86SseOp ( X86SseOp );
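
/* Illustrative note, not part of the original header: the op names are
   meant to track the obvious x86 instructions, e.g. (as an inference
   from the names, not stated here) Xalu_ADD ~ "addl", Xsh_SAR ~ "sarl",
   Xsse_ADD32 ~ "paddd", Xsse_CMPGT16S ~ "pcmpgtw".  The Xsse_*F ops get
   their precision and lane count from the Xin_Sse32Fx4 / Xin_Sse32FLo /
   Xin_Sse64Fx2 / Xin_Sse64FLo instruction forms (declared below) in
   which they appear. */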


/* --------- */
typedef
   enum {
      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG */
      Xin_Test32,    /* 32-bit test of REG or MEM against imm32 (AND, set
                        flags, discard result) */
      Xin_Unary32,   /* 32-bit not and neg */
      Xin_Lea32,     /* 32-bit compute EA into a reg */
      Xin_MulL,      /* 32 x 32 -> 64 multiply */
      Xin_Div,       /* 64/32 -> (32,32) div and mod */
      Xin_Sh3232,    /* shldl or shrdl */
      Xin_Push,      /* push (32-bit?) value on stack */
      Xin_Call,      /* call to address in register */
      Xin_XDirect,   /* direct transfer to GA */
      Xin_XIndir,    /* indirect transfer to GA */
      Xin_XAssisted, /* assisted transfer to GA */
      Xin_CMov32,    /* conditional move */
      Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
      Xin_Store,     /* store 16/8 bit value in memory */
      Xin_Set32,     /* convert condition code to 32-bit value */
      Xin_Bsfr32,    /* 32-bit bsf/bsr */
      Xin_MFence,    /* mem fence (not just sse2, but sse0 and 1 too) */
      Xin_ACAS,      /* 8/16/32-bit lock;cmpxchg */
      Xin_DACAS,     /* lock;cmpxchg8b (doubleword ACAS, 2 x 32-bit only) */

      Xin_FpUnary,   /* FP fake unary op */
      Xin_FpBinary,  /* FP fake binary op */
      Xin_FpLdSt,    /* FP fake load/store */
      Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
      Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
      Xin_FpCMov,    /* FP fake floating point conditional move */
      Xin_FpLdCW,    /* fldcw */
      Xin_FpStSW_AX, /* fstsw %ax */
      Xin_FpCmp,     /* FP compare, generating a C320 value into int reg */

      Xin_SseConst,  /* Generate restricted SSE literal */
      Xin_SseLdSt,   /* SSE load/store, no alignment constraints */
      Xin_SseLdzLO,  /* SSE load low 32/64 bits, zero remainder of reg */
      Xin_Sse32Fx4,  /* SSE binary, 32Fx4 */
      Xin_Sse32FLo,  /* SSE binary, 32F in lowest lane only */
      Xin_Sse64Fx2,  /* SSE binary, 64Fx2 */
      Xin_Sse64FLo,  /* SSE binary, 64F in lowest lane only */
      Xin_SseReRg,   /* SSE binary general reg-reg, Re, Rg */
      Xin_SseCMov,   /* SSE conditional move */
      Xin_SseShuf,   /* SSE2 shuffle (pshufd) */
      Xin_EvCheck,   /* Event check */
      Xin_ProfInc    /* 64-bit profile counter increment */
   }
   X86InstrTag;

/* Destinations are on the RIGHT (second operand) */
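
/* Illustrative example, not part of the original header: under this
   convention

      X86Instr_Alu32R(Xalu_SUB, X86RMI_Reg(rS), rD)

   computes rD := rD - rS, i.e. it corresponds to AT&T-syntax
   "subl <rS>, <rD>" with the destination written second.  (rS and rD
   stand for arbitrary HRegs; they are placeholders used only in this
   example.) */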

typedef
   struct {
      X86InstrTag tag;
      union {
         struct {
            X86AluOp op;
            X86RMI*  src;
            HReg     dst;
         } Alu32R;
         struct {
            X86AluOp  op;
            X86RI*    src;
            X86AMode* dst;
         } Alu32M;
         struct {
            X86ShiftOp op;
            UInt       src; /* shift amount, or 0 means %cl */
            HReg       dst;
         } Sh32;
         struct {
            UInt   imm32;
            X86RM* dst; /* not written, only read */
         } Test32;
         /* Not and Neg */
         struct {
            X86UnaryOp op;
            HReg       dst;
         } Unary32;
         /* 32-bit compute EA into a reg */
         struct {
            X86AMode* am;
            HReg      dst;
         } Lea32;
         /* EDX:EAX = EAX *s/u r/m32 */
         struct {
            Bool   syned;
            X86RM* src;
         } MulL;
         /* x86 div/idiv instruction.  Modifies EDX and EAX and reads src. */
         struct {
            Bool   syned;
            X86RM* src;
         } Div;
         /* shld/shrd.  op may only be Xsh_SHL or Xsh_SHR */
         struct {
            X86ShiftOp op;
            UInt       amt; /* shift amount, or 0 means %cl */
            HReg       src;
            HReg       dst;
         } Sh3232;
         struct {
            X86RMI* src;
         } Push;
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be Xcc_ALWAYS). */
         struct {
            X86CondCode cond;
            Addr32      target;
            Int         regparms; /* 0 .. 3 */
         } Call;
         /* Update the guest EIP value, then exit requesting to chain
            to it.  May be conditional.  Urr, use of Addr32 implicitly
            assumes that wordsize(guest) == wordsize(host). */
         struct {
            Addr32      dstGA;    /* next guest address */
            X86AMode*   amEIP;    /* amode in guest state for EIP */
            X86CondCode cond;     /* can be Xcc_ALWAYS */
            Bool        toFastEP; /* chain to the slow or fast point? */
         } XDirect;
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
         struct {
            HReg        dstGA;
            X86AMode*   amEIP;
            X86CondCode cond; /* can be Xcc_ALWAYS */
         } XIndir;
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
         struct {
            HReg        dstGA;
            X86AMode*   amEIP;
            X86CondCode cond; /* can be Xcc_ALWAYS */
            IRJumpKind  jk;
         } XAssisted;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            X86RM*      src;
            HReg        dst;
         } CMov32;
         /* Sign/Zero extending loads.  Dst size is always 32 bits. */
         struct {
            UChar     szSmall;
            Bool      syned;
            X86AMode* src;
            HReg      dst;
         } LoadEX;
         /* 16/8 bit stores, which are troublesome (particularly
            8-bit) */
         struct {
            UChar     sz; /* only 1 or 2 */
            HReg      src;
            X86AMode* dst;
         } Store;
         /* Convert a x86 condition code to a 32-bit value (0 or 1). */
         struct {
            X86CondCode cond;
            HReg        dst;
         } Set32;
         /* 32-bit bsf or bsr. */
         struct {
            Bool isFwds;
            HReg src;
            HReg dst;
         } Bsfr32;
         /* Mem fence (not just sse2, but sse0 and 1 too).  In short,
            an insn which flushes all preceding loads and stores as
            much as possible before continuing.  On SSE2 we emit a
            real "mfence", on SSE1 "sfence ; lock addl $0,0(%esp)" and
            on SSE0 "lock addl $0,0(%esp)".  This insn therefore
            carries the host's hwcaps so the assembler knows what to
            emit. */
         struct {
            UInt hwcaps;
         } MFence;
         /* "lock;cmpxchg": mem address in .addr,
            expected value in %eax, new value in %ebx */
         struct {
            X86AMode* addr;
            UChar     sz; /* 1, 2 or 4 */
         } ACAS;
         /* "lock;cmpxchg8b": mem address in .addr, expected value in
            %edx:%eax, new value in %ecx:%ebx */
         struct {
            X86AMode* addr;
         } DACAS;

         /* X86 Floating point (fake 3-operand, "flat reg file" insns) */
         struct {
            X86FpOp op;
            HReg    src;
            HReg    dst;
         } FpUnary;
         struct {
            X86FpOp op;
            HReg    srcL;
            HReg    srcR;
            HReg    dst;
         } FpBinary;
         struct {
            Bool      isLoad;
            UChar     sz; /* only 4 (IEEE single) or 8 (IEEE double) */
            HReg      reg;
            X86AMode* addr;
         } FpLdSt;
         /* Move 64-bit float to/from memory, converting to/from
            signed int on the way.  Note the conversions will observe
            the host FPU rounding mode currently in force. */
         struct {
            Bool      isLoad;
            UChar     sz; /* only 2, 4 or 8 */
            HReg      reg;
            X86AMode* addr;
         } FpLdStI;
         /* By observing the current FPU rounding mode, round (etc)
            src into dst given that dst should be interpreted as an
            IEEE754 32-bit (float) type. */
         struct {
            HReg src;
            HReg dst;
         } Fp64to32;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            HReg        src;
            HReg        dst;
         } FpCMov;
         /* Load the FPU's 16-bit control word (fldcw) */
         struct {
            X86AMode* addr;
         }
         FpLdCW;
         /* fstsw %ax */
         struct {
            /* no fields */
         }
         FpStSW_AX;
         /* Do a compare, generating the C320 bits into the dst. */
         struct {
            HReg srcL;
            HReg srcR;
            HReg dst;
         } FpCmp;

         /* Simplistic SSE[123] */
         struct {
            UShort con;
            HReg   dst;
         } SseConst;
         struct {
            Bool      isLoad;
            HReg      reg;
            X86AMode* addr;
         } SseLdSt;
         struct {
            UChar     sz; /* 4 or 8 only */
            HReg      reg;
            X86AMode* addr;
         } SseLdzLO;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse32Fx4;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse32FLo;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse64Fx2;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } Sse64FLo;
         struct {
            X86SseOp op;
            HReg     src;
            HReg     dst;
         } SseReRg;
         /* Mov src to dst on the given condition, which may not
            be the bogus Xcc_ALWAYS. */
         struct {
            X86CondCode cond;
            HReg        src;
            HReg        dst;
         } SseCMov;
         struct {
            Int  order; /* 0 <= order <= 0xFF */
            HReg src;
            HReg dst;
         } SseShuf;
         struct {
            X86AMode* amCounter;
            X86AMode* amFailAddr;
         } EvCheck;
         struct {
            /* No fields.  The address of the counter to inc is
               installed later, post-translation, by patching it in,
               as it is not known at translation time. */
         } ProfInc;

      } Xin;
   }
   X86Instr;
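
/* Illustrative sketch, not part of the original header: an X86Instr is a
   tagged union, so e.g. the conditional move "cmovnz %esi, %edi" would
   be represented with

      tag             == Xin_CMov32
      Xin.CMov32.cond == Xcc_NZ
      Xin.CMov32.src  == X86RM_Reg(hregX86_ESI())
      Xin.CMov32.dst  == hregX86_EDI()

   Consumers normally switch on tag and read only the matching arm. */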

extern X86Instr* X86Instr_Alu32R    ( X86AluOp, X86RMI*, HReg );
extern X86Instr* X86Instr_Alu32M    ( X86AluOp, X86RI*, X86AMode* );
extern X86Instr* X86Instr_Unary32   ( X86UnaryOp op, HReg dst );
extern X86Instr* X86Instr_Lea32     ( X86AMode* am, HReg dst );

extern X86Instr* X86Instr_Sh32      ( X86ShiftOp, UInt, HReg );
extern X86Instr* X86Instr_Test32    ( UInt imm32, X86RM* dst );
extern X86Instr* X86Instr_MulL      ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Div       ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Sh3232    ( X86ShiftOp, UInt amt, HReg src, HReg dst );
extern X86Instr* X86Instr_Push      ( X86RMI* );
extern X86Instr* X86Instr_Call      ( X86CondCode, Addr32, Int );
extern X86Instr* X86Instr_XDirect   ( Addr32 dstGA, X86AMode* amEIP,
                                      X86CondCode cond, Bool toFastEP );
extern X86Instr* X86Instr_XIndir    ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond );
extern X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond, IRJumpKind jk );
extern X86Instr* X86Instr_CMov32    ( X86CondCode, X86RM* src, HReg dst );
extern X86Instr* X86Instr_LoadEX    ( UChar szSmall, Bool syned,
                                      X86AMode* src, HReg dst );
extern X86Instr* X86Instr_Store     ( UChar sz, HReg src, X86AMode* dst );
extern X86Instr* X86Instr_Set32     ( X86CondCode cond, HReg dst );
extern X86Instr* X86Instr_Bsfr32    ( Bool isFwds, HReg src, HReg dst );
extern X86Instr* X86Instr_MFence    ( UInt hwcaps );
extern X86Instr* X86Instr_ACAS      ( X86AMode* addr, UChar sz );
extern X86Instr* X86Instr_DACAS     ( X86AMode* addr );

extern X86Instr* X86Instr_FpUnary   ( X86FpOp op, HReg src, HReg dst );
extern X86Instr* X86Instr_FpBinary  ( X86FpOp op, HReg srcL, HReg srcR, HReg dst );
extern X86Instr* X86Instr_FpLdSt    ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
extern X86Instr* X86Instr_FpLdStI   ( Bool isLoad, UChar sz, HReg reg, X86AMode* );
extern X86Instr* X86Instr_Fp64to32  ( HReg src, HReg dst );
extern X86Instr* X86Instr_FpCMov    ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_FpLdCW    ( X86AMode* );
extern X86Instr* X86Instr_FpStSW_AX ( void );
extern X86Instr* X86Instr_FpCmp     ( HReg srcL, HReg srcR, HReg dst );

extern X86Instr* X86Instr_SseConst  ( UShort con, HReg dst );
extern X86Instr* X86Instr_SseLdSt   ( Bool isLoad, HReg, X86AMode* );
extern X86Instr* X86Instr_SseLdzLO  ( Int sz, HReg, X86AMode* );
extern X86Instr* X86Instr_Sse32Fx4  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse32FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64Fx2  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseReRg   ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseCMov   ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_SseShuf   ( Int order, HReg src, HReg dst );
extern X86Instr* X86Instr_EvCheck   ( X86AMode* amCounter,
                                      X86AMode* amFailAddr );
extern X86Instr* X86Instr_ProfInc   ( void );
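
/* Illustrative sketch, not part of the original header: the instruction
   selector builds instructions with the constructors above, e.g. the
   sequence "movl $0, %eax ; addl 4(%ebp), %eax" could be represented as

      X86Instr_Alu32R(Xalu_MOV, X86RMI_Imm(0), hregX86_EAX());
      X86Instr_Alu32R(Xalu_ADD,
                      X86RMI_Mem(X86AMode_IR(4, hregX86_EBP())),
                      hregX86_EAX());

   (Real uses would typically target virtual registers rather than
   hregX86_EAX(); real registers are used here only to keep the example
   concrete.) */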


extern void ppX86Instr ( X86Instr*, Bool );

/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void         getRegUsage_X86Instr ( HRegUsage*, X86Instr*, Bool );
extern void         mapRegs_X86Instr     ( HRegRemap*, X86Instr*, Bool );
extern Bool         isMove_X86Instr      ( X86Instr*, HReg*, HReg* );
extern Int          emit_X86Instr        ( /*MB_MOD*/Bool* is_profInc,
                                           UChar* buf, Int nbuf, X86Instr* i,
                                           Bool mode64,
                                           void* disp_cp_chain_me_to_slowEP,
                                           void* disp_cp_chain_me_to_fastEP,
                                           void* disp_cp_xindir,
                                           void* disp_cp_xassisted );

extern void genSpill_X86  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );
extern void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );

extern X86Instr*    directReload_X86     ( X86Instr* i,
                                           HReg vreg, Short spill_off );
extern void         getAllocableRegs_X86 ( Int*, HReg** );
extern HInstrArray* iselSB_X86           ( IRSB*,
                                           VexArch,
                                           VexArchInfo*,
                                           VexAbiInfo*,
                                           Int offs_Host_EvC_Counter,
                                           Int offs_Host_EvC_FailAddr,
                                           Bool chainingAllowed,
                                           Bool addProfInc,
                                           Addr64 max_ga );

/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_X86 ( void );

/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_X86 ( void* place_to_chain,
                                        void* disp_cp_chain_me_EXPECTED,
                                        void* place_to_jump_to );

extern VexInvalRange unchainXDirect_X86 ( void* place_to_unchain,
                                          void* place_to_jump_to_EXPECTED,
                                          void* disp_cp_chain_me );

/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_X86 ( void* place_to_patch,
                                        ULong* location_of_counter );


#endif /* ndef __VEX_HOST_X86_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                     host_x86_defs.h ---*/
/*---------------------------------------------------------------*/