1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are
6 // met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the distribution.
14 //
15 // - Neither the name of Sun Microsystems or the names of contributors may
16 // be used to endorse or promote products derived from this software without
17 // specific prior written permission.
18 //
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
20 // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 
31 // The original source code covered by the above license has been
32 // modified significantly by Google Inc.
33 // Copyright 2012 the V8 project authors. All rights reserved.
34 
35 // A lightweight X64 Assembler.
36 
37 #ifndef V8_X64_ASSEMBLER_X64_H_
38 #define V8_X64_ASSEMBLER_X64_H_
39 
40 #include <deque>
41 #include <forward_list>
42 #include <vector>
43 
44 #include "src/assembler.h"
45 #include "src/x64/constants-x64.h"
46 #include "src/x64/sse-instr.h"
47 
48 namespace v8 {
49 namespace internal {
50 
51 // Utility functions
52 
53 #define GENERAL_REGISTERS(V) \
54   V(rax)                     \
55   V(rcx)                     \
56   V(rdx)                     \
57   V(rbx)                     \
58   V(rsp)                     \
59   V(rbp)                     \
60   V(rsi)                     \
61   V(rdi)                     \
62   V(r8)                      \
63   V(r9)                      \
64   V(r10)                     \
65   V(r11)                     \
66   V(r12)                     \
67   V(r13)                     \
68   V(r14)                     \
69   V(r15)
70 
71 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
72   V(rax)                                 \
73   V(rbx)                                 \
74   V(rdx)                                 \
75   V(rcx)                                 \
76   V(rsi)                                 \
77   V(rdi)                                 \
78   V(r8)                                  \
79   V(r9)                                  \
80   V(r11)                                 \
81   V(r12)                                 \
82   V(r14)                                 \
83   V(r15)
84 
85 enum RegisterCode {
86 #define REGISTER_CODE(R) kRegCode_##R,
87   GENERAL_REGISTERS(REGISTER_CODE)
88 #undef REGISTER_CODE
89       kRegAfterLast
90 };
91 
92 class Register : public RegisterBase<Register, kRegAfterLast> {
93  public:
94   bool is_byte_register() const { return reg_code_ <= 3; }
95   // Return the high bit of the register code as a 0 or 1.  Used often
96   // when constructing the REX prefix byte.
97   int high_bit() const { return reg_code_ >> 3; }
98   // Return the 3 low bits of the register code.  Used when encoding registers
99   // in modR/M, SIB, and opcode bytes.
100   int low_bits() const { return reg_code_ & 0x7; }
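  // For example, with the codes defined by GENERAL_REGISTERS above, r9 has
  // code 9: high_bit() == 1 (the bit that goes into the REX prefix as
  // REX.B, REX.R or REX.X, depending on where the register is encoded) and
  // low_bits() == 1 (the value placed in the ModR/M, SIB or opcode byte);
  // rdx (code 2) has high_bit() == 0 and low_bits() == 2.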
101 
102  private:
103   friend class RegisterBase<Register, kRegAfterLast>;
104   explicit constexpr Register(int code) : RegisterBase(code) {}
105 };
106 
107 ASSERT_TRIVIALLY_COPYABLE(Register);
108 static_assert(sizeof(Register) == sizeof(int),
109               "Register can efficiently be passed by value");
110 
111 #define DECLARE_REGISTER(R) \
112   constexpr Register R = Register::from_code<kRegCode_##R>();
113 GENERAL_REGISTERS(DECLARE_REGISTER)
114 #undef DECLARE_REGISTER
115 constexpr Register no_reg = Register::no_reg();
116 
117 constexpr int kNumRegs = 16;
118 
119 constexpr RegList kJSCallerSaved =
120     Register::ListOf<rax, rcx, rdx,
121                      rbx,  // used as a caller-saved register in JavaScript code
122                      rdi   // callee function
123                      >();
124 
125 constexpr int kNumJSCallerSaved = 5;
126 
127 // Number of registers for which space is reserved in safepoints.
128 constexpr int kNumSafepointRegisters = 16;
129 
130 #ifdef _WIN64
131   // Windows calling convention
132 constexpr Register arg_reg_1 = rcx;
133 constexpr Register arg_reg_2 = rdx;
134 constexpr Register arg_reg_3 = r8;
135 constexpr Register arg_reg_4 = r9;
136 #else
137   // AMD64 calling convention
138 constexpr Register arg_reg_1 = rdi;
139 constexpr Register arg_reg_2 = rsi;
140 constexpr Register arg_reg_3 = rdx;
141 constexpr Register arg_reg_4 = rcx;
142 #endif  // _WIN64
143 
144 
145 #define DOUBLE_REGISTERS(V) \
146   V(xmm0)                   \
147   V(xmm1)                   \
148   V(xmm2)                   \
149   V(xmm3)                   \
150   V(xmm4)                   \
151   V(xmm5)                   \
152   V(xmm6)                   \
153   V(xmm7)                   \
154   V(xmm8)                   \
155   V(xmm9)                   \
156   V(xmm10)                  \
157   V(xmm11)                  \
158   V(xmm12)                  \
159   V(xmm13)                  \
160   V(xmm14)                  \
161   V(xmm15)
162 
163 #define FLOAT_REGISTERS DOUBLE_REGISTERS
164 #define SIMD128_REGISTERS DOUBLE_REGISTERS
165 
166 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
167   V(xmm0)                               \
168   V(xmm1)                               \
169   V(xmm2)                               \
170   V(xmm3)                               \
171   V(xmm4)                               \
172   V(xmm5)                               \
173   V(xmm6)                               \
174   V(xmm7)                               \
175   V(xmm8)                               \
176   V(xmm9)                               \
177   V(xmm10)                              \
178   V(xmm11)                              \
179   V(xmm12)                              \
180   V(xmm13)                              \
181   V(xmm14)
182 
183 constexpr bool kPadArguments = false;
184 constexpr bool kSimpleFPAliasing = true;
185 constexpr bool kSimdMaskRegisters = false;
186 
187 enum DoubleRegisterCode {
188 #define REGISTER_CODE(R) kDoubleCode_##R,
189   DOUBLE_REGISTERS(REGISTER_CODE)
190 #undef REGISTER_CODE
191       kDoubleAfterLast
192 };
193 
194 class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
195  public:
196   // Return the high bit of the register code as a 0 or 1.  Used often
197   // when constructing the REX prefix byte.
198   int high_bit() const { return reg_code_ >> 3; }
199   // Return the 3 low bits of the register code.  Used when encoding registers
200   // in modR/M, SIB, and opcode bytes.
201   int low_bits() const { return reg_code_ & 0x7; }
202 
203  private:
204   friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
205   explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
206 };
207 
208 ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
209 static_assert(sizeof(XMMRegister) == sizeof(int),
210               "XMMRegister can efficiently be passed by value");
211 
212 typedef XMMRegister FloatRegister;
213 
214 typedef XMMRegister DoubleRegister;
215 
216 typedef XMMRegister Simd128Register;
217 
218 #define DECLARE_REGISTER(R) \
219   constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
220 DOUBLE_REGISTERS(DECLARE_REGISTER)
221 #undef DECLARE_REGISTER
222 constexpr DoubleRegister no_double_reg = DoubleRegister::no_reg();
223 constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
224 
225 enum Condition {
226   // any value < 0 is considered no_condition
227   no_condition  = -1,
228 
229   overflow      =  0,
230   no_overflow   =  1,
231   below         =  2,
232   above_equal   =  3,
233   equal         =  4,
234   not_equal     =  5,
235   below_equal   =  6,
236   above         =  7,
237   negative      =  8,
238   positive      =  9,
239   parity_even   = 10,
240   parity_odd    = 11,
241   less          = 12,
242   greater_equal = 13,
243   less_equal    = 14,
244   greater       = 15,
245 
246   // Fake conditions that are handled by the
247   // opcodes using them.
248   always        = 16,
249   never         = 17,
250   // aliases
251   carry         = below,
252   not_carry     = above_equal,
253   zero          = equal,
254   not_zero      = not_equal,
255   sign          = negative,
256   not_sign      = positive,
257   last_condition = greater
258 };
259 
260 
261 // Returns the equivalent of !cc.
262 // Negation of the default no_condition (-1) results in a non-default
263 // no_condition value (-2). As long as tests for no_condition check
264 // for condition < 0, this will work as expected.
265 inline Condition NegateCondition(Condition cc) {
266   return static_cast<Condition>(cc ^ 1);
267 }
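// For example, NegateCondition(equal) is not_equal (4 ^ 1 == 5), and
// NegateCondition(carry) is not_carry, since carry aliases below (2) and
// not_carry aliases above_equal (3).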
268 
269 
270 enum RoundingMode {
271   kRoundToNearest = 0x0,
272   kRoundDown = 0x1,
273   kRoundUp = 0x2,
274   kRoundToZero = 0x3
275 };
276 
277 
278 // -----------------------------------------------------------------------------
279 // Machine instruction Immediates
280 
281 class Immediate {
282  public:
283   explicit constexpr Immediate(int32_t value) : value_(value) {}
284   explicit constexpr Immediate(int32_t value, RelocInfo::Mode rmode)
285       : value_(value), rmode_(rmode) {}
286   explicit Immediate(Smi* value)
287       : value_(static_cast<int32_t>(reinterpret_cast<intptr_t>(value))) {
288     DCHECK(SmiValuesAre31Bits());  // Only available for 31-bit SMI.
289   }
290 
291  private:
292   const int32_t value_;
293   const RelocInfo::Mode rmode_ = RelocInfo::NONE;
294 
295   friend class Assembler;
296 };
297 ASSERT_TRIVIALLY_COPYABLE(Immediate);
298 static_assert(sizeof(Immediate) <= kPointerSize,
299               "Immediate must be small enough to pass it by value");
300 
301 // -----------------------------------------------------------------------------
302 // Machine instruction Operands
303 
304 enum ScaleFactor : int8_t {
305   times_1 = 0,
306   times_2 = 1,
307   times_4 = 2,
308   times_8 = 3,
309   times_int_size = times_4,
310   times_pointer_size = (kPointerSize == 8) ? times_8 : times_4
311 };
312 
313 class Operand {
314  public:
315   struct Data {
316     byte rex = 0;
317     byte buf[9];
318     byte len = 1;   // number of bytes of buf in use.
319     int8_t addend;  // for rip + offset + addend.
320   };
321 
322   // [base + disp/r]
323   Operand(Register base, int32_t disp);
324 
325   // [base + index*scale + disp/r]
326   Operand(Register base,
327           Register index,
328           ScaleFactor scale,
329           int32_t disp);
330 
331   // [index*scale + disp/r]
332   Operand(Register index,
333           ScaleFactor scale,
334           int32_t disp);
335 
336   // Offset from existing memory operand.
337   // Offset is added to existing displacement as 32-bit signed values and
338   // this must not overflow.
339   Operand(Operand base, int32_t offset);
340 
341   // [rip + disp/r]
342   explicit Operand(Label* label, int addend = 0);
343 
344   Operand(const Operand&) = default;
345 
346   // Checks whether either base or index register is the given register.
347   // Does not check the "reg" part of the Operand.
348   bool AddressUsesRegister(Register reg) const;
349 
350   // Queries related to the size of the generated instruction.
351   // Whether the generated instruction will have a REX prefix.
352   bool requires_rex() const { return data_.rex != 0; }
353   // Size of the ModR/M, SIB and displacement parts of the generated
354   // instruction.
355   int operand_size() const { return data_.len; }
356 
357   const Data& data() const { return data_; }
358 
359  private:
360   const Data data_;
361 };
362 ASSERT_TRIVIALLY_COPYABLE(Operand);
363 static_assert(sizeof(Operand) <= 2 * kPointerSize,
364               "Operand must be small enough to pass it by value");
365 
366 #define ASSEMBLER_INSTRUCTION_LIST(V) \
367   V(add)                              \
368   V(and)                              \
369   V(cmp)                              \
370   V(cmpxchg)                          \
371   V(dec)                              \
372   V(idiv)                             \
373   V(div)                              \
374   V(imul)                             \
375   V(inc)                              \
376   V(lea)                              \
377   V(mov)                              \
378   V(movzxb)                           \
379   V(movzxw)                           \
380   V(neg)                              \
381   V(not)                              \
382   V(or)                               \
383   V(repmovs)                          \
384   V(sbb)                              \
385   V(sub)                              \
386   V(test)                             \
387   V(xchg)                             \
388   V(xor)
389 
390 // Shift instructions on operands/registers with kPointerSize, kInt32Size and
391 // kInt64Size.
392 #define SHIFT_INSTRUCTION_LIST(V) \
393   V(rol, 0x0)                     \
394   V(ror, 0x1)                     \
395   V(rcl, 0x2)                     \
396   V(rcr, 0x3)                     \
397   V(shl, 0x4)                     \
398   V(shr, 0x5)                     \
399   V(sar, 0x7)
400 
401 // Partial Constant Pool
402 // Unlike a complete constant pool (as used on ARM), a partial constant pool
403 // only takes effect for shareable constants, in order to reduce code size.
404 // A partial constant pool does not emit constant pool entries at the end of each
405 // code object. Instead, it keeps the first shareable constant inlined in the
406 // instructions and uses rip-relative memory loads for the same constants in
407 // subsequent instructions. These rip-relative loads target the position of the
408 // first inlined constant. For example:
409 //
410 //  REX.W movq r10,0x7f9f75a32c20   ; 10 bytes
411 //  …
412 //  REX.W movq r10,0x7f9f75a32c20   ; 10 bytes
413 //  …
414 //
415 // turns into
416 //
417 //  REX.W movq r10,0x7f9f75a32c20   ; 10 bytes
418 //  …
419 //  REX.W movq r10,[rip+0xffffff96] ; 7 bytes
420 //  …
421 
422 class ConstPool {
423  public:
424   explicit ConstPool(Assembler* assm) : assm_(assm) {}
425   // Returns true when partial constant pool is valid for this entry.
426   bool TryRecordEntry(intptr_t data, RelocInfo::Mode mode);
427   bool IsEmpty() const { return entries_.empty(); }
428 
429   void PatchEntries();
430   // Discard any pending pool entries.
431   void Clear();
432 
433  private:
434   // Adds a shared entry to entries_. Returns true if this is not the first time
435   // we add this entry, false otherwise.
436   bool AddSharedEntry(uint64_t data, int offset);
437 
438   // Check if the instruction is a rip-relative move.
439   bool IsMoveRipRelative(byte* instr);
440 
441   Assembler* assm_;
442 
443   // Values, pc offsets of entries.
444   typedef std::multimap<uint64_t, int> EntryMap;
445   EntryMap entries_;
446 
447   // Number of bytes taken up by the displacement of rip-relative addressing.
448   static constexpr int kRipRelativeDispSize = 4;  // 32-bit displacement.
449   // Distance between the address of the displacement in the rip-relative move
450   // instruction and the head address of the instruction.
451   static constexpr int kMoveRipRelativeDispOffset =
452       3;  // REX Opcode ModRM Displacement
453   // Distance between the address of the imm64 in the 'movq reg, imm64'
454   // instruction and the head address of the instruction.
455   static constexpr int kMoveImm64Offset = 2;  // REX Opcode imm64
456   // A mask for rip-relative move instruction.
457   static constexpr uint32_t kMoveRipRelativeMask = 0x00C7FFFB;
458   // The bits for a rip-relative move instruction after mask.
459   static constexpr uint32_t kMoveRipRelativeInstr = 0x00058B48;
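  // Read as a little-endian 32-bit word, the pattern above is the byte
  // sequence 0x48 0x8B 0x05 (REX.W, opcode 0x8B, ModRM with mod=00/rm=101),
  // i.e. movq reg, [rip+disp32]; the mask clears the REX.R bit and the ModRM
  // reg field so that any destination register matches.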
460 };
461 
462 class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
463  private:
464   // We check before assembling an instruction that there is sufficient
465   // space to write an instruction and its relocation information.
466   // The relocation writer's position must be kGap bytes above the end of
467   // the generated instructions. This leaves enough space for the
468   // longest possible x64 instruction, 15 bytes, and the longest possible
469   // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
470   // (There is a 15 byte limit on x64 instruction length that rules out some
471   // otherwise valid instructions.)
472   // This allows for a single, fast space check per instruction.
473   static constexpr int kGap = 32;
474 
475  public:
476   // Create an assembler. Instructions and relocation information are emitted
477   // into a buffer, with the instructions starting from the beginning and the
478   // relocation information starting from the end of the buffer. See CodeDesc
479   // for a detailed comment on the layout (globals.h).
480   //
481   // If the provided buffer is nullptr, the assembler allocates and grows its
482   // own buffer, and buffer_size determines the initial buffer size. The buffer
483   // is owned by the assembler and deallocated upon destruction of the
484   // assembler.
485   //
486   // If the provided buffer is not nullptr, the assembler uses the provided
487   // buffer for code generation and assumes its size to be buffer_size. If the
488   // buffer is too small, a fatal error occurs. No deallocation of the buffer is
489   // done upon destruction of the assembler.
490   Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
491   virtual ~Assembler() {}
492 
493   // GetCode emits any pending (non-emitted) code and fills the descriptor
494   // desc. GetCode() is idempotent; it returns the same result if no other
495   // Assembler functions are invoked in between GetCode() calls.
496   void GetCode(Isolate* isolate, CodeDesc* desc);
497 
498   // Read/Modify the code target in the relative branch/call instruction at pc.
499   // On the x64 architecture, we use relative jumps with a 32-bit displacement
500   // to jump to other Code objects in the Code space in the heap.
501   // Jumps to C functions are done indirectly through a 64-bit register holding
502   // the absolute address of the target.
503   // These functions convert between absolute Addresses of Code objects and
504   // the relative displacements stored in the code.
505   // The isolate argument is unused (and may be nullptr) when skipping flushing.
506   static inline Address target_address_at(Address pc, Address constant_pool);
507   static inline void set_target_address_at(
508       Address pc, Address constant_pool, Address target,
509       ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
510 
511   // Return the code target address at a call site from the return address
512   // of that call in the instruction stream.
513   static inline Address target_address_from_return_address(Address pc);
514 
515   // This sets the branch destination (which is in the instruction on x64).
516   // This is for calls and branches within generated code.
517   inline static void deserialization_set_special_target_at(
518       Address instruction_payload, Code* code, Address target);
519 
520   // Get the size of the special target encoded at 'instruction_payload'.
521   inline static int deserialization_special_target_size(
522       Address instruction_payload);
523 
524   // This sets the internal reference at the pc.
525   inline static void deserialization_set_target_internal_reference_at(
526       Address pc, Address target,
527       RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
528 
529   inline Handle<Code> code_target_object_handle_at(Address pc);
530   inline Address runtime_entry_at(Address pc);
531 
532   // Number of bytes taken up by the branch target in the code.
533   static constexpr int kSpecialTargetSize = 4;  // 32-bit displacement.
534   // Distance between the address of the code target in the call instruction
535   // and the return address pushed on the stack.
536   static constexpr int kCallTargetAddressOffset = 4;  // 32-bit displacement.
537   // The length of call(kScratchRegister).
538   static constexpr int kCallScratchRegisterInstructionLength = 3;
539   // The length of call(Immediate32).
540   static constexpr int kShortCallInstructionLength = 5;
541   // The length of movq(kScratchRegister, address).
542   static constexpr int kMoveAddressIntoScratchRegisterInstructionLength =
543       2 + kPointerSize;
544   // The length of movq(kScratchRegister, address) and call(kScratchRegister).
545   static constexpr int kCallSequenceLength =
546       kMoveAddressIntoScratchRegisterInstructionLength +
547       kCallScratchRegisterInstructionLength;
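  // With kPointerSize == 8 the movq is 10 bytes (REX.W, opcode, imm64) and the
  // indirect call is 3 bytes (REX, 0xFF, ModRM), so kCallSequenceLength is 13.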
548 
549   // One byte opcode for test eax,0xXXXXXXXX.
550   static constexpr byte kTestEaxByte = 0xA9;
551   // One byte opcode for test al, 0xXX.
552   static constexpr byte kTestAlByte = 0xA8;
553   // One byte opcode for nop.
554   static constexpr byte kNopByte = 0x90;
555 
556   // One byte prefix for a short conditional jump.
557   static constexpr byte kJccShortPrefix = 0x70;
558   static constexpr byte kJncShortOpcode = kJccShortPrefix | not_carry;
559   static constexpr byte kJcShortOpcode = kJccShortPrefix | carry;
560   static constexpr byte kJnzShortOpcode = kJccShortPrefix | not_zero;
561   static constexpr byte kJzShortOpcode = kJccShortPrefix | zero;
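  // For example, kJzShortOpcode is 0x70 | 4 == 0x74 and kJnzShortOpcode is
  // 0x75, the one-byte opcodes for short jz/jnz.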
562 
563   // VEX prefix encodings.
564   enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
565   enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128, kLZ = kL128 };
566   enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
567   enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
568 
569   // ---------------------------------------------------------------------------
570   // Code generation
571   //
572   // Function names correspond one-to-one to x64 instruction mnemonics.
573   // Unless specified otherwise, instructions operate on 64-bit operands.
574   //
575   // If we need versions of an assembly instruction that operate on different
576   // width arguments, we add a single-letter suffix specifying the width.
577   // This is done for the following instructions: mov, cmp, inc, dec,
578   // add, sub, and test.
579   // There are no versions of these instructions without the suffix.
580   // - Instructions on 8-bit (byte) operands/registers have a trailing 'b'.
581   // - Instructions on 16-bit (word) operands/registers have a trailing 'w'.
582   // - Instructions on 32-bit (doubleword) operands/registers use 'l'.
583   // - Instructions on 64-bit (quadword) operands/registers use 'q'.
584   // - Instructions on operands/registers with pointer size use 'p'.
585 
586   STATIC_ASSERT(kPointerSize == kInt64Size || kPointerSize == kInt32Size);
587 
588 #define DECLARE_INSTRUCTION(instruction)                \
589   template<class P1>                                    \
590   void instruction##p(P1 p1) {                          \
591     emit_##instruction(p1, kPointerSize);               \
592   }                                                     \
593                                                         \
594   template<class P1>                                    \
595   void instruction##l(P1 p1) {                          \
596     emit_##instruction(p1, kInt32Size);                 \
597   }                                                     \
598                                                         \
599   template<class P1>                                    \
600   void instruction##q(P1 p1) {                          \
601     emit_##instruction(p1, kInt64Size);                 \
602   }                                                     \
603                                                         \
604   template<class P1, class P2>                          \
605   void instruction##p(P1 p1, P2 p2) {                   \
606     emit_##instruction(p1, p2, kPointerSize);           \
607   }                                                     \
608                                                         \
609   template<class P1, class P2>                          \
610   void instruction##l(P1 p1, P2 p2) {                   \
611     emit_##instruction(p1, p2, kInt32Size);             \
612   }                                                     \
613                                                         \
614   template<class P1, class P2>                          \
615   void instruction##q(P1 p1, P2 p2) {                   \
616     emit_##instruction(p1, p2, kInt64Size);             \
617   }                                                     \
618                                                         \
619   template<class P1, class P2, class P3>                \
620   void instruction##p(P1 p1, P2 p2, P3 p3) {            \
621     emit_##instruction(p1, p2, p3, kPointerSize);       \
622   }                                                     \
623                                                         \
624   template<class P1, class P2, class P3>                \
625   void instruction##l(P1 p1, P2 p2, P3 p3) {            \
626     emit_##instruction(p1, p2, p3, kInt32Size);         \
627   }                                                     \
628                                                         \
629   template<class P1, class P2, class P3>                \
630   void instruction##q(P1 p1, P2 p2, P3 p3) {            \
631     emit_##instruction(p1, p2, p3, kInt64Size);         \
632   }
633   ASSEMBLER_INSTRUCTION_LIST(DECLARE_INSTRUCTION)
634 #undef DECLARE_INSTRUCTION
635 
636   // Insert the smallest number of nop instructions
637   // possible to align the pc offset to a multiple
638   // of m, where m must be a power of 2.
639   void Align(int m);
640   // Insert the smallest number of zero bytes possible to align the pc offset
641   // to a multiple of m. m must be a power of 2 (>= 2).
642   void DataAlign(int m);
643   void Nop(int bytes = 1);
644   // Aligns code to something that's optimal for a jump target for the platform.
645   void CodeTargetAlign();
646 
647   // Stack
648   void pushfq();
649   void popfq();
650 
651   void pushq(Immediate value);
652   // Push a 32-bit integer, and guarantee that it is actually pushed as a
653   // 32-bit value; the normal push will optimize the 8-bit case.
654   void pushq_imm32(int32_t imm32);
655   void pushq(Register src);
656   void pushq(Operand src);
657 
658   void popq(Register dst);
659   void popq(Operand dst);
660 
661   void enter(Immediate size);
662   void leave();
663 
664   // Moves
665   void movb(Register dst, Operand src);
666   void movb(Register dst, Immediate imm);
667   void movb(Operand dst, Register src);
668   void movb(Operand dst, Immediate imm);
669 
670   // Move the low 16 bits of a 64-bit register value to a 16-bit
671   // memory location.
672   void movw(Register dst, Operand src);
673   void movw(Operand dst, Register src);
674   void movw(Operand dst, Immediate imm);
675 
676   // Move the offset of the label location relative to the current
677   // position (after the move) to the destination.
678   void movl(Operand dst, Label* src);
679 
680   // Loads a pointer into a register with a relocation mode.
681   void movp(Register dst, Address ptr, RelocInfo::Mode rmode);
682 
683   // Load a heap number into a register.
684   // The heap number will not be allocated and embedded into the code right
685   // away. Instead, we emit the load of a dummy object. Later, when calling
686   // Assembler::GetCode, the heap number will be allocated and the code will be
687   // patched by replacing the dummy with the actual object. The RelocInfo for
688   // the embedded object is already recorded correctly when emitting the dummy
689   // move.
690   void movp_heap_number(Register dst, double value);
691 
692   // Loads a 64-bit immediate into a register.
693   void movq(Register dst, int64_t value,
694             RelocInfo::Mode rmode = RelocInfo::NONE);
695   void movq(Register dst, uint64_t value,
696             RelocInfo::Mode rmode = RelocInfo::NONE);
697 
698   void movsxbl(Register dst, Register src);
699   void movsxbl(Register dst, Operand src);
700   void movsxbq(Register dst, Register src);
701   void movsxbq(Register dst, Operand src);
702   void movsxwl(Register dst, Register src);
703   void movsxwl(Register dst, Operand src);
704   void movsxwq(Register dst, Register src);
705   void movsxwq(Register dst, Operand src);
706   void movsxlq(Register dst, Register src);
707   void movsxlq(Register dst, Operand src);
708 
709   // Repeated moves.
710 
711   void repmovsb();
712   void repmovsw();
713   void repmovsp() { emit_repmovs(kPointerSize); }
714   void repmovsl() { emit_repmovs(kInt32Size); }
715   void repmovsq() { emit_repmovs(kInt64Size); }
716 
717   // Instruction to load from an immediate 64-bit pointer into RAX.
718   void load_rax(Address value, RelocInfo::Mode rmode);
719   void load_rax(ExternalReference ext);
720 
721   // Conditional moves.
722   void cmovq(Condition cc, Register dst, Register src);
723   void cmovq(Condition cc, Register dst, Operand src);
724   void cmovl(Condition cc, Register dst, Register src);
725   void cmovl(Condition cc, Register dst, Operand src);
726 
727   void cmpb(Register dst, Immediate src) {
728     immediate_arithmetic_op_8(0x7, dst, src);
729   }
730 
731   void cmpb_al(Immediate src);
732 
733   void cmpb(Register dst, Register src) {
734     arithmetic_op_8(0x3A, dst, src);
735   }
736 
737   void cmpb(Register dst, Operand src) { arithmetic_op_8(0x3A, dst, src); }
738 
739   void cmpb(Operand dst, Register src) { arithmetic_op_8(0x38, src, dst); }
740 
741   void cmpb(Operand dst, Immediate src) {
742     immediate_arithmetic_op_8(0x7, dst, src);
743   }
744 
745   void cmpw(Operand dst, Immediate src) {
746     immediate_arithmetic_op_16(0x7, dst, src);
747   }
748 
749   void cmpw(Register dst, Immediate src) {
750     immediate_arithmetic_op_16(0x7, dst, src);
751   }
752 
753   void cmpw(Register dst, Operand src) { arithmetic_op_16(0x3B, dst, src); }
754 
755   void cmpw(Register dst, Register src) {
756     arithmetic_op_16(0x3B, dst, src);
757   }
758 
759   void cmpw(Operand dst, Register src) { arithmetic_op_16(0x39, src, dst); }
760 
761   void testb(Register reg, Operand op) { testb(op, reg); }
762 
763   void testw(Register reg, Operand op) { testw(op, reg); }
764 
765   void andb(Register dst, Immediate src) {
766     immediate_arithmetic_op_8(0x4, dst, src);
767   }
768 
769   void decb(Register dst);
770   void decb(Operand dst);
771 
772   // Lock prefix.
773   void lock();
774 
775   void xchgb(Register reg, Operand op);
776   void xchgw(Register reg, Operand op);
777 
778   void cmpxchgb(Operand dst, Register src);
779   void cmpxchgw(Operand dst, Register src);
780 
781   // Sign-extends rax into rdx:rax.
782   void cqo();
783   // Sign-extends eax into edx:eax.
784   void cdq();
785 
786   // Multiply eax by src, put the result in edx:eax.
787   void mull(Register src);
788   void mull(Operand src);
789   // Multiply rax by src, put the result in rdx:rax.
790   void mulq(Register src);
791 
792 #define DECLARE_SHIFT_INSTRUCTION(instruction, subcode)                       \
793   void instruction##p(Register dst, Immediate imm8) {                         \
794     shift(dst, imm8, subcode, kPointerSize);                                  \
795   }                                                                           \
796                                                                               \
797   void instruction##l(Register dst, Immediate imm8) {                         \
798     shift(dst, imm8, subcode, kInt32Size);                                    \
799   }                                                                           \
800                                                                               \
801   void instruction##q(Register dst, Immediate imm8) {                         \
802     shift(dst, imm8, subcode, kInt64Size);                                    \
803   }                                                                           \
804                                                                               \
805   void instruction##p(Operand dst, Immediate imm8) {                          \
806     shift(dst, imm8, subcode, kPointerSize);                                  \
807   }                                                                           \
808                                                                               \
809   void instruction##l(Operand dst, Immediate imm8) {                          \
810     shift(dst, imm8, subcode, kInt32Size);                                    \
811   }                                                                           \
812                                                                               \
813   void instruction##q(Operand dst, Immediate imm8) {                          \
814     shift(dst, imm8, subcode, kInt64Size);                                    \
815   }                                                                           \
816                                                                               \
817   void instruction##p_cl(Register dst) { shift(dst, subcode, kPointerSize); } \
818                                                                               \
819   void instruction##l_cl(Register dst) { shift(dst, subcode, kInt32Size); }   \
820                                                                               \
821   void instruction##q_cl(Register dst) { shift(dst, subcode, kInt64Size); }   \
822                                                                               \
823   void instruction##p_cl(Operand dst) { shift(dst, subcode, kPointerSize); }  \
824                                                                               \
825   void instruction##l_cl(Operand dst) { shift(dst, subcode, kInt32Size); }    \
826                                                                               \
827   void instruction##q_cl(Operand dst) { shift(dst, subcode, kInt64Size); }
828   SHIFT_INSTRUCTION_LIST(DECLARE_SHIFT_INSTRUCTION)
829 #undef DECLARE_SHIFT_INSTRUCTION
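  // For example (illustrative usage; 'masm' names some Assembler):
  //   masm.shlq(rax, Immediate(3));  // rax <<= 3 (64-bit)
  //   masm.sarl_cl(rdx);             // arithmetic right shift of edx by cl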
830 
831   // Shifts dst:src left by cl bits, affecting only dst.
832   void shld(Register dst, Register src);
833 
834   // Shifts src:dst right by cl bits, affecting only dst.
835   void shrd(Register dst, Register src);
836 
837   void store_rax(Address dst, RelocInfo::Mode mode);
838   void store_rax(ExternalReference ref);
839 
840   void subb(Register dst, Immediate src) {
841     immediate_arithmetic_op_8(0x5, dst, src);
842   }
843 
844   void sub_sp_32(uint32_t imm);
845 
846   void testb(Register dst, Register src);
847   void testb(Register reg, Immediate mask);
848   void testb(Operand op, Immediate mask);
849   void testb(Operand op, Register reg);
850 
851   void testw(Register dst, Register src);
852   void testw(Register reg, Immediate mask);
853   void testw(Operand op, Immediate mask);
854   void testw(Operand op, Register reg);
855 
856   // Bit operations.
857   void bswapl(Register dst);
858   void bswapq(Register dst);
859   void bt(Operand dst, Register src);
860   void bts(Operand dst, Register src);
861   void bsrq(Register dst, Register src);
862   void bsrq(Register dst, Operand src);
863   void bsrl(Register dst, Register src);
864   void bsrl(Register dst, Operand src);
865   void bsfq(Register dst, Register src);
866   void bsfq(Register dst, Operand src);
867   void bsfl(Register dst, Register src);
868   void bsfl(Register dst, Operand src);
869 
870   // Miscellaneous
871   void clc();
872   void cld();
873   void cpuid();
874   void hlt();
875   void int3();
876   void nop();
877   void ret(int imm16);
878   void ud2();
879   void setcc(Condition cc, Register reg);
880 
881   void pshufw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
882   void pshufw(XMMRegister dst, Operand src, uint8_t shuffle);
883   void pblendw(XMMRegister dst, Operand src, uint8_t mask);
884   void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
885   void palignr(XMMRegister dst, Operand src, uint8_t mask);
886   void palignr(XMMRegister dst, XMMRegister src, uint8_t mask);
887 
888   // Label operations & relative jumps (PPUM Appendix D)
889   //
890   // Takes a branch opcode (cc) and a label (L) and generates
891   // either a backward branch or a forward branch and links it
892   // to the label fixup chain. Usage:
893   //
894   // Label L;    // unbound label
895   // j(cc, &L);  // forward branch to unbound label
896   // bind(&L);   // bind label to the current pc
897   // j(cc, &L);  // backward branch to bound label
898   // bind(&L);   // illegal: a label may be bound only once
899   //
900   // Note: The same Label can be used for forward and backward branches
901   // but it may be bound only once.
902 
903   void bind(Label* L);  // binds an unbound label L to the current code position
904 
905   // Calls
906   // Call near relative 32-bit displacement, relative to next instruction.
907   void call(Label* L);
908   void call(Address entry, RelocInfo::Mode rmode);
909   void near_call(Address entry, RelocInfo::Mode rmode);
910   void near_jmp(Address entry, RelocInfo::Mode rmode);
911   void call(CodeStub* stub);
912   void call(Handle<Code> target,
913             RelocInfo::Mode rmode = RelocInfo::CODE_TARGET);
914 
915   // Calls directly to the given address using a relative offset.
916   // Should only ever be used in Code objects for calls within the
917   // same Code object. Should not be used when generating new code (use labels),
918   // but only when patching existing code.
919   void call(Address target);
920 
921   // Call near absolute indirect, address in register
922   void call(Register adr);
923 
924   // Jumps
925   // Jump short or near relative.
926   // Use a 32-bit signed displacement.
927   // Unconditional jump to L
928   void jmp(Label* L, Label::Distance distance = Label::kFar);
929   void jmp(Handle<Code> target, RelocInfo::Mode rmode);
930 
931   // Jump near absolute indirect (r64)
932   void jmp(Register adr);
933   void jmp(Operand src);
934 
935   // Conditional jumps
936   void j(Condition cc,
937          Label* L,
938          Label::Distance distance = Label::kFar);
939   void j(Condition cc, Address entry, RelocInfo::Mode rmode);
940   void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode);
941 
942   // Floating-point operations
943   void fld(int i);
944 
945   void fld1();
946   void fldz();
947   void fldpi();
948   void fldln2();
949 
950   void fld_s(Operand adr);
951   void fld_d(Operand adr);
952 
953   void fstp_s(Operand adr);
954   void fstp_d(Operand adr);
955   void fstp(int index);
956 
957   void fild_s(Operand adr);
958   void fild_d(Operand adr);
959 
960   void fist_s(Operand adr);
961 
962   void fistp_s(Operand adr);
963   void fistp_d(Operand adr);
964 
965   void fisttp_s(Operand adr);
966   void fisttp_d(Operand adr);
967 
968   void fabs();
969   void fchs();
970 
971   void fadd(int i);
972   void fsub(int i);
973   void fmul(int i);
974   void fdiv(int i);
975 
976   void fisub_s(Operand adr);
977 
978   void faddp(int i = 1);
979   void fsubp(int i = 1);
980   void fsubrp(int i = 1);
981   void fmulp(int i = 1);
982   void fdivp(int i = 1);
983   void fprem();
984   void fprem1();
985 
986   void fxch(int i = 1);
987   void fincstp();
988   void ffree(int i = 0);
989 
990   void ftst();
991   void fucomp(int i);
992   void fucompp();
993   void fucomi(int i);
994   void fucomip();
995 
996   void fcompp();
997   void fnstsw_ax();
998   void fwait();
999   void fnclex();
1000 
1001   void fsin();
1002   void fcos();
1003   void fptan();
1004   void fyl2x();
1005   void f2xm1();
1006   void fscale();
1007   void fninit();
1008 
1009   void frndint();
1010 
1011   void sahf();
1012 
1013   // SSE instructions
1014   void addss(XMMRegister dst, XMMRegister src);
1015   void addss(XMMRegister dst, Operand src);
1016   void subss(XMMRegister dst, XMMRegister src);
1017   void subss(XMMRegister dst, Operand src);
1018   void mulss(XMMRegister dst, XMMRegister src);
1019   void mulss(XMMRegister dst, Operand src);
1020   void divss(XMMRegister dst, XMMRegister src);
1021   void divss(XMMRegister dst, Operand src);
1022 
1023   void maxss(XMMRegister dst, XMMRegister src);
1024   void maxss(XMMRegister dst, Operand src);
1025   void minss(XMMRegister dst, XMMRegister src);
1026   void minss(XMMRegister dst, Operand src);
1027 
1028   void sqrtss(XMMRegister dst, XMMRegister src);
1029   void sqrtss(XMMRegister dst, Operand src);
1030 
1031   void ucomiss(XMMRegister dst, XMMRegister src);
1032   void ucomiss(XMMRegister dst, Operand src);
1033   void movaps(XMMRegister dst, XMMRegister src);
1034 
1035   // Don't use this unless it's important to keep the
1036   // top half of the destination register unchanged.
1037   // Use movaps when moving float values and movd for integer
1038   // values in xmm registers.
1039   void movss(XMMRegister dst, XMMRegister src);
1040 
1041   void movss(XMMRegister dst, Operand src);
1042   void movss(Operand dst, XMMRegister src);
1043   void shufps(XMMRegister dst, XMMRegister src, byte imm8);
1044 
1045   void cvttss2si(Register dst, Operand src);
1046   void cvttss2si(Register dst, XMMRegister src);
1047   void cvtlsi2ss(XMMRegister dst, Operand src);
1048   void cvtlsi2ss(XMMRegister dst, Register src);
1049 
1050   void andps(XMMRegister dst, XMMRegister src);
1051   void andps(XMMRegister dst, Operand src);
1052   void orps(XMMRegister dst, XMMRegister src);
1053   void orps(XMMRegister dst, Operand src);
1054   void xorps(XMMRegister dst, XMMRegister src);
1055   void xorps(XMMRegister dst, Operand src);
1056 
1057   void addps(XMMRegister dst, XMMRegister src);
1058   void addps(XMMRegister dst, Operand src);
1059   void subps(XMMRegister dst, XMMRegister src);
1060   void subps(XMMRegister dst, Operand src);
1061   void mulps(XMMRegister dst, XMMRegister src);
1062   void mulps(XMMRegister dst, Operand src);
1063   void divps(XMMRegister dst, XMMRegister src);
1064   void divps(XMMRegister dst, Operand src);
1065 
1066   void movmskps(Register dst, XMMRegister src);
1067 
1068   void vinstr(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
1069               SIMDPrefix pp, LeadingOpcode m, VexW w);
1070   void vinstr(byte op, XMMRegister dst, XMMRegister src1, Operand src2,
1071               SIMDPrefix pp, LeadingOpcode m, VexW w);
1072 
1073   // SSE2 instructions
1074   void sse2_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape,
1075                   byte opcode);
1076   void sse2_instr(XMMRegister dst, Operand src, byte prefix, byte escape,
1077                   byte opcode);
1078 #define DECLARE_SSE2_INSTRUCTION(instruction, prefix, escape, opcode) \
1079   void instruction(XMMRegister dst, XMMRegister src) {                \
1080     sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode);         \
1081   }                                                                   \
1082   void instruction(XMMRegister dst, Operand src) {                    \
1083     sse2_instr(dst, src, 0x##prefix, 0x##escape, 0x##opcode);         \
1084   }
1085 
1086   SSE2_INSTRUCTION_LIST(DECLARE_SSE2_INSTRUCTION)
1087 #undef DECLARE_SSE2_INSTRUCTION
1088 
1089 #define DECLARE_SSE2_AVX_INSTRUCTION(instruction, prefix, escape, opcode)    \
1090   void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1091     vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0);          \
1092   }                                                                          \
1093   void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) {     \
1094     vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape, kW0);          \
1095   }
1096 
1097   SSE2_INSTRUCTION_LIST(DECLARE_SSE2_AVX_INSTRUCTION)
1098 #undef DECLARE_SSE2_AVX_INSTRUCTION
1099 
1100   // SSE3
1101   void lddqu(XMMRegister dst, Operand src);
1102 
1103   // SSSE3
1104   void ssse3_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
1105                    byte escape2, byte opcode);
1106   void ssse3_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
1107                    byte escape2, byte opcode);
1108 
1109 #define DECLARE_SSSE3_INSTRUCTION(instruction, prefix, escape1, escape2,     \
1110                                   opcode)                                    \
1111   void instruction(XMMRegister dst, XMMRegister src) {                       \
1112     ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1113   }                                                                          \
1114   void instruction(XMMRegister dst, Operand src) {                           \
1115     ssse3_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1116   }
1117 
1118   SSSE3_INSTRUCTION_LIST(DECLARE_SSSE3_INSTRUCTION)
1119 #undef DECLARE_SSSE3_INSTRUCTION
1120 
1121   // SSE4
1122   void sse4_instr(XMMRegister dst, XMMRegister src, byte prefix, byte escape1,
1123                   byte escape2, byte opcode);
1124   void sse4_instr(XMMRegister dst, Operand src, byte prefix, byte escape1,
1125                   byte escape2, byte opcode);
1126 #define DECLARE_SSE4_INSTRUCTION(instruction, prefix, escape1, escape2,     \
1127                                  opcode)                                    \
1128   void instruction(XMMRegister dst, XMMRegister src) {                      \
1129     sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1130   }                                                                         \
1131   void instruction(XMMRegister dst, Operand src) {                          \
1132     sse4_instr(dst, src, 0x##prefix, 0x##escape1, 0x##escape2, 0x##opcode); \
1133   }
1134 
1135   SSE4_INSTRUCTION_LIST(DECLARE_SSE4_INSTRUCTION)
1136 #undef DECLARE_SSE4_INSTRUCTION
1137 
1138 #define DECLARE_SSE34_AVX_INSTRUCTION(instruction, prefix, escape1, escape2,  \
1139                                       opcode)                                 \
1140   void v##instruction(XMMRegister dst, XMMRegister src1, XMMRegister src2) {  \
1141     vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
1142   }                                                                           \
1143   void v##instruction(XMMRegister dst, XMMRegister src1, Operand src2) {      \
1144     vinstr(0x##opcode, dst, src1, src2, k##prefix, k##escape1##escape2, kW0); \
1145   }
1146 
1147   SSSE3_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
1148   SSE4_INSTRUCTION_LIST(DECLARE_SSE34_AVX_INSTRUCTION)
1149 #undef DECLARE_SSE34_AVX_INSTRUCTION
1150 
1151   void movd(XMMRegister dst, Register src);
1152   void movd(XMMRegister dst, Operand src);
1153   void movd(Register dst, XMMRegister src);
1154   void movq(XMMRegister dst, Register src);
1155   void movq(Register dst, XMMRegister src);
1156   void movq(XMMRegister dst, XMMRegister src);
1157 
1158   // Don't use this unless it's important to keep the
1159   // top half of the destination register unchanged.
1160   // Use movapd when moving double values and movq for integer
1161   // values in xmm registers.
1162   void movsd(XMMRegister dst, XMMRegister src);
1163 
1164   void movsd(Operand dst, XMMRegister src);
1165   void movsd(XMMRegister dst, Operand src);
1166 
1167   void movdqa(Operand dst, XMMRegister src);
1168   void movdqa(XMMRegister dst, Operand src);
1169 
1170   void movdqu(Operand dst, XMMRegister src);
1171   void movdqu(XMMRegister dst, Operand src);
1172 
1173   void movapd(XMMRegister dst, XMMRegister src);
1174   void movupd(XMMRegister dst, Operand src);
1175   void movupd(Operand dst, XMMRegister src);
1176 
1177   void psllq(XMMRegister reg, byte imm8);
1178   void psrlq(XMMRegister reg, byte imm8);
1179   void psllw(XMMRegister reg, byte imm8);
1180   void pslld(XMMRegister reg, byte imm8);
1181   void psrlw(XMMRegister reg, byte imm8);
1182   void psrld(XMMRegister reg, byte imm8);
1183   void psraw(XMMRegister reg, byte imm8);
1184   void psrad(XMMRegister reg, byte imm8);
1185 
1186   void cvttsd2si(Register dst, Operand src);
1187   void cvttsd2si(Register dst, XMMRegister src);
1188   void cvttss2siq(Register dst, XMMRegister src);
1189   void cvttss2siq(Register dst, Operand src);
1190   void cvttsd2siq(Register dst, XMMRegister src);
1191   void cvttsd2siq(Register dst, Operand src);
1192   void cvttps2dq(XMMRegister dst, Operand src);
1193   void cvttps2dq(XMMRegister dst, XMMRegister src);
1194 
1195   void cvtlsi2sd(XMMRegister dst, Operand src);
1196   void cvtlsi2sd(XMMRegister dst, Register src);
1197 
1198   void cvtqsi2ss(XMMRegister dst, Operand src);
1199   void cvtqsi2ss(XMMRegister dst, Register src);
1200 
1201   void cvtqsi2sd(XMMRegister dst, Operand src);
1202   void cvtqsi2sd(XMMRegister dst, Register src);
1203 
1204 
1205   void cvtss2sd(XMMRegister dst, XMMRegister src);
1206   void cvtss2sd(XMMRegister dst, Operand src);
1207   void cvtsd2ss(XMMRegister dst, XMMRegister src);
1208   void cvtsd2ss(XMMRegister dst, Operand src);
1209 
1210   void cvtsd2si(Register dst, XMMRegister src);
1211   void cvtsd2siq(Register dst, XMMRegister src);
1212 
1213   void addsd(XMMRegister dst, XMMRegister src);
1214   void addsd(XMMRegister dst, Operand src);
1215   void subsd(XMMRegister dst, XMMRegister src);
1216   void subsd(XMMRegister dst, Operand src);
1217   void mulsd(XMMRegister dst, XMMRegister src);
1218   void mulsd(XMMRegister dst, Operand src);
1219   void divsd(XMMRegister dst, XMMRegister src);
1220   void divsd(XMMRegister dst, Operand src);
1221 
1222   void maxsd(XMMRegister dst, XMMRegister src);
1223   void maxsd(XMMRegister dst, Operand src);
1224   void minsd(XMMRegister dst, XMMRegister src);
1225   void minsd(XMMRegister dst, Operand src);
1226 
1227   void andpd(XMMRegister dst, XMMRegister src);
1228   void andpd(XMMRegister dst, Operand src);
1229   void orpd(XMMRegister dst, XMMRegister src);
1230   void orpd(XMMRegister dst, Operand src);
1231   void xorpd(XMMRegister dst, XMMRegister src);
1232   void xorpd(XMMRegister dst, Operand src);
1233   void sqrtsd(XMMRegister dst, XMMRegister src);
1234   void sqrtsd(XMMRegister dst, Operand src);
1235 
1236   void haddps(XMMRegister dst, XMMRegister src);
1237   void haddps(XMMRegister dst, Operand src);
1238 
1239   void ucomisd(XMMRegister dst, XMMRegister src);
1240   void ucomisd(XMMRegister dst, Operand src);
1241   void cmpltsd(XMMRegister dst, XMMRegister src);
1242 
1243   void movmskpd(Register dst, XMMRegister src);
1244 
1245   // SSE 4.1 instructions
1246   void insertps(XMMRegister dst, XMMRegister src, byte imm8);
1247   void extractps(Register dst, XMMRegister src, byte imm8);
1248   void pextrb(Register dst, XMMRegister src, int8_t imm8);
1249   void pextrb(Operand dst, XMMRegister src, int8_t imm8);
1250   void pextrw(Register dst, XMMRegister src, int8_t imm8);
1251   void pextrw(Operand dst, XMMRegister src, int8_t imm8);
1252   void pextrd(Register dst, XMMRegister src, int8_t imm8);
1253   void pextrd(Operand dst, XMMRegister src, int8_t imm8);
1254   void pinsrb(XMMRegister dst, Register src, int8_t imm8);
1255   void pinsrb(XMMRegister dst, Operand src, int8_t imm8);
1256   void pinsrw(XMMRegister dst, Register src, int8_t imm8);
1257   void pinsrw(XMMRegister dst, Operand src, int8_t imm8);
1258   void pinsrd(XMMRegister dst, Register src, int8_t imm8);
1259   void pinsrd(XMMRegister dst, Operand src, int8_t imm8);
1260 
1261   void roundss(XMMRegister dst, XMMRegister src, RoundingMode mode);
1262   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
1263 
1264   void cmpps(XMMRegister dst, XMMRegister src, int8_t cmp);
1265   void cmpps(XMMRegister dst, Operand src, int8_t cmp);
1266   void cmppd(XMMRegister dst, XMMRegister src, int8_t cmp);
1267   void cmppd(XMMRegister dst, Operand src, int8_t cmp);
1268 
1269 #define SSE_CMP_P(instr, imm8)                                                \
1270   void instr##ps(XMMRegister dst, XMMRegister src) { cmpps(dst, src, imm8); } \
1271   void instr##ps(XMMRegister dst, Operand src) { cmpps(dst, src, imm8); }     \
1272   void instr##pd(XMMRegister dst, XMMRegister src) { cmppd(dst, src, imm8); } \
1273   void instr##pd(XMMRegister dst, Operand src) { cmppd(dst, src, imm8); }
1274 
1275   SSE_CMP_P(cmpeq, 0x0);
1276   SSE_CMP_P(cmplt, 0x1);
1277   SSE_CMP_P(cmple, 0x2);
1278   SSE_CMP_P(cmpneq, 0x4);
1279   SSE_CMP_P(cmpnlt, 0x5);
1280   SSE_CMP_P(cmpnle, 0x6);
1281 
1282 #undef SSE_CMP_P
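  // For example, cmpltps(dst, src) emits cmpps(dst, src, 0x1) and
  // cmpeqpd(dst, src) emits cmppd(dst, src, 0x0).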
1283 
1284   void minps(XMMRegister dst, XMMRegister src);
1285   void minps(XMMRegister dst, Operand src);
1286   void maxps(XMMRegister dst, XMMRegister src);
1287   void maxps(XMMRegister dst, Operand src);
1288   void rcpps(XMMRegister dst, XMMRegister src);
1289   void rcpps(XMMRegister dst, Operand src);
1290   void rsqrtps(XMMRegister dst, XMMRegister src);
1291   void rsqrtps(XMMRegister dst, Operand src);
1292   void sqrtps(XMMRegister dst, XMMRegister src);
1293   void sqrtps(XMMRegister dst, Operand src);
1294   void movups(XMMRegister dst, XMMRegister src);
1295   void movups(XMMRegister dst, Operand src);
1296   void movups(Operand dst, XMMRegister src);
1297   void psrldq(XMMRegister dst, uint8_t shift);
1298   void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1299   void pshufd(XMMRegister dst, Operand src, uint8_t shuffle);
1300   void pshufhw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1301   void pshuflw(XMMRegister dst, XMMRegister src, uint8_t shuffle);
1302   void cvtdq2ps(XMMRegister dst, XMMRegister src);
1303   void cvtdq2ps(XMMRegister dst, Operand src);
1304 
1305   // AVX instructions
1306   void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1307     vfmasd(0x99, dst, src1, src2);
1308   }
1309   void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1310     vfmasd(0xa9, dst, src1, src2);
1311   }
1312   void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1313     vfmasd(0xb9, dst, src1, src2);
1314   }
1315   void vfmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1316     vfmasd(0x99, dst, src1, src2);
1317   }
1318   void vfmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1319     vfmasd(0xa9, dst, src1, src2);
1320   }
1321   void vfmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1322     vfmasd(0xb9, dst, src1, src2);
1323   }
1324   void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1325     vfmasd(0x9b, dst, src1, src2);
1326   }
vfmsub213sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1327   void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1328     vfmasd(0xab, dst, src1, src2);
1329   }
vfmsub231sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1330   void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1331     vfmasd(0xbb, dst, src1, src2);
1332   }
vfmsub132sd(XMMRegister dst,XMMRegister src1,Operand src2)1333   void vfmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1334     vfmasd(0x9b, dst, src1, src2);
1335   }
vfmsub213sd(XMMRegister dst,XMMRegister src1,Operand src2)1336   void vfmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1337     vfmasd(0xab, dst, src1, src2);
1338   }
vfmsub231sd(XMMRegister dst,XMMRegister src1,Operand src2)1339   void vfmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1340     vfmasd(0xbb, dst, src1, src2);
1341   }
vfnmadd132sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1342   void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1343     vfmasd(0x9d, dst, src1, src2);
1344   }
vfnmadd213sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1345   void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1346     vfmasd(0xad, dst, src1, src2);
1347   }
vfnmadd231sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1348   void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1349     vfmasd(0xbd, dst, src1, src2);
1350   }
vfnmadd132sd(XMMRegister dst,XMMRegister src1,Operand src2)1351   void vfnmadd132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1352     vfmasd(0x9d, dst, src1, src2);
1353   }
vfnmadd213sd(XMMRegister dst,XMMRegister src1,Operand src2)1354   void vfnmadd213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1355     vfmasd(0xad, dst, src1, src2);
1356   }
vfnmadd231sd(XMMRegister dst,XMMRegister src1,Operand src2)1357   void vfnmadd231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1358     vfmasd(0xbd, dst, src1, src2);
1359   }
vfnmsub132sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1360   void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1361     vfmasd(0x9f, dst, src1, src2);
1362   }
vfnmsub213sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1363   void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1364     vfmasd(0xaf, dst, src1, src2);
1365   }
vfnmsub231sd(XMMRegister dst,XMMRegister src1,XMMRegister src2)1366   void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1367     vfmasd(0xbf, dst, src1, src2);
1368   }
vfnmsub132sd(XMMRegister dst,XMMRegister src1,Operand src2)1369   void vfnmsub132sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1370     vfmasd(0x9f, dst, src1, src2);
1371   }
vfnmsub213sd(XMMRegister dst,XMMRegister src1,Operand src2)1372   void vfnmsub213sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1373     vfmasd(0xaf, dst, src1, src2);
1374   }
vfnmsub231sd(XMMRegister dst,XMMRegister src1,Operand src2)1375   void vfnmsub231sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1376     vfmasd(0xbf, dst, src1, src2);
1377   }
1378   void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1379   void vfmasd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1380 
1381   void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1382     vfmass(0x99, dst, src1, src2);
1383   }
1384   void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1385     vfmass(0xa9, dst, src1, src2);
1386   }
1387   void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1388     vfmass(0xb9, dst, src1, src2);
1389   }
1390   void vfmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1391     vfmass(0x99, dst, src1, src2);
1392   }
1393   void vfmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1394     vfmass(0xa9, dst, src1, src2);
1395   }
1396   void vfmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1397     vfmass(0xb9, dst, src1, src2);
1398   }
1399   void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1400     vfmass(0x9b, dst, src1, src2);
1401   }
1402   void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1403     vfmass(0xab, dst, src1, src2);
1404   }
1405   void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1406     vfmass(0xbb, dst, src1, src2);
1407   }
1408   void vfmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1409     vfmass(0x9b, dst, src1, src2);
1410   }
1411   void vfmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1412     vfmass(0xab, dst, src1, src2);
1413   }
1414   void vfmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1415     vfmass(0xbb, dst, src1, src2);
1416   }
1417   void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1418     vfmass(0x9d, dst, src1, src2);
1419   }
1420   void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1421     vfmass(0xad, dst, src1, src2);
1422   }
1423   void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1424     vfmass(0xbd, dst, src1, src2);
1425   }
1426   void vfnmadd132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1427     vfmass(0x9d, dst, src1, src2);
1428   }
1429   void vfnmadd213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1430     vfmass(0xad, dst, src1, src2);
1431   }
1432   void vfnmadd231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1433     vfmass(0xbd, dst, src1, src2);
1434   }
1435   void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1436     vfmass(0x9f, dst, src1, src2);
1437   }
1438   void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1439     vfmass(0xaf, dst, src1, src2);
1440   }
1441   void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1442     vfmass(0xbf, dst, src1, src2);
1443   }
1444   void vfnmsub132ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1445     vfmass(0x9f, dst, src1, src2);
1446   }
1447   void vfnmsub213ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1448     vfmass(0xaf, dst, src1, src2);
1449   }
1450   void vfnmsub231ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1451     vfmass(0xbf, dst, src1, src2);
1452   }
1453   void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1454   void vfmass(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1455 
1456   void vmovd(XMMRegister dst, Register src);
1457   void vmovd(XMMRegister dst, Operand src);
1458   void vmovd(Register dst, XMMRegister src);
1459   void vmovq(XMMRegister dst, Register src);
1460   void vmovq(XMMRegister dst, Operand src);
1461   void vmovq(Register dst, XMMRegister src);
1462 
1463   void vmovsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1464     vsd(0x10, dst, src1, src2);
1465   }
1466   void vmovsd(XMMRegister dst, Operand src) { vsd(0x10, dst, xmm0, src); }
1467   void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
1468 
1469 #define AVX_SP_3(instr, opcode) \
1470   AVX_S_3(instr, opcode)        \
1471   AVX_P_3(instr, opcode)
1472 
1473 #define AVX_S_3(instr, opcode)  \
1474   AVX_3(instr##ss, opcode, vss) \
1475   AVX_3(instr##sd, opcode, vsd)
1476 
1477 #define AVX_P_3(instr, opcode)  \
1478   AVX_3(instr##ps, opcode, vps) \
1479   AVX_3(instr##pd, opcode, vpd)
1480 
1481 #define AVX_3(instr, opcode, impl)                                  \
1482   void instr(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1483     impl(opcode, dst, src1, src2);                                  \
1484   }                                                                 \
1485   void instr(XMMRegister dst, XMMRegister src1, Operand src2) {     \
1486     impl(opcode, dst, src1, src2);                                  \
1487   }
1488 
1489   AVX_SP_3(vsqrt, 0x51);
1490   AVX_SP_3(vadd, 0x58);
1491   AVX_SP_3(vsub, 0x5c);
1492   AVX_SP_3(vmul, 0x59);
1493   AVX_SP_3(vdiv, 0x5e);
1494   AVX_SP_3(vmin, 0x5d);
1495   AVX_SP_3(vmax, 0x5f);
1496   AVX_P_3(vand, 0x54);
1497   AVX_P_3(vor, 0x56);
1498   AVX_P_3(vxor, 0x57);
1499   AVX_3(vcvtsd2ss, 0x5a, vsd);
1500   AVX_3(vhaddps, 0x7c, vsd);
1501 
1502 #undef AVX_3
1503 #undef AVX_S_3
1504 #undef AVX_P_3
1505 #undef AVX_SP_3
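  // For reference, AVX_SP_3(vadd, 0x58) above expands (via AVX_S_3, AVX_P_3 and
  // AVX_3) to vaddss, vaddsd, vaddps and vaddpd, each with an XMMRegister and
  // an Operand overload that forwards to vss/vsd/vps/vpd with opcode 0x58, e.g.
  //   void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
  //     vsd(0x58, dst, src1, src2);
  //   }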
1506 
1507   void vpsrlq(XMMRegister dst, XMMRegister src, byte imm8) {
1508     vpd(0x73, xmm2, dst, src);
1509     emit(imm8);
1510   }
1511   void vpsllq(XMMRegister dst, XMMRegister src, byte imm8) {
1512     vpd(0x73, xmm6, dst, src);
1513     emit(imm8);
1514   }
1515   void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1516     vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1517   }
1518   void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1519     vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
1520   }
1521   void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1522     XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1523     vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW0);
1524   }
1525   void vcvtlsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1526     vinstr(0x2a, dst, src1, src2, kF2, k0F, kW0);
1527   }
1528   void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1529     XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1530     vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW0);
1531   }
1532   void vcvtlsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1533     vinstr(0x2a, dst, src1, src2, kF3, k0F, kW0);
1534   }
1535   void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Register src2) {
1536     XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1537     vinstr(0x2a, dst, src1, isrc2, kF3, k0F, kW1);
1538   }
1539   void vcvtqsi2ss(XMMRegister dst, XMMRegister src1, Operand src2) {
1540     vinstr(0x2a, dst, src1, src2, kF3, k0F, kW1);
1541   }
1542   void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Register src2) {
1543     XMMRegister isrc2 = XMMRegister::from_code(src2.code());
1544     vinstr(0x2a, dst, src1, isrc2, kF2, k0F, kW1);
1545   }
1546   void vcvtqsi2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
1547     vinstr(0x2a, dst, src1, src2, kF2, k0F, kW1);
1548   }
1549   void vcvttss2si(Register dst, XMMRegister src) {
1550     XMMRegister idst = XMMRegister::from_code(dst.code());
1551     vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1552   }
1553   void vcvttss2si(Register dst, Operand src) {
1554     XMMRegister idst = XMMRegister::from_code(dst.code());
1555     vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW0);
1556   }
1557   void vcvttsd2si(Register dst, XMMRegister src) {
1558     XMMRegister idst = XMMRegister::from_code(dst.code());
1559     vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1560   }
1561   void vcvttsd2si(Register dst, Operand src) {
1562     XMMRegister idst = XMMRegister::from_code(dst.code());
1563     vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW0);
1564   }
1565   void vcvttss2siq(Register dst, XMMRegister src) {
1566     XMMRegister idst = XMMRegister::from_code(dst.code());
1567     vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1568   }
1569   void vcvttss2siq(Register dst, Operand src) {
1570     XMMRegister idst = XMMRegister::from_code(dst.code());
1571     vinstr(0x2c, idst, xmm0, src, kF3, k0F, kW1);
1572   }
1573   void vcvttsd2siq(Register dst, XMMRegister src) {
1574     XMMRegister idst = XMMRegister::from_code(dst.code());
1575     vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1576   }
1577   void vcvttsd2siq(Register dst, Operand src) {
1578     XMMRegister idst = XMMRegister::from_code(dst.code());
1579     vinstr(0x2c, idst, xmm0, src, kF2, k0F, kW1);
1580   }
1581   void vcvtsd2si(Register dst, XMMRegister src) {
1582     XMMRegister idst = XMMRegister::from_code(dst.code());
1583     vinstr(0x2d, idst, xmm0, src, kF2, k0F, kW0);
1584   }
1585   void vucomisd(XMMRegister dst, XMMRegister src) {
1586     vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1587   }
1588   void vucomisd(XMMRegister dst, Operand src) {
1589     vinstr(0x2e, dst, xmm0, src, k66, k0F, kWIG);
1590   }
1591   void vroundss(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1592                 RoundingMode mode) {
1593     vinstr(0x0a, dst, src1, src2, k66, k0F3A, kWIG);
1594     emit(static_cast<byte>(mode) | 0x8);  // Mask precision exception.
1595   }
1596   void vroundsd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
1597                 RoundingMode mode) {
1598     vinstr(0x0b, dst, src1, src2, k66, k0F3A, kWIG);
1599     emit(static_cast<byte>(mode) | 0x8);  // Mask precision exception.
1600   }
1601 
1602   void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1603     vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1604   }
1605   void vsd(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
1606     vinstr(op, dst, src1, src2, kF2, k0F, kWIG);
1607   }
1608 
1609   void vmovss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
1610     vss(0x10, dst, src1, src2);
1611   }
1612   void vmovss(XMMRegister dst, Operand src) { vss(0x10, dst, xmm0, src); }
1613   void vmovss(Operand dst, XMMRegister src) { vss(0x11, src, xmm0, dst); }
1614   void vucomiss(XMMRegister dst, XMMRegister src);
1615   void vucomiss(XMMRegister dst, Operand src);
1616   void vss(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1617   void vss(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1618 
1619   void vmovaps(XMMRegister dst, XMMRegister src) { vps(0x28, dst, xmm0, src); }
1620   void vmovups(XMMRegister dst, XMMRegister src) { vps(0x10, dst, xmm0, src); }
1621   void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); }
1622   void vmovups(Operand dst, XMMRegister src) { vps(0x11, src, xmm0, dst); }
1623   void vmovapd(XMMRegister dst, XMMRegister src) { vpd(0x28, dst, xmm0, src); }
1624   void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); }
1625   void vmovupd(Operand dst, XMMRegister src) { vpd(0x11, src, xmm0, dst); }
1626   void vmovmskps(Register dst, XMMRegister src) {
1627     XMMRegister idst = XMMRegister::from_code(dst.code());
1628     vps(0x50, idst, xmm0, src);
1629   }
1630   void vmovmskpd(Register dst, XMMRegister src) {
1631     XMMRegister idst = XMMRegister::from_code(dst.code());
1632     vpd(0x50, idst, xmm0, src);
1633   }
1634   void vcmpps(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1635     vps(0xC2, dst, src1, src2);
1636     emit(cmp);
1637   }
1638   void vcmpps(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1639     vps(0xC2, dst, src1, src2);
1640     emit(cmp);
1641   }
1642   void vcmppd(XMMRegister dst, XMMRegister src1, XMMRegister src2, int8_t cmp) {
1643     vpd(0xC2, dst, src1, src2);
1644     emit(cmp);
1645   }
1646   void vcmppd(XMMRegister dst, XMMRegister src1, Operand src2, int8_t cmp) {
1647     vpd(0xC2, dst, src1, src2);
1648     emit(cmp);
1649   }
1650 
1651 #define AVX_CMP_P(instr, imm8)                                          \
1652   void instr##ps(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1653     vcmpps(dst, src1, src2, imm8);                                      \
1654   }                                                                     \
1655   void instr##ps(XMMRegister dst, XMMRegister src1, Operand src2) {     \
1656     vcmpps(dst, src1, src2, imm8);                                      \
1657   }                                                                     \
1658   void instr##pd(XMMRegister dst, XMMRegister src1, XMMRegister src2) { \
1659     vcmppd(dst, src1, src2, imm8);                                      \
1660   }                                                                     \
1661   void instr##pd(XMMRegister dst, XMMRegister src1, Operand src2) {     \
1662     vcmppd(dst, src1, src2, imm8);                                      \
1663   }
1664 
1665   AVX_CMP_P(vcmpeq, 0x0);
1666   AVX_CMP_P(vcmplt, 0x1);
1667   AVX_CMP_P(vcmple, 0x2);
1668   AVX_CMP_P(vcmpneq, 0x4);
1669   AVX_CMP_P(vcmpnlt, 0x5);
1670   AVX_CMP_P(vcmpnle, 0x6);
1671 
1672 #undef AVX_CMP_P
1673 
1674   void vlddqu(XMMRegister dst, Operand src) {
1675     vinstr(0xF0, dst, xmm0, src, kF2, k0F, kWIG);
1676   }
1677   void vpsllw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1678     vinstr(0x71, xmm6, dst, src, k66, k0F, kWIG);
1679     emit(imm8);
1680   }
1681   void vpsrlw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1682     vinstr(0x71, xmm2, dst, src, k66, k0F, kWIG);
1683     emit(imm8);
1684   }
1685   void vpsraw(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1686     vinstr(0x71, xmm4, dst, src, k66, k0F, kWIG);
1687     emit(imm8);
1688   }
1689   void vpslld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1690     vinstr(0x72, xmm6, dst, src, k66, k0F, kWIG);
1691     emit(imm8);
1692   }
1693   void vpsrld(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1694     vinstr(0x72, xmm2, dst, src, k66, k0F, kWIG);
1695     emit(imm8);
1696   }
1697   void vpsrad(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1698     vinstr(0x72, xmm4, dst, src, k66, k0F, kWIG);
1699     emit(imm8);
1700   }
1701   void vpextrb(Register dst, XMMRegister src, uint8_t imm8) {
1702     XMMRegister idst = XMMRegister::from_code(dst.code());
1703     vinstr(0x14, src, xmm0, idst, k66, k0F3A, kW0);
1704     emit(imm8);
1705   }
1706   void vpextrb(Operand dst, XMMRegister src, uint8_t imm8) {
1707     vinstr(0x14, src, xmm0, dst, k66, k0F3A, kW0);
1708     emit(imm8);
1709   }
1710   void vpextrw(Register dst, XMMRegister src, uint8_t imm8) {
1711     XMMRegister idst = XMMRegister::from_code(dst.code());
1712     vinstr(0xc5, idst, xmm0, src, k66, k0F, kW0);
1713     emit(imm8);
1714   }
1715   void vpextrw(Operand dst, XMMRegister src, uint8_t imm8) {
1716     vinstr(0x15, src, xmm0, dst, k66, k0F3A, kW0);
1717     emit(imm8);
1718   }
1719   void vpextrd(Register dst, XMMRegister src, uint8_t imm8) {
1720     XMMRegister idst = XMMRegister::from_code(dst.code());
1721     vinstr(0x16, src, xmm0, idst, k66, k0F3A, kW0);
1722     emit(imm8);
1723   }
1724   void vpextrd(Operand dst, XMMRegister src, uint8_t imm8) {
1725     vinstr(0x16, src, xmm0, dst, k66, k0F3A, kW0);
1726     emit(imm8);
1727   }
1728   void vpinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1729     XMMRegister isrc = XMMRegister::from_code(src2.code());
1730     vinstr(0x20, dst, src1, isrc, k66, k0F3A, kW0);
1731     emit(imm8);
1732   }
1733   void vpinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1734     vinstr(0x20, dst, src1, src2, k66, k0F3A, kW0);
1735     emit(imm8);
1736   }
1737   void vpinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1738     XMMRegister isrc = XMMRegister::from_code(src2.code());
1739     vinstr(0xc4, dst, src1, isrc, k66, k0F, kW0);
1740     emit(imm8);
1741   }
1742   void vpinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1743     vinstr(0xc4, dst, src1, src2, k66, k0F, kW0);
1744     emit(imm8);
1745   }
1746   void vpinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8) {
1747     XMMRegister isrc = XMMRegister::from_code(src2.code());
1748     vinstr(0x22, dst, src1, isrc, k66, k0F3A, kW0);
1749     emit(imm8);
1750   }
1751   void vpinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) {
1752     vinstr(0x22, dst, src1, src2, k66, k0F3A, kW0);
1753     emit(imm8);
1754   }
1755   void vpshufd(XMMRegister dst, XMMRegister src, uint8_t imm8) {
1756     vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG);
1757     emit(imm8);
1758   }
1759 
1760   void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1761   void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1762   void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
1763   void vpd(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
1764 
1765   // BMI instructions
1766   void andnq(Register dst, Register src1, Register src2) {
1767     bmi1q(0xf2, dst, src1, src2);
1768   }
1769   void andnq(Register dst, Register src1, Operand src2) {
1770     bmi1q(0xf2, dst, src1, src2);
1771   }
1772   void andnl(Register dst, Register src1, Register src2) {
1773     bmi1l(0xf2, dst, src1, src2);
1774   }
1775   void andnl(Register dst, Register src1, Operand src2) {
1776     bmi1l(0xf2, dst, src1, src2);
1777   }
1778   void bextrq(Register dst, Register src1, Register src2) {
1779     bmi1q(0xf7, dst, src2, src1);
1780   }
1781   void bextrq(Register dst, Operand src1, Register src2) {
1782     bmi1q(0xf7, dst, src2, src1);
1783   }
1784   void bextrl(Register dst, Register src1, Register src2) {
1785     bmi1l(0xf7, dst, src2, src1);
1786   }
1787   void bextrl(Register dst, Operand src1, Register src2) {
1788     bmi1l(0xf7, dst, src2, src1);
1789   }
1790   void blsiq(Register dst, Register src) { bmi1q(0xf3, rbx, dst, src); }
1791   void blsiq(Register dst, Operand src) { bmi1q(0xf3, rbx, dst, src); }
1792   void blsil(Register dst, Register src) { bmi1l(0xf3, rbx, dst, src); }
1793   void blsil(Register dst, Operand src) { bmi1l(0xf3, rbx, dst, src); }
1794   void blsmskq(Register dst, Register src) { bmi1q(0xf3, rdx, dst, src); }
1795   void blsmskq(Register dst, Operand src) { bmi1q(0xf3, rdx, dst, src); }
1796   void blsmskl(Register dst, Register src) { bmi1l(0xf3, rdx, dst, src); }
1797   void blsmskl(Register dst, Operand src) { bmi1l(0xf3, rdx, dst, src); }
1798   void blsrq(Register dst, Register src) { bmi1q(0xf3, rcx, dst, src); }
1799   void blsrq(Register dst, Operand src) { bmi1q(0xf3, rcx, dst, src); }
1800   void blsrl(Register dst, Register src) { bmi1l(0xf3, rcx, dst, src); }
1801   void blsrl(Register dst, Operand src) { bmi1l(0xf3, rcx, dst, src); }
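  // Note: blsi/blsmsk/blsr all use the BMI1 opcode 0xf3; the fixed register
  // passed as |reg| (rbx = /3 for blsi, rdx = /2 for blsmsk, rcx = /1 for blsr)
  // is not a real operand but supplies the opcode extension encoded in the reg
  // field of the ModR/M byte.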
1802   void tzcntq(Register dst, Register src);
1803   void tzcntq(Register dst, Operand src);
1804   void tzcntl(Register dst, Register src);
1805   void tzcntl(Register dst, Operand src);
1806 
1807   void lzcntq(Register dst, Register src);
1808   void lzcntq(Register dst, Operand src);
1809   void lzcntl(Register dst, Register src);
1810   void lzcntl(Register dst, Operand src);
1811 
1812   void popcntq(Register dst, Register src);
1813   void popcntq(Register dst, Operand src);
1814   void popcntl(Register dst, Register src);
1815   void popcntl(Register dst, Operand src);
1816 
1817   void bzhiq(Register dst, Register src1, Register src2) {
1818     bmi2q(kNone, 0xf5, dst, src2, src1);
1819   }
1820   void bzhiq(Register dst, Operand src1, Register src2) {
1821     bmi2q(kNone, 0xf5, dst, src2, src1);
1822   }
1823   void bzhil(Register dst, Register src1, Register src2) {
1824     bmi2l(kNone, 0xf5, dst, src2, src1);
1825   }
1826   void bzhil(Register dst, Operand src1, Register src2) {
1827     bmi2l(kNone, 0xf5, dst, src2, src1);
1828   }
1829   void mulxq(Register dst1, Register dst2, Register src) {
1830     bmi2q(kF2, 0xf6, dst1, dst2, src);
1831   }
1832   void mulxq(Register dst1, Register dst2, Operand src) {
1833     bmi2q(kF2, 0xf6, dst1, dst2, src);
1834   }
1835   void mulxl(Register dst1, Register dst2, Register src) {
1836     bmi2l(kF2, 0xf6, dst1, dst2, src);
1837   }
1838   void mulxl(Register dst1, Register dst2, Operand src) {
1839     bmi2l(kF2, 0xf6, dst1, dst2, src);
1840   }
1841   void pdepq(Register dst, Register src1, Register src2) {
1842     bmi2q(kF2, 0xf5, dst, src1, src2);
1843   }
1844   void pdepq(Register dst, Register src1, Operand src2) {
1845     bmi2q(kF2, 0xf5, dst, src1, src2);
1846   }
1847   void pdepl(Register dst, Register src1, Register src2) {
1848     bmi2l(kF2, 0xf5, dst, src1, src2);
1849   }
1850   void pdepl(Register dst, Register src1, Operand src2) {
1851     bmi2l(kF2, 0xf5, dst, src1, src2);
1852   }
1853   void pextq(Register dst, Register src1, Register src2) {
1854     bmi2q(kF3, 0xf5, dst, src1, src2);
1855   }
1856   void pextq(Register dst, Register src1, Operand src2) {
1857     bmi2q(kF3, 0xf5, dst, src1, src2);
1858   }
1859   void pextl(Register dst, Register src1, Register src2) {
1860     bmi2l(kF3, 0xf5, dst, src1, src2);
1861   }
1862   void pextl(Register dst, Register src1, Operand src2) {
1863     bmi2l(kF3, 0xf5, dst, src1, src2);
1864   }
1865   void sarxq(Register dst, Register src1, Register src2) {
1866     bmi2q(kF3, 0xf7, dst, src2, src1);
1867   }
1868   void sarxq(Register dst, Operand src1, Register src2) {
1869     bmi2q(kF3, 0xf7, dst, src2, src1);
1870   }
1871   void sarxl(Register dst, Register src1, Register src2) {
1872     bmi2l(kF3, 0xf7, dst, src2, src1);
1873   }
1874   void sarxl(Register dst, Operand src1, Register src2) {
1875     bmi2l(kF3, 0xf7, dst, src2, src1);
1876   }
1877   void shlxq(Register dst, Register src1, Register src2) {
1878     bmi2q(k66, 0xf7, dst, src2, src1);
1879   }
1880   void shlxq(Register dst, Operand src1, Register src2) {
1881     bmi2q(k66, 0xf7, dst, src2, src1);
1882   }
1883   void shlxl(Register dst, Register src1, Register src2) {
1884     bmi2l(k66, 0xf7, dst, src2, src1);
1885   }
1886   void shlxl(Register dst, Operand src1, Register src2) {
1887     bmi2l(k66, 0xf7, dst, src2, src1);
1888   }
1889   void shrxq(Register dst, Register src1, Register src2) {
1890     bmi2q(kF2, 0xf7, dst, src2, src1);
1891   }
1892   void shrxq(Register dst, Operand src1, Register src2) {
1893     bmi2q(kF2, 0xf7, dst, src2, src1);
1894   }
1895   void shrxl(Register dst, Register src1, Register src2) {
1896     bmi2l(kF2, 0xf7, dst, src2, src1);
1897   }
1898   void shrxl(Register dst, Operand src1, Register src2) {
1899     bmi2l(kF2, 0xf7, dst, src2, src1);
1900   }
1901   void rorxq(Register dst, Register src, byte imm8);
1902   void rorxq(Register dst, Operand src, byte imm8);
1903   void rorxl(Register dst, Register src, byte imm8);
1904   void rorxl(Register dst, Operand src, byte imm8);
1905 
1906   void lfence();
1907   void pause();
1908 
1909   // Check the code size generated from label to here.
1910   int SizeOfCodeGeneratedSince(Label* label) {
1911     return pc_offset() - label->pos();
1912   }
1913 
1914   // Record a comment relocation entry that can be used by a disassembler.
1915   // Use --code-comments to enable.
1916   void RecordComment(const char* msg);
1917 
1918   // Record a deoptimization reason that can be used by a log or cpu profiler.
1919   // Use --trace-deopt to enable.
1920   void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1921                          int id);
1922 
1923   void PatchConstantPoolAccessInstruction(int pc_offset, int offset,
1924                                           ConstantPoolEntry::Access access,
1925                                           ConstantPoolEntry::Type type) {
1926     // No embedded constant pool support.
1927     UNREACHABLE();
1928   }
1929 
1930   // Writes a single word of data in the code stream.
1931   // Used for inline tables, e.g., jump-tables.
1932   void db(uint8_t data);
1933   void dd(uint32_t data);
1934   void dq(uint64_t data);
1935   void dp(uintptr_t data) { dq(data); }
1936   void dq(Label* label);
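  // Illustrative sketch of emitting an inline jump table with dq(Label*); the
  // label names are made up:
  //   Label table, case0, case1;
  //   ...
  //   bind(&table);
  //   dq(&case0);  // each entry is an internal reference to the label address
  //   dq(&case1);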
1937 
1938   // Patch entries for partial constant pool.
1939   void PatchConstPool();
1940 
1941   // Check whether to use the partial constant pool for this rmode.
1942   static bool UseConstPoolFor(RelocInfo::Mode rmode);
1943 
1944   // Check if there is less than kGap bytes available in the buffer.
1945   // If this is the case, we need to grow the buffer before emitting
1946   // an instruction or relocation information.
1947   inline bool buffer_overflow() const {
1948     return pc_ >= reloc_info_writer.pos() - kGap;
1949   }
1950 
1951   // Get the number of bytes available in the buffer.
1952   inline int available_space() const {
1953     return static_cast<int>(reloc_info_writer.pos() - pc_);
1954   }
1955 
1956   static bool IsNop(Address addr);
1957 
1958   // Avoid overflows for displacements etc.
1959   static constexpr int kMaximalBufferSize = 512 * MB;
1960 
1961   byte byte_at(int pos)  { return buffer_[pos]; }
1962   void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
1963 
1964  protected:
1965   // Call near indirect
1966   void call(Operand operand);
1967 
1968  private:
1969   byte* addr_at(int pos)  { return buffer_ + pos; }
1970   uint32_t long_at(int pos)  {
1971     return *reinterpret_cast<uint32_t*>(addr_at(pos));
1972   }
1973   void long_at_put(int pos, uint32_t x)  {
1974     *reinterpret_cast<uint32_t*>(addr_at(pos)) = x;
1975   }
1976 
1977   // code emission
1978   void GrowBuffer();
1979 
1980   void emit(byte x) { *pc_++ = x; }
1981   inline void emitl(uint32_t x);
1982   inline void emitp(Address x, RelocInfo::Mode rmode);
1983   inline void emitq(uint64_t x);
1984   inline void emitw(uint16_t x);
1985   inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode);
1986   inline void emit(Immediate x);
1987 
1988   // Emits a REX prefix that encodes a 64-bit operand size and
1989   // the top bit of both register codes.
1990   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
1991   // REX.W is set.
1992   inline void emit_rex_64(XMMRegister reg, Register rm_reg);
1993   inline void emit_rex_64(Register reg, XMMRegister rm_reg);
1994   inline void emit_rex_64(Register reg, Register rm_reg);
1995   inline void emit_rex_64(XMMRegister reg, XMMRegister rm_reg);
1996 
1997   // Emits a REX prefix that encodes a 64-bit operand size and
1998   // the top bit of the destination, index, and base register codes.
1999   // The high bit of reg is used for REX.R, the high bit of op's base
2000   // register is used for REX.B, and the high bit of op's index register
2001   // is used for REX.X.  REX.W is set.
2002   inline void emit_rex_64(Register reg, Operand op);
2003   inline void emit_rex_64(XMMRegister reg, Operand op);
2004 
2005   // Emits a REX prefix that encodes a 64-bit operand size and
2006   // the top bit of the register code.
2007   // The high bit of register is used for REX.B.
2008   // REX.W is set and REX.R and REX.X are clear.
2009   inline void emit_rex_64(Register rm_reg);
2010 
2011   // Emits a REX prefix that encodes a 64-bit operand size and
2012   // the top bit of the index and base register codes.
2013   // The high bit of op's base register is used for REX.B, and the high
2014   // bit of op's index register is used for REX.X.
2015   // REX.W is set and REX.R clear.
2016   inline void emit_rex_64(Operand op);
2017 
2018   // Emit a REX prefix that only sets REX.W to choose a 64-bit operand size.
2019   void emit_rex_64() { emit(0x48); }
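  // For reference, the REX prefix has the bit layout 0b0100WRXB, so REX.W alone
  // is the 0x48 emitted above and, e.g., REX.W|REX.B is 0x49.  The emit_rex_32
  // variants below emit the same byte with REX.W clear, and the
  // emit_optional_rex_32 variants omit the prefix entirely when no bits are set.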
2020 
2021   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
2022   // REX.W is clear.
2023   inline void emit_rex_32(Register reg, Register rm_reg);
2024 
2025   // The high bit of reg is used for REX.R, the high bit of op's base
2026   // register is used for REX.B, and the high bit of op's index register
2027   // is used for REX.X.  REX.W is cleared.
2028   inline void emit_rex_32(Register reg, Operand op);
2029 
2030   // High bit of rm_reg goes to REX.B.
2031   // REX.W, REX.R and REX.X are clear.
2032   inline void emit_rex_32(Register rm_reg);
2033 
2034   // High bit of base goes to REX.B and high bit of index to REX.X.
2035   // REX.W and REX.R are clear.
2036   inline void emit_rex_32(Operand op);
2037 
2038   // High bit of reg goes to REX.R, high bit of rm_reg goes to REX.B.
2039   // REX.W is cleared.  If no REX bits are set, no byte is emitted.
2040   inline void emit_optional_rex_32(Register reg, Register rm_reg);
2041 
2042   // The high bit of reg is used for REX.R, the high bit of op's base
2043   // register is used for REX.B, and the high bit of op's index register
2044   // is used for REX.X.  REX.W is cleared.  If no REX bits are set, nothing
2045   // is emitted.
2046   inline void emit_optional_rex_32(Register reg, Operand op);
2047 
2048   // As for emit_optional_rex_32(Register, Register), except that
2049   // the registers are XMM registers.
2050   inline void emit_optional_rex_32(XMMRegister reg, XMMRegister base);
2051 
2052   // As for emit_optional_rex_32(Register, Register), except that
2053   // one of the registers is an XMM register.
2054   inline void emit_optional_rex_32(XMMRegister reg, Register base);
2055 
2056   // As for emit_optional_rex_32(Register, Register), except that
2057   // one of the registers is an XMM register.
2058   inline void emit_optional_rex_32(Register reg, XMMRegister base);
2059 
2060   // As for emit_optional_rex_32(Register, Operand), except that
2061   // the register is an XMM register.
2062   inline void emit_optional_rex_32(XMMRegister reg, Operand op);
2063 
2064   // Optionally do as emit_rex_32(Register) if the register number has
2065   // the high bit set.
2066   inline void emit_optional_rex_32(Register rm_reg);
2067   inline void emit_optional_rex_32(XMMRegister rm_reg);
2068 
2069   // Optionally do as emit_rex_32(Operand) if the operand register
2070   // numbers have a high bit set.
2071   inline void emit_optional_rex_32(Operand op);
2072 
2073   void emit_rex(int size) {
2074     if (size == kInt64Size) {
2075       emit_rex_64();
2076     } else {
2077       DCHECK_EQ(size, kInt32Size);
2078     }
2079   }
2080 
2081   template<class P1>
2082   void emit_rex(P1 p1, int size) {
2083     if (size == kInt64Size) {
2084       emit_rex_64(p1);
2085     } else {
2086       DCHECK_EQ(size, kInt32Size);
2087       emit_optional_rex_32(p1);
2088     }
2089   }
2090 
2091   template<class P1, class P2>
2092   void emit_rex(P1 p1, P2 p2, int size) {
2093     if (size == kInt64Size) {
2094       emit_rex_64(p1, p2);
2095     } else {
2096       DCHECK_EQ(size, kInt32Size);
2097       emit_optional_rex_32(p1, p2);
2098     }
2099   }
2100 
2101   // Emit vex prefix
2102   void emit_vex2_byte0() { emit(0xc5); }
2103   inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
2104                               SIMDPrefix pp);
2105   void emit_vex3_byte0() { emit(0xc4); }
2106   inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
2107   inline void emit_vex3_byte1(XMMRegister reg, Operand rm, LeadingOpcode m);
2108   inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
2109                               SIMDPrefix pp);
2110   inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
2111                               VectorLength l, SIMDPrefix pp, LeadingOpcode m,
2112                               VexW w);
2113   inline void emit_vex_prefix(Register reg, Register v, Register rm,
2114                               VectorLength l, SIMDPrefix pp, LeadingOpcode m,
2115                               VexW w);
2116   inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, Operand rm,
2117                               VectorLength l, SIMDPrefix pp, LeadingOpcode m,
2118                               VexW w);
2119   inline void emit_vex_prefix(Register reg, Register v, Operand rm,
2120                               VectorLength l, SIMDPrefix pp, LeadingOpcode m,
2121                               VexW w);
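  // VEX prefix layout for reference: the two-byte form is C5 [R vvvv L pp] and
  // the three-byte form is C4 [R X B m-mmmm] [W vvvv L pp], where vvvv holds
  // the (inverted) code of the extra source register |v|, pp selects the
  // implied 66/F3/F2 prefix (SIMDPrefix), m-mmmm selects the 0F/0F38/0F3A
  // opcode map (LeadingOpcode), and W is the VexW bit.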
2122 
2123   // Emit the ModR/M byte, and optionally the SIB byte and
2124   // 1- or 4-byte offset for a memory operand.  Also encodes
2125   // the second operand of the operation, a register or operation
2126   // subcode, into the reg field of the ModR/M byte.
2127   void emit_operand(Register reg, Operand adr) {
2128     emit_operand(reg.low_bits(), adr);
2129   }
2130 
2131   // Emit the ModR/M byte, and optionally the SIB byte and
2132   // 1- or 4-byte offset for a memory operand.  Also used to encode
2133   // a three-bit opcode extension into the ModR/M byte.
2134   void emit_operand(int rm, Operand adr);
2135 
2136   // Emit a ModR/M byte with registers coded in the reg and rm_reg fields.
2137   void emit_modrm(Register reg, Register rm_reg) {
2138     emit(0xC0 | reg.low_bits() << 3 | rm_reg.low_bits());
2139   }
2140 
2141   // Emit a ModR/M byte with an operation subcode in the reg field and
2142   // a register in the rm_reg field.
2143   void emit_modrm(int code, Register rm_reg) {
2144     DCHECK(is_uint3(code));
2145     emit(0xC0 | code << 3 | rm_reg.low_bits());
2146   }
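  // ModR/M layout for reference: mod (2 bits) | reg (3 bits) | rm (3 bits).
  // The 0xC0 base above sets mod = 11 (register-direct), so e.g.
  // emit_modrm(rcx, rdx) emits 0xC0 | 1 << 3 | 2 == 0xCA, since rcx and rdx
  // have low bits 1 and 2 respectively.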
2147 
2148   // Emit the code-object-relative offset of the label's position
2149   inline void emit_code_relative_offset(Label* label);
2150 
2151   // The first argument is the reg field, the second argument is the r/m field.
2152   void emit_sse_operand(XMMRegister dst, XMMRegister src);
2153   void emit_sse_operand(XMMRegister reg, Operand adr);
2154   void emit_sse_operand(Register reg, Operand adr);
2155   void emit_sse_operand(XMMRegister dst, Register src);
2156   void emit_sse_operand(Register dst, XMMRegister src);
2157   void emit_sse_operand(XMMRegister dst);
2158 
2159   // Emit machine code for one of the operations ADD, ADC, SUB, SBB,
2160   // AND, OR, XOR, or CMP.  The encodings of these operations are all
2161   // similar, differing just in the opcode or in the reg field of the
2162   // ModR/M byte.
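  // For reference, the register forms differ only in that opcode byte:
  // 0x03 ADD, 0x0B OR, 0x13 ADC, 0x1B SBB, 0x23 AND, 0x2B SUB, 0x33 XOR,
  // 0x3B CMP (the emit_add/emit_or/... helpers further down pass exactly
  // these), each followed by a ModR/M byte.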
2163   void arithmetic_op_8(byte opcode, Register reg, Register rm_reg);
2164   void arithmetic_op_8(byte opcode, Register reg, Operand rm_reg);
2165   void arithmetic_op_16(byte opcode, Register reg, Register rm_reg);
2166   void arithmetic_op_16(byte opcode, Register reg, Operand rm_reg);
2167   // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2168   void arithmetic_op(byte opcode, Register reg, Register rm_reg, int size);
2169   void arithmetic_op(byte opcode, Register reg, Operand rm_reg, int size);
2170   // Operate on a byte in memory or register.
2171   void immediate_arithmetic_op_8(byte subcode,
2172                                  Register dst,
2173                                  Immediate src);
2174   void immediate_arithmetic_op_8(byte subcode, Operand dst, Immediate src);
2175   // Operate on a word in memory or register.
2176   void immediate_arithmetic_op_16(byte subcode,
2177                                   Register dst,
2178                                   Immediate src);
2179   void immediate_arithmetic_op_16(byte subcode, Operand dst, Immediate src);
2180   // Operate on operands/registers with pointer size, 32-bit or 64-bit size.
2181   void immediate_arithmetic_op(byte subcode,
2182                                Register dst,
2183                                Immediate src,
2184                                int size);
2185   void immediate_arithmetic_op(byte subcode, Operand dst, Immediate src,
2186                                int size);
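  // For reference, |subcode| is the three-bit opcode extension placed in the
  // reg field of the ModR/M byte: /0 ADD, /1 OR, /2 ADC, /3 SBB, /4 AND,
  // /5 SUB, /6 XOR, /7 CMP, matching the constants passed by the emit_*
  // helpers below.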
2187 
2188   // Emit machine code for a shift operation.
2189   void shift(Operand dst, Immediate shift_amount, int subcode, int size);
2190   void shift(Register dst, Immediate shift_amount, int subcode, int size);
2191   // Shift dst by cl % 64 bits.
2192   void shift(Register dst, int subcode, int size);
2193   void shift(Operand dst, int subcode, int size);
2194 
2195   void emit_farith(int b1, int b2, int i);
2196 
2197   // labels
2198   // void print(Label* L);
2199   void bind_to(Label* L, int pos);
2200 
2201   // record reloc info for current pc_
2202   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
2203 
2204   // Arithmetics
2205   void emit_add(Register dst, Register src, int size) {
2206     arithmetic_op(0x03, dst, src, size);
2207   }
2208 
2209   void emit_add(Register dst, Immediate src, int size) {
2210     immediate_arithmetic_op(0x0, dst, src, size);
2211   }
2212 
2213   void emit_add(Register dst, Operand src, int size) {
2214     arithmetic_op(0x03, dst, src, size);
2215   }
2216 
2217   void emit_add(Operand dst, Register src, int size) {
2218     arithmetic_op(0x1, src, dst, size);
2219   }
2220 
2221   void emit_add(Operand dst, Immediate src, int size) {
2222     immediate_arithmetic_op(0x0, dst, src, size);
2223   }
2224 
2225   void emit_and(Register dst, Register src, int size) {
2226     arithmetic_op(0x23, dst, src, size);
2227   }
2228 
2229   void emit_and(Register dst, Operand src, int size) {
2230     arithmetic_op(0x23, dst, src, size);
2231   }
2232 
2233   void emit_and(Operand dst, Register src, int size) {
2234     arithmetic_op(0x21, src, dst, size);
2235   }
2236 
2237   void emit_and(Register dst, Immediate src, int size) {
2238     immediate_arithmetic_op(0x4, dst, src, size);
2239   }
2240 
2241   void emit_and(Operand dst, Immediate src, int size) {
2242     immediate_arithmetic_op(0x4, dst, src, size);
2243   }
2244 
2245   void emit_cmp(Register dst, Register src, int size) {
2246     arithmetic_op(0x3B, dst, src, size);
2247   }
2248 
2249   void emit_cmp(Register dst, Operand src, int size) {
2250     arithmetic_op(0x3B, dst, src, size);
2251   }
2252 
2253   void emit_cmp(Operand dst, Register src, int size) {
2254     arithmetic_op(0x39, src, dst, size);
2255   }
2256 
2257   void emit_cmp(Register dst, Immediate src, int size) {
2258     immediate_arithmetic_op(0x7, dst, src, size);
2259   }
2260 
2261   void emit_cmp(Operand dst, Immediate src, int size) {
2262     immediate_arithmetic_op(0x7, dst, src, size);
2263   }
2264 
2265   // Compare {al,ax,eax,rax} with dst.  If equal, set ZF and write src into
2266   // dst. Otherwise clear ZF and write dst into {al,ax,eax,rax}.  This
2267   // operation is only atomic if prefixed by the lock instruction.
2268   void emit_cmpxchg(Operand dst, Register src, int size);
2269 
2270   void emit_dec(Register dst, int size);
2271   void emit_dec(Operand dst, int size);
2272 
2273   // Divide rdx:rax by src.  Quotient in rax, remainder in rdx when size is 64.
2274   // Divide edx:eax by lower 32 bits of src.  Quotient in eax, remainder in edx
2275   // when size is 32.
2276   void emit_idiv(Register src, int size);
2277   void emit_div(Register src, int size);
2278 
2279   // Signed multiply instructions.
2280   // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
2281   void emit_imul(Register src, int size);
2282   void emit_imul(Operand src, int size);
2283   void emit_imul(Register dst, Register src, int size);
2284   void emit_imul(Register dst, Operand src, int size);
2285   void emit_imul(Register dst, Register src, Immediate imm, int size);
2286   void emit_imul(Register dst, Operand src, Immediate imm, int size);
2287 
2288   void emit_inc(Register dst, int size);
2289   void emit_inc(Operand dst, int size);
2290 
2291   void emit_lea(Register dst, Operand src, int size);
2292 
2293   void emit_mov(Register dst, Operand src, int size);
2294   void emit_mov(Register dst, Register src, int size);
2295   void emit_mov(Operand dst, Register src, int size);
2296   void emit_mov(Register dst, Immediate value, int size);
2297   void emit_mov(Operand dst, Immediate value, int size);
2298 
2299   void emit_movzxb(Register dst, Operand src, int size);
2300   void emit_movzxb(Register dst, Register src, int size);
2301   void emit_movzxw(Register dst, Operand src, int size);
2302   void emit_movzxw(Register dst, Register src, int size);
2303 
2304   void emit_neg(Register dst, int size);
2305   void emit_neg(Operand dst, int size);
2306 
2307   void emit_not(Register dst, int size);
2308   void emit_not(Operand dst, int size);
2309 
2310   void emit_or(Register dst, Register src, int size) {
2311     arithmetic_op(0x0B, dst, src, size);
2312   }
2313 
2314   void emit_or(Register dst, Operand src, int size) {
2315     arithmetic_op(0x0B, dst, src, size);
2316   }
2317 
2318   void emit_or(Operand dst, Register src, int size) {
2319     arithmetic_op(0x9, src, dst, size);
2320   }
2321 
2322   void emit_or(Register dst, Immediate src, int size) {
2323     immediate_arithmetic_op(0x1, dst, src, size);
2324   }
2325 
2326   void emit_or(Operand dst, Immediate src, int size) {
2327     immediate_arithmetic_op(0x1, dst, src, size);
2328   }
2329 
2330   void emit_repmovs(int size);
2331 
2332   void emit_sbb(Register dst, Register src, int size) {
2333     arithmetic_op(0x1b, dst, src, size);
2334   }
2335 
2336   void emit_sub(Register dst, Register src, int size) {
2337     arithmetic_op(0x2B, dst, src, size);
2338   }
2339 
2340   void emit_sub(Register dst, Immediate src, int size) {
2341     immediate_arithmetic_op(0x5, dst, src, size);
2342   }
2343 
2344   void emit_sub(Register dst, Operand src, int size) {
2345     arithmetic_op(0x2B, dst, src, size);
2346   }
2347 
2348   void emit_sub(Operand dst, Register src, int size) {
2349     arithmetic_op(0x29, src, dst, size);
2350   }
2351 
2352   void emit_sub(Operand dst, Immediate src, int size) {
2353     immediate_arithmetic_op(0x5, dst, src, size);
2354   }
2355 
2356   void emit_test(Register dst, Register src, int size);
2357   void emit_test(Register reg, Immediate mask, int size);
2358   void emit_test(Operand op, Register reg, int size);
2359   void emit_test(Operand op, Immediate mask, int size);
2360   void emit_test(Register reg, Operand op, int size) {
2361     return emit_test(op, reg, size);
2362   }
2363 
2364   void emit_xchg(Register dst, Register src, int size);
2365   void emit_xchg(Register dst, Operand src, int size);
2366 
2367   void emit_xor(Register dst, Register src, int size) {
2368     if (size == kInt64Size && dst.code() == src.code()) {
2369       // 32 bit operations zero the top 32 bits of 64 bit registers. Therefore
2370       // there is no need to make this a 64 bit operation.
2371       arithmetic_op(0x33, dst, src, kInt32Size);
2372     } else {
2373       arithmetic_op(0x33, dst, src, size);
2374     }
2375   }
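  // E.g. emit_xor(rax, rax, kInt64Size) is therefore emitted as 33 C0 (two
  // bytes) rather than 48 33 C0.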
2376 
2377   void emit_xor(Register dst, Operand src, int size) {
2378     arithmetic_op(0x33, dst, src, size);
2379   }
2380 
2381   void emit_xor(Register dst, Immediate src, int size) {
2382     immediate_arithmetic_op(0x6, dst, src, size);
2383   }
2384 
2385   void emit_xor(Operand dst, Immediate src, int size) {
2386     immediate_arithmetic_op(0x6, dst, src, size);
2387   }
2388 
2389   void emit_xor(Operand dst, Register src, int size) {
2390     arithmetic_op(0x31, src, dst, size);
2391   }
2392 
2393   // Most BMI instructions are similar.
2394   void bmi1q(byte op, Register reg, Register vreg, Register rm);
2395   void bmi1q(byte op, Register reg, Register vreg, Operand rm);
2396   void bmi1l(byte op, Register reg, Register vreg, Register rm);
2397   void bmi1l(byte op, Register reg, Register vreg, Operand rm);
2398   void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2399   void bmi2q(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2400   void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Register rm);
2401   void bmi2l(SIMDPrefix pp, byte op, Register reg, Register vreg, Operand rm);
2402 
2403   // record the position of jmp/jcc instruction
2404   void record_farjmp_position(Label* L, int pos);
2405 
2406   bool is_optimizable_farjmp(int idx);
2407 
2408   void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
2409 
2410   friend class EnsureSpace;
2411   friend class RegExpMacroAssemblerX64;
2412 
2413   // code generation
2414   RelocInfoWriter reloc_info_writer;
2415 
2416   // Internal reference positions, required for (potential) patching in
2417   // GrowBuffer(); contains only those internal references whose labels
2418   // are already bound.
2419   std::deque<int> internal_reference_positions_;
2420 
2421   // Variables for this instance of assembler
2422   int farjmp_num_ = 0;
2423   std::deque<int> farjmp_positions_;
2424   std::map<Label*, std::vector<int>> label_farjmp_maps_;
2425 
2426   ConstPool constpool_;
2427 
2428   friend class ConstPool;
2429 };
2430 
2431 
2432 // Helper class that ensures that there is enough space for generating
2433 // instructions and relocation information.  The constructor makes
2434 // sure that there is enough space and (in debug mode) the destructor
2435 // checks that we did not generate too much.
2436 class EnsureSpace BASE_EMBEDDED {
2437  public:
2438   explicit EnsureSpace(Assembler* assembler) : assembler_(assembler) {
2439     if (assembler_->buffer_overflow()) assembler_->GrowBuffer();
2440 #ifdef DEBUG
2441     space_before_ = assembler_->available_space();
2442 #endif
2443   }
2444 
2445 #ifdef DEBUG
2446   ~EnsureSpace() {
2447     int bytes_generated = space_before_ - assembler_->available_space();
2448     DCHECK(bytes_generated < assembler_->kGap);
2449   }
2450 #endif
2451 
2452  private:
2453   Assembler* assembler_;
2454 #ifdef DEBUG
2455   int space_before_;
2456 #endif
2457 };
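// Typical use (illustrative sketch): an emitter in assembler-x64.cc reserves
// space before writing any bytes, e.g.
//   void Assembler::nop() {
//     EnsureSpace ensure_space(this);
//     emit(0x90);
//   }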
2458 
2459 }  // namespace internal
2460 }  // namespace v8
2461 
2462 #endif  // V8_X64_ASSEMBLER_X64_H_
2463