1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
28
29 #include <wtf/Platform.h>
30
31 #if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
32
33 #include "AssemblerBuffer.h"
34 #include <stdint.h>
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
37
38 namespace JSC {
39
40 inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
41 #if PLATFORM(X86_64)
42 inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
43 inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
44 #endif
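// Annotation (not in the original source): these helpers gate the choice between the
// short sign-extended imm8 encodings and the full imm32 encodings used throughout the
// assembler below; e.g. addl_ir(1, X86::eax) emits 83 C0 01, while addl_ir(1000, X86::eax)
// emits 81 C0 E8 03 00 00.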
45
46 namespace X86 {
47 typedef enum {
48 eax,
49 ecx,
50 edx,
51 ebx,
52 esp,
53 ebp,
54 esi,
55 edi,
56
57 #if PLATFORM(X86_64)
58 r8,
59 r9,
60 r10,
61 r11,
62 r12,
63 r13,
64 r14,
65 r15,
66 #endif
67 } RegisterID;
68
69 typedef enum {
70 xmm0,
71 xmm1,
72 xmm2,
73 xmm3,
74 xmm4,
75 xmm5,
76 xmm6,
77 xmm7,
78 } XMMRegisterID;
79 }
80
81 class X86Assembler {
82 public:
83 typedef X86::RegisterID RegisterID;
84 typedef X86::XMMRegisterID XMMRegisterID;
85 typedef XMMRegisterID FPRegisterID;
86
87 typedef enum {
88 ConditionO,
89 ConditionNO,
90 ConditionB,
91 ConditionAE,
92 ConditionE,
93 ConditionNE,
94 ConditionBE,
95 ConditionA,
96 ConditionS,
97 ConditionNS,
98 ConditionP,
99 ConditionNP,
100 ConditionL,
101 ConditionGE,
102 ConditionLE,
103 ConditionG,
104
105 ConditionC = ConditionB,
106 ConditionNC = ConditionAE,
107 } Condition;
108
109 private:
110 typedef enum {
111 OP_ADD_EvGv = 0x01,
112 OP_ADD_GvEv = 0x03,
113 OP_OR_EvGv = 0x09,
114 OP_OR_GvEv = 0x0B,
115 OP_2BYTE_ESCAPE = 0x0F,
116 OP_AND_EvGv = 0x21,
117 OP_AND_GvEv = 0x23,
118 OP_SUB_EvGv = 0x29,
119 OP_SUB_GvEv = 0x2B,
120 PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
121 OP_XOR_EvGv = 0x31,
122 OP_XOR_GvEv = 0x33,
123 OP_CMP_EvGv = 0x39,
124 OP_CMP_GvEv = 0x3B,
125 #if PLATFORM(X86_64)
126 PRE_REX = 0x40,
127 #endif
128 OP_PUSH_EAX = 0x50,
129 OP_POP_EAX = 0x58,
130 #if PLATFORM(X86_64)
131 OP_MOVSXD_GvEv = 0x63,
132 #endif
133 PRE_OPERAND_SIZE = 0x66,
134 PRE_SSE_66 = 0x66,
135 OP_PUSH_Iz = 0x68,
136 OP_IMUL_GvEvIz = 0x69,
137 OP_GROUP1_EvIz = 0x81,
138 OP_GROUP1_EvIb = 0x83,
139 OP_TEST_EvGv = 0x85,
140 OP_XCHG_EvGv = 0x87,
141 OP_MOV_EvGv = 0x89,
142 OP_MOV_GvEv = 0x8B,
143 OP_LEA = 0x8D,
144 OP_GROUP1A_Ev = 0x8F,
145 OP_CDQ = 0x99,
146 OP_MOV_EAXOv = 0xA1,
147 OP_MOV_OvEAX = 0xA3,
148 OP_MOV_EAXIv = 0xB8,
149 OP_GROUP2_EvIb = 0xC1,
150 OP_RET = 0xC3,
151 OP_GROUP11_EvIz = 0xC7,
152 OP_INT3 = 0xCC,
153 OP_GROUP2_Ev1 = 0xD1,
154 OP_GROUP2_EvCL = 0xD3,
155 OP_CALL_rel32 = 0xE8,
156 OP_JMP_rel32 = 0xE9,
157 PRE_SSE_F2 = 0xF2,
158 OP_HLT = 0xF4,
159 OP_GROUP3_EbIb = 0xF6,
160 OP_GROUP3_Ev = 0xF7,
161 OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate when the instruction is a test.
162 OP_GROUP5_Ev = 0xFF,
163 } OneByteOpcodeID;
164
165 typedef enum {
166 OP2_MOVSD_VsdWsd = 0x10,
167 OP2_MOVSD_WsdVsd = 0x11,
168 OP2_CVTSI2SD_VsdEd = 0x2A,
169 OP2_CVTTSD2SI_GdWsd = 0x2C,
170 OP2_UCOMISD_VsdWsd = 0x2E,
171 OP2_ADDSD_VsdWsd = 0x58,
172 OP2_MULSD_VsdWsd = 0x59,
173 OP2_SUBSD_VsdWsd = 0x5C,
174 OP2_DIVSD_VsdWsd = 0x5E,
175 OP2_XORPD_VpdWpd = 0x57,
176 OP2_MOVD_VdEd = 0x6E,
177 OP2_MOVD_EdVd = 0x7E,
178 OP2_JCC_rel32 = 0x80,
179 OP_SETCC = 0x90,
180 OP2_IMUL_GvEv = 0xAF,
181 OP2_MOVZX_GvEb = 0xB6,
182 OP2_MOVZX_GvEw = 0xB7,
183 OP2_PEXTRW_GdUdIb = 0xC5,
184 } TwoByteOpcodeID;
185
186 TwoByteOpcodeID jccRel32(Condition cond)
187 {
188 return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
189 }
190
191 TwoByteOpcodeID setccOpcode(Condition cond)
192 {
193 return (TwoByteOpcodeID)(OP_SETCC + cond);
194 }
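// Worked example (annotation, not in the original source): ConditionE == 4, so
// jccRel32(ConditionE) is 0x84 - the 0F 84 <rel32> (je) sequence - and
// setccOpcode(ConditionE) is 0x94 (0F 94 /0, sete).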
195
196 typedef enum {
197 GROUP1_OP_ADD = 0,
198 GROUP1_OP_OR = 1,
199 GROUP1_OP_ADC = 2,
200 GROUP1_OP_AND = 4,
201 GROUP1_OP_SUB = 5,
202 GROUP1_OP_XOR = 6,
203 GROUP1_OP_CMP = 7,
204
205 GROUP1A_OP_POP = 0,
206
207 GROUP2_OP_SHL = 4,
208 GROUP2_OP_SAR = 7,
209
210 GROUP3_OP_TEST = 0,
211 GROUP3_OP_NOT = 2,
212 GROUP3_OP_NEG = 3,
213 GROUP3_OP_IDIV = 7,
214
215 GROUP5_OP_CALLN = 2,
216 GROUP5_OP_JMPN = 4,
217 GROUP5_OP_PUSH = 6,
218
219 GROUP11_MOV = 0,
220 } GroupOpcodeID;
221
222 class X86InstructionFormatter;
223 public:
224
225 class JmpSrc {
226 friend class X86Assembler;
227 friend class X86InstructionFormatter;
228 public:
229 JmpSrc()
230 : m_offset(-1)
231 {
232 }
233
234 void enableLatePatch() { }
235 private:
236 JmpSrc(int offset)
237 : m_offset(offset)
238 {
239 }
240
241 int m_offset;
242 };
243
244 class JmpDst {
245 friend class X86Assembler;
246 friend class X86InstructionFormatter;
247 public:
248 JmpDst()
249 : m_offset(-1)
250 , m_used(false)
251 {
252 }
253
254 bool isUsed() const { return m_used; }
255 void used() { m_used = true; }
256 private:
257 JmpDst(int offset)
258 : m_offset(offset)
259 , m_used(false)
260 {
261 ASSERT(m_offset == offset);
262 }
263
264 int m_offset : 31;
265 bool m_used : 1;
266 };
267
268 X86Assembler()
269 {
270 }
271
272 size_t size() const { return m_formatter.size(); }
273
274 // Stack operations:
275
276 void push_r(RegisterID reg)
277 {
278 m_formatter.oneByteOp(OP_PUSH_EAX, reg);
279 }
280
281 void pop_r(RegisterID reg)
282 {
283 m_formatter.oneByteOp(OP_POP_EAX, reg);
284 }
285
286 void push_i32(int imm)
287 {
288 m_formatter.oneByteOp(OP_PUSH_Iz);
289 m_formatter.immediate32(imm);
290 }
291
292 void push_m(int offset, RegisterID base)
293 {
294 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
295 }
296
297 void pop_m(int offset, RegisterID base)
298 {
299 m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
300 }
301
302 // Arithmetic operations:
303
304 #if !PLATFORM(X86_64)
305 void adcl_im(int imm, void* addr)
306 {
307 if (CAN_SIGN_EXTEND_8_32(imm)) {
308 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
309 m_formatter.immediate8(imm);
310 } else {
311 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
312 m_formatter.immediate32(imm);
313 }
314 }
315 #endif
316
317 void addl_rr(RegisterID src, RegisterID dst)
318 {
319 m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
320 }
321
322 void addl_mr(int offset, RegisterID base, RegisterID dst)
323 {
324 m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
325 }
326
327 void addl_rm(RegisterID src, int offset, RegisterID base)
328 {
329 m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
330 }
331
332 void addl_ir(int imm, RegisterID dst)
333 {
334 if (CAN_SIGN_EXTEND_8_32(imm)) {
335 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
336 m_formatter.immediate8(imm);
337 } else {
338 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
339 m_formatter.immediate32(imm);
340 }
341 }
342
343 void addl_im(int imm, int offset, RegisterID base)
344 {
345 if (CAN_SIGN_EXTEND_8_32(imm)) {
346 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
347 m_formatter.immediate8(imm);
348 } else {
349 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
350 m_formatter.immediate32(imm);
351 }
352 }
353
354 #if PLATFORM(X86_64)
355 void addq_rr(RegisterID src, RegisterID dst)
356 {
357 m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
358 }
359
360 void addq_ir(int imm, RegisterID dst)
361 {
362 if (CAN_SIGN_EXTEND_8_32(imm)) {
363 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
364 m_formatter.immediate8(imm);
365 } else {
366 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
367 m_formatter.immediate32(imm);
368 }
369 }
370
371 void addq_im(int imm, int offset, RegisterID base)
372 {
373 if (CAN_SIGN_EXTEND_8_32(imm)) {
374 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
375 m_formatter.immediate8(imm);
376 } else {
377 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
378 m_formatter.immediate32(imm);
379 }
380 }
381 #else
382 void addl_im(int imm, void* addr)
383 {
384 if (CAN_SIGN_EXTEND_8_32(imm)) {
385 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
386 m_formatter.immediate8(imm);
387 } else {
388 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
389 m_formatter.immediate32(imm);
390 }
391 }
392 #endif
393
394 void andl_rr(RegisterID src, RegisterID dst)
395 {
396 m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
397 }
398
399 void andl_mr(int offset, RegisterID base, RegisterID dst)
400 {
401 m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
402 }
403
404 void andl_rm(RegisterID src, int offset, RegisterID base)
405 {
406 m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
407 }
408
409 void andl_ir(int imm, RegisterID dst)
410 {
411 if (CAN_SIGN_EXTEND_8_32(imm)) {
412 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
413 m_formatter.immediate8(imm);
414 } else {
415 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
416 m_formatter.immediate32(imm);
417 }
418 }
419
420 void andl_im(int imm, int offset, RegisterID base)
421 {
422 if (CAN_SIGN_EXTEND_8_32(imm)) {
423 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
424 m_formatter.immediate8(imm);
425 } else {
426 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
427 m_formatter.immediate32(imm);
428 }
429 }
430
431 #if PLATFORM(X86_64)
432 void andq_rr(RegisterID src, RegisterID dst)
433 {
434 m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
435 }
436
437 void andq_ir(int imm, RegisterID dst)
438 {
439 if (CAN_SIGN_EXTEND_8_32(imm)) {
440 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
441 m_formatter.immediate8(imm);
442 } else {
443 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
444 m_formatter.immediate32(imm);
445 }
446 }
447 #else
448 void andl_im(int imm, void* addr)
449 {
450 if (CAN_SIGN_EXTEND_8_32(imm)) {
451 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
452 m_formatter.immediate8(imm);
453 } else {
454 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
455 m_formatter.immediate32(imm);
456 }
457 }
458 #endif
459
460 void negl_r(RegisterID dst)
461 {
462 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
463 }
464
465 void negl_m(int offset, RegisterID base)
466 {
467 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
468 }
469
470 void notl_r(RegisterID dst)
471 {
472 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
473 }
474
475 void notl_m(int offset, RegisterID base)
476 {
477 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
478 }
479
480 void orl_rr(RegisterID src, RegisterID dst)
481 {
482 m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
483 }
484
485 void orl_mr(int offset, RegisterID base, RegisterID dst)
486 {
487 m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
488 }
489
490 void orl_rm(RegisterID src, int offset, RegisterID base)
491 {
492 m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
493 }
494
495 void orl_ir(int imm, RegisterID dst)
496 {
497 if (CAN_SIGN_EXTEND_8_32(imm)) {
498 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
499 m_formatter.immediate8(imm);
500 } else {
501 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
502 m_formatter.immediate32(imm);
503 }
504 }
505
506 void orl_im(int imm, int offset, RegisterID base)
507 {
508 if (CAN_SIGN_EXTEND_8_32(imm)) {
509 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
510 m_formatter.immediate8(imm);
511 } else {
512 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
513 m_formatter.immediate32(imm);
514 }
515 }
516
517 #if PLATFORM(X86_64)
518 void orq_rr(RegisterID src, RegisterID dst)
519 {
520 m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
521 }
522
523 void orq_ir(int imm, RegisterID dst)
524 {
525 if (CAN_SIGN_EXTEND_8_32(imm)) {
526 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
527 m_formatter.immediate8(imm);
528 } else {
529 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
530 m_formatter.immediate32(imm);
531 }
532 }
533 #else
534 void orl_im(int imm, void* addr)
535 {
536 if (CAN_SIGN_EXTEND_8_32(imm)) {
537 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
538 m_formatter.immediate8(imm);
539 } else {
540 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
541 m_formatter.immediate32(imm);
542 }
543 }
544 #endif
545
546 void subl_rr(RegisterID src, RegisterID dst)
547 {
548 m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
549 }
550
551 void subl_mr(int offset, RegisterID base, RegisterID dst)
552 {
553 m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
554 }
555
556 void subl_rm(RegisterID src, int offset, RegisterID base)
557 {
558 m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
559 }
560
561 void subl_ir(int imm, RegisterID dst)
562 {
563 if (CAN_SIGN_EXTEND_8_32(imm)) {
564 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
565 m_formatter.immediate8(imm);
566 } else {
567 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
568 m_formatter.immediate32(imm);
569 }
570 }
571
572 void subl_im(int imm, int offset, RegisterID base)
573 {
574 if (CAN_SIGN_EXTEND_8_32(imm)) {
575 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
576 m_formatter.immediate8(imm);
577 } else {
578 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
579 m_formatter.immediate32(imm);
580 }
581 }
582
583 #if PLATFORM(X86_64)
584 void subq_rr(RegisterID src, RegisterID dst)
585 {
586 m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
587 }
588
589 void subq_ir(int imm, RegisterID dst)
590 {
591 if (CAN_SIGN_EXTEND_8_32(imm)) {
592 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
593 m_formatter.immediate8(imm);
594 } else {
595 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
596 m_formatter.immediate32(imm);
597 }
598 }
599 #else
600 void subl_im(int imm, void* addr)
601 {
602 if (CAN_SIGN_EXTEND_8_32(imm)) {
603 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
604 m_formatter.immediate8(imm);
605 } else {
606 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
607 m_formatter.immediate32(imm);
608 }
609 }
610 #endif
611
612 void xorl_rr(RegisterID src, RegisterID dst)
613 {
614 m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
615 }
616
617 void xorl_mr(int offset, RegisterID base, RegisterID dst)
618 {
619 m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
620 }
621
622 void xorl_rm(RegisterID src, int offset, RegisterID base)
623 {
624 m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
625 }
626
627 void xorl_im(int imm, int offset, RegisterID base)
628 {
629 if (CAN_SIGN_EXTEND_8_32(imm)) {
630 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
631 m_formatter.immediate8(imm);
632 } else {
633 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
634 m_formatter.immediate32(imm);
635 }
636 }
637
638 void xorl_ir(int imm, RegisterID dst)
639 {
640 if (CAN_SIGN_EXTEND_8_32(imm)) {
641 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
642 m_formatter.immediate8(imm);
643 } else {
644 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
645 m_formatter.immediate32(imm);
646 }
647 }
648
649 #if PLATFORM(X86_64)
650 void xorq_rr(RegisterID src, RegisterID dst)
651 {
652 m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
653 }
654
655 void xorq_ir(int imm, RegisterID dst)
656 {
657 if (CAN_SIGN_EXTEND_8_32(imm)) {
658 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
659 m_formatter.immediate8(imm);
660 } else {
661 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
662 m_formatter.immediate32(imm);
663 }
664 }
665 #endif
666
667 void sarl_i8r(int imm, RegisterID dst)
668 {
669 if (imm == 1)
670 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
671 else {
672 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
673 m_formatter.immediate8(imm);
674 }
675 }
676
677 void sarl_CLr(RegisterID dst)
678 {
679 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
680 }
681
682 void shll_i8r(int imm, RegisterID dst)
683 {
684 if (imm == 1)
685 m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
686 else {
687 m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
688 m_formatter.immediate8(imm);
689 }
690 }
691
692 void shll_CLr(RegisterID dst)
693 {
694 m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
695 }
696
697 #if PLATFORM(X86_64)
698 void sarq_CLr(RegisterID dst)
699 {
700 m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
701 }
702
703 void sarq_i8r(int imm, RegisterID dst)
704 {
705 if (imm == 1)
706 m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
707 else {
708 m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
709 m_formatter.immediate8(imm);
710 }
711 }
712 #endif
713
714 void imull_rr(RegisterID src, RegisterID dst)
715 {
716 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
717 }
718
719 void imull_mr(int offset, RegisterID base, RegisterID dst)
720 {
721 m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
722 }
723
724 void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
725 {
726 m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
727 m_formatter.immediate32(value);
728 }
729
730 void idivl_r(RegisterID dst)
731 {
732 m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
733 }
734
735 // Comparisons:
736
737 void cmpl_rr(RegisterID src, RegisterID dst)
738 {
739 m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
740 }
741
742 void cmpl_rm(RegisterID src, int offset, RegisterID base)
743 {
744 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
745 }
746
747 void cmpl_mr(int offset, RegisterID base, RegisterID src)
748 {
749 m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
750 }
751
752 void cmpl_ir(int imm, RegisterID dst)
753 {
754 if (CAN_SIGN_EXTEND_8_32(imm)) {
755 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
756 m_formatter.immediate8(imm);
757 } else {
758 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
759 m_formatter.immediate32(imm);
760 }
761 }
762
763 void cmpl_ir_force32(int imm, RegisterID dst)
764 {
765 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
766 m_formatter.immediate32(imm);
767 }
768
769 void cmpl_im(int imm, int offset, RegisterID base)
770 {
771 if (CAN_SIGN_EXTEND_8_32(imm)) {
772 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
773 m_formatter.immediate8(imm);
774 } else {
775 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
776 m_formatter.immediate32(imm);
777 }
778 }
779
780 void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
781 {
782 if (CAN_SIGN_EXTEND_8_32(imm)) {
783 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
784 m_formatter.immediate8(imm);
785 } else {
786 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
787 m_formatter.immediate32(imm);
788 }
789 }
790
791 void cmpl_im_force32(int imm, int offset, RegisterID base)
792 {
793 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
794 m_formatter.immediate32(imm);
795 }
796
797 #if PLATFORM(X86_64)
798 void cmpq_rr(RegisterID src, RegisterID dst)
799 {
800 m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
801 }
802
803 void cmpq_rm(RegisterID src, int offset, RegisterID base)
804 {
805 m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
806 }
807
808 void cmpq_mr(int offset, RegisterID base, RegisterID src)
809 {
810 m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
811 }
812
813 void cmpq_ir(int imm, RegisterID dst)
814 {
815 if (CAN_SIGN_EXTEND_8_32(imm)) {
816 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
817 m_formatter.immediate8(imm);
818 } else {
819 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
820 m_formatter.immediate32(imm);
821 }
822 }
823
824 void cmpq_im(int imm, int offset, RegisterID base)
825 {
826 if (CAN_SIGN_EXTEND_8_32(imm)) {
827 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
828 m_formatter.immediate8(imm);
829 } else {
830 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
831 m_formatter.immediate32(imm);
832 }
833 }
834
835 void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
836 {
837 if (CAN_SIGN_EXTEND_8_32(imm)) {
838 m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
839 m_formatter.immediate8(imm);
840 } else {
841 m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
842 m_formatter.immediate32(imm);
843 }
844 }
845 #else
846 void cmpl_rm(RegisterID reg, void* addr)
847 {
848 m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
849 }
850
851 void cmpl_im(int imm, void* addr)
852 {
853 if (CAN_SIGN_EXTEND_8_32(imm)) {
854 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
855 m_formatter.immediate8(imm);
856 } else {
857 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
858 m_formatter.immediate32(imm);
859 }
860 }
861 #endif
862
863 void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
864 {
865 m_formatter.prefix(PRE_OPERAND_SIZE);
866 m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
867 }
868
869 void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
870 {
871 if (CAN_SIGN_EXTEND_8_32(imm)) {
872 m_formatter.prefix(PRE_OPERAND_SIZE);
873 m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
874 m_formatter.immediate8(imm);
875 } else {
876 m_formatter.prefix(PRE_OPERAND_SIZE);
877 m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
878 m_formatter.immediate16(imm);
879 }
880 }
881
882 void testl_rr(RegisterID src, RegisterID dst)
883 {
884 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
885 }
886
887 void testl_i32r(int imm, RegisterID dst)
888 {
889 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
890 m_formatter.immediate32(imm);
891 }
892
893 void testl_i32m(int imm, int offset, RegisterID base)
894 {
895 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
896 m_formatter.immediate32(imm);
897 }
898
899 void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
900 {
901 m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
902 m_formatter.immediate32(imm);
903 }
904
905 #if PLATFORM(X86_64)
906 void testq_rr(RegisterID src, RegisterID dst)
907 {
908 m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
909 }
910
911 void testq_i32r(int imm, RegisterID dst)
912 {
913 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
914 m_formatter.immediate32(imm);
915 }
916
917 void testq_i32m(int imm, int offset, RegisterID base)
918 {
919 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
920 m_formatter.immediate32(imm);
921 }
922
923 void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
924 {
925 m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
926 m_formatter.immediate32(imm);
927 }
928 #endif
929
930 void testw_rr(RegisterID src, RegisterID dst)
931 {
932 m_formatter.prefix(PRE_OPERAND_SIZE);
933 m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
934 }
935
936 void testb_i8r(int imm, RegisterID dst)
937 {
938 m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
939 m_formatter.immediate8(imm);
940 }
941
942 void setCC_r(Condition cond, RegisterID dst)
943 {
944 m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
945 }
946
947 void sete_r(RegisterID dst)
948 {
949 m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
950 }
951
952 void setz_r(RegisterID dst)
953 {
954 sete_r(dst);
955 }
956
957 void setne_r(RegisterID dst)
958 {
959 m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
960 }
961
962 void setnz_r(RegisterID dst)
963 {
964 setne_r(dst);
965 }
966
967 // Various move ops:
968
969 void cdq()
970 {
971 m_formatter.oneByteOp(OP_CDQ);
972 }
973
974 void xchgl_rr(RegisterID src, RegisterID dst)
975 {
976 m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
977 }
978
979 #if PLATFORM(X86_64)
980 void xchgq_rr(RegisterID src, RegisterID dst)
981 {
982 m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
983 }
984 #endif
985
986 void movl_rr(RegisterID src, RegisterID dst)
987 {
988 m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
989 }
990
991 void movl_rm(RegisterID src, int offset, RegisterID base)
992 {
993 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
994 }
995
996 void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
997 {
998 m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
999 }
1000
1001 void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1002 {
1003 m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
1004 }
1005
1006 void movl_mEAX(void* addr)
1007 {
1008 m_formatter.oneByteOp(OP_MOV_EAXOv);
1009 #if PLATFORM(X86_64)
1010 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1011 #else
1012 m_formatter.immediate32(reinterpret_cast<int>(addr));
1013 #endif
1014 }
1015
1016 void movl_mr(int offset, RegisterID base, RegisterID dst)
1017 {
1018 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
1019 }
1020
1021 void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
1022 {
1023 m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
1024 }
1025
1026 void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1027 {
1028 m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
1029 }
1030
1031 void movl_i32r(int imm, RegisterID dst)
1032 {
1033 m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
1034 m_formatter.immediate32(imm);
1035 }
1036
1037 void movl_i32m(int imm, int offset, RegisterID base)
1038 {
1039 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1040 m_formatter.immediate32(imm);
1041 }
1042
1043 void movl_EAXm(void* addr)
1044 {
1045 m_formatter.oneByteOp(OP_MOV_OvEAX);
1046 #if PLATFORM(X86_64)
1047 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1048 #else
1049 m_formatter.immediate32(reinterpret_cast<int>(addr));
1050 #endif
1051 }
1052
1053 #if PLATFORM(X86_64)
1054 void movq_rr(RegisterID src, RegisterID dst)
1055 {
1056 m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
1057 }
1058
1059 void movq_rm(RegisterID src, int offset, RegisterID base)
1060 {
1061 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
1062 }
1063
1064 void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
1065 {
1066 m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
1067 }
1068
1069 void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
1070 {
1071 m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
1072 }
1073
1074 void movq_mEAX(void* addr)
1075 {
1076 m_formatter.oneByteOp64(OP_MOV_EAXOv);
1077 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1078 }
1079
1080 void movq_EAXm(void* addr)
1081 {
1082 m_formatter.oneByteOp64(OP_MOV_OvEAX);
1083 m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
1084 }
1085
1086 void movq_mr(int offset, RegisterID base, RegisterID dst)
1087 {
1088 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
1089 }
1090
1091 void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
1092 {
1093 m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
1094 }
1095
1096 void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1097 {
1098 m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
1099 }
1100
1101 void movq_i32m(int imm, int offset, RegisterID base)
1102 {
1103 m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
1104 m_formatter.immediate32(imm);
1105 }
1106
1107 void movq_i64r(int64_t imm, RegisterID dst)
1108 {
1109 m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
1110 m_formatter.immediate64(imm);
1111 }
1112
1113 void movsxd_rr(RegisterID src, RegisterID dst)
1114 {
1115 m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
1116 }
1117
1118
1119 #else
1120 void movl_rm(RegisterID src, void* addr)
1121 {
1122 if (src == X86::eax)
1123 movl_EAXm(addr);
1124 else
1125 m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
1126 }
1127
1128 void movl_mr(void* addr, RegisterID dst)
1129 {
1130 if (dst == X86::eax)
1131 movl_mEAX(addr);
1132 else
1133 m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
1134 }
1135
1136 void movl_i32m(int imm, void* addr)
1137 {
1138 m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
1139 m_formatter.immediate32(imm);
1140 }
1141 #endif
1142
1143 void movzwl_mr(int offset, RegisterID base, RegisterID dst)
1144 {
1145 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
1146 }
1147
1148 void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
1149 {
1150 m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
1151 }
1152
1153 void movzbl_rr(RegisterID src, RegisterID dst)
1154 {
1155 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1156 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1157 // REX prefixes are defined to be silently ignored by the processor.
1158 m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
1159 }
1160
1161 void leal_mr(int offset, RegisterID base, RegisterID dst)
1162 {
1163 m_formatter.oneByteOp(OP_LEA, dst, base, offset);
1164 }
1165 #if PLATFORM(X86_64)
1166 void leaq_mr(int offset, RegisterID base, RegisterID dst)
1167 {
1168 m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
1169 }
1170 #endif
1171
1172 // Flow control:
1173
1174 JmpSrc call()
1175 {
1176 m_formatter.oneByteOp(OP_CALL_rel32);
1177 return m_formatter.immediateRel32();
1178 }
1179
1180 JmpSrc call(RegisterID dst)
1181 {
1182 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
1183 return JmpSrc(m_formatter.size());
1184 }
1185
1186 void call_m(int offset, RegisterID base)
1187 {
1188 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
1189 }
1190
1191 JmpSrc jmp()
1192 {
1193 m_formatter.oneByteOp(OP_JMP_rel32);
1194 return m_formatter.immediateRel32();
1195 }
1196
1197 // Return a JmpSrc so we have a label to the jump, so that we can use this
1198 // to make a tail-recursive call on x86-64. The MacroAssembler
1199 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1200 JmpSrc jmp_r(RegisterID dst)
1201 {
1202 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
1203 return JmpSrc(m_formatter.size());
1204 }
1205
1206 void jmp_m(int offset, RegisterID base)
1207 {
1208 m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
1209 }
1210
1211 JmpSrc jne()
1212 {
1213 m_formatter.twoByteOp(jccRel32(ConditionNE));
1214 return m_formatter.immediateRel32();
1215 }
1216
1217 JmpSrc jnz()
1218 {
1219 return jne();
1220 }
1221
1222 JmpSrc je()
1223 {
1224 m_formatter.twoByteOp(jccRel32(ConditionE));
1225 return m_formatter.immediateRel32();
1226 }
1227
1228 JmpSrc jz()
1229 {
1230 return je();
1231 }
1232
1233 JmpSrc jl()
1234 {
1235 m_formatter.twoByteOp(jccRel32(ConditionL));
1236 return m_formatter.immediateRel32();
1237 }
1238
1239 JmpSrc jb()
1240 {
1241 m_formatter.twoByteOp(jccRel32(ConditionB));
1242 return m_formatter.immediateRel32();
1243 }
1244
1245 JmpSrc jle()
1246 {
1247 m_formatter.twoByteOp(jccRel32(ConditionLE));
1248 return m_formatter.immediateRel32();
1249 }
1250
1251 JmpSrc jbe()
1252 {
1253 m_formatter.twoByteOp(jccRel32(ConditionBE));
1254 return m_formatter.immediateRel32();
1255 }
1256
1257 JmpSrc jge()
1258 {
1259 m_formatter.twoByteOp(jccRel32(ConditionGE));
1260 return m_formatter.immediateRel32();
1261 }
1262
1263 JmpSrc jg()
1264 {
1265 m_formatter.twoByteOp(jccRel32(ConditionG));
1266 return m_formatter.immediateRel32();
1267 }
1268
1269 JmpSrc ja()
1270 {
1271 m_formatter.twoByteOp(jccRel32(ConditionA));
1272 return m_formatter.immediateRel32();
1273 }
1274
1275 JmpSrc jae()
1276 {
1277 m_formatter.twoByteOp(jccRel32(ConditionAE));
1278 return m_formatter.immediateRel32();
1279 }
1280
1281 JmpSrc jo()
1282 {
1283 m_formatter.twoByteOp(jccRel32(ConditionO));
1284 return m_formatter.immediateRel32();
1285 }
1286
1287 JmpSrc jp()
1288 {
1289 m_formatter.twoByteOp(jccRel32(ConditionP));
1290 return m_formatter.immediateRel32();
1291 }
1292
1293 JmpSrc js()
1294 {
1295 m_formatter.twoByteOp(jccRel32(ConditionS));
1296 return m_formatter.immediateRel32();
1297 }
1298
1299 JmpSrc jCC(Condition cond)
1300 {
1301 m_formatter.twoByteOp(jccRel32(cond));
1302 return m_formatter.immediateRel32();
1303 }
1304
1305 // SSE operations:
1306
1307 void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
1308 {
1309 m_formatter.prefix(PRE_SSE_F2);
1310 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1311 }
1312
1313 void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1314 {
1315 m_formatter.prefix(PRE_SSE_F2);
1316 m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
1317 }
1318
1319 void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
1320 {
1321 m_formatter.prefix(PRE_SSE_F2);
1322 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
1323 }
1324
1325 void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
1326 {
1327 m_formatter.prefix(PRE_SSE_F2);
1328 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
1329 }
1330
1331 #if !PLATFORM(X86_64)
1332 void cvtsi2sd_mr(void* address, XMMRegisterID dst)
1333 {
1334 m_formatter.prefix(PRE_SSE_F2);
1335 m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
1336 }
1337 #endif
1338
1339 void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
1340 {
1341 m_formatter.prefix(PRE_SSE_F2);
1342 m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
1343 }
1344
1345 void movd_rr(XMMRegisterID src, RegisterID dst)
1346 {
1347 m_formatter.prefix(PRE_SSE_66);
1348 m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
1349 }
1350
1351 #if PLATFORM(X86_64)
1352 void movq_rr(XMMRegisterID src, RegisterID dst)
1353 {
1354 m_formatter.prefix(PRE_SSE_66);
1355 m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
1356 }
1357
1358 void movq_rr(RegisterID src, XMMRegisterID dst)
1359 {
1360 m_formatter.prefix(PRE_SSE_66);
1361 m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
1362 }
1363 #endif
1364
1365 void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
1366 {
1367 m_formatter.prefix(PRE_SSE_F2);
1368 m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
1369 }
1370
1371 void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1372 {
1373 m_formatter.prefix(PRE_SSE_F2);
1374 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
1375 }
1376
1377 #if !PLATFORM(X86_64)
1378 void movsd_mr(void* address, XMMRegisterID dst)
1379 {
1380 m_formatter.prefix(PRE_SSE_F2);
1381 m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
1382 }
1383 #endif
1384
1385 void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
1386 {
1387 m_formatter.prefix(PRE_SSE_F2);
1388 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1389 }
1390
1391 void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1392 {
1393 m_formatter.prefix(PRE_SSE_F2);
1394 m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
1395 }
1396
1397 void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
1398 {
1399 m_formatter.prefix(PRE_SSE_66);
1400 m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
1401 m_formatter.immediate8(whichWord);
1402 }
1403
1404 void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
1405 {
1406 m_formatter.prefix(PRE_SSE_F2);
1407 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1408 }
1409
1410 void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1411 {
1412 m_formatter.prefix(PRE_SSE_F2);
1413 m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
1414 }
1415
1416 void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
1417 {
1418 m_formatter.prefix(PRE_SSE_66);
1419 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1420 }
1421
1422 void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
1423 {
1424 m_formatter.prefix(PRE_SSE_66);
1425 m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
1426 }
1427
1428 void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
1429 {
1430 m_formatter.prefix(PRE_SSE_F2);
1431 m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
1432 }
1433
1434 void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
1435 {
1436 m_formatter.prefix(PRE_SSE_F2);
1437 m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
1438 }
1439
1440 void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
1441 {
1442 m_formatter.prefix(PRE_SSE_66);
1443 m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
1444 }
1445
1446 // Misc instructions:
1447
1448 void int3()
1449 {
1450 m_formatter.oneByteOp(OP_INT3);
1451 }
1452
1453 void ret()
1454 {
1455 m_formatter.oneByteOp(OP_RET);
1456 }
1457
1458 void predictNotTaken()
1459 {
1460 m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
1461 }
1462
1463 // Assembler admin methods:
1464
1465 JmpDst label()
1466 {
1467 return JmpDst(m_formatter.size());
1468 }
1469
1470 static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
1471 {
1472 return JmpDst(jump.m_offset + offset);
1473 }
1474
1475 JmpDst align(int alignment)
1476 {
1477 while (!m_formatter.isAligned(alignment))
1478 m_formatter.oneByteOp(OP_HLT);
1479
1480 return label();
1481 }
1482
1483 // Linking & patching:
1484 //
1485 // The 'link' and 'patch' methods are for use on unprotected code - such as the code
1486 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1487 // code has been finalized it is (platform support permitting) within a non-
1488 // writable region of memory; to modify the code in an execute-only executable
1489 // pool the 'repatch' and 'relink' methods should be used.
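//
// A minimal usage sketch of the linking flow (illustrative annotation only, using
// members defined in this class):
//
// X86Assembler jit;
// X86Assembler::JmpSrc branch = jit.jne(); // emits 0F 85 plus a 4-byte placeholder
// /* ... emit the not-taken path ... */
// X86Assembler::JmpDst target = jit.label();
// jit.linkJump(branch, target); // backpatches the rel32 in the unprotected buffer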
1490
1491 void linkJump(JmpSrc from, JmpDst to)
1492 {
1493 ASSERT(from.m_offset != -1);
1494 ASSERT(to.m_offset != -1);
1495
1496 char* code = reinterpret_cast<char*>(m_formatter.data());
1497 setRel32(code + from.m_offset, code + to.m_offset);
1498 }
1499
1500 static void linkJump(void* code, JmpSrc from, void* to)
1501 {
1502 ASSERT(from.m_offset != -1);
1503
1504 setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
1505 }
1506
1507 static void linkCall(void* code, JmpSrc from, void* to)
1508 {
1509 ASSERT(from.m_offset != -1);
1510
1511 setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
1512 }
1513
1514 static void linkPointer(void* code, JmpDst where, void* value)
1515 {
1516 ASSERT(where.m_offset != -1);
1517
1518 setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
1519 }
1520
1521 static void relinkJump(void* from, void* to)
1522 {
1523 setRel32(from, to);
1524 }
1525
1526 static void relinkCall(void* from, void* to)
1527 {
1528 setRel32(from, to);
1529 }
1530
1531 static void repatchInt32(void* where, int32_t value)
1532 {
1533 setInt32(where, value);
1534 }
1535
1536 static void repatchPointer(void* where, void* value)
1537 {
1538 setPointer(where, value);
1539 }
1540
1541 static void repatchLoadPtrToLEA(void* where)
1542 {
1543 #if PLATFORM(X86_64)
1544 // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
1545 // Skip over the prefix byte.
1546 where = reinterpret_cast<char*>(where) + 1;
1547 #endif
1548 *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
1549 }
1550
1551 static unsigned getCallReturnOffset(JmpSrc call)
1552 {
1553 ASSERT(call.m_offset >= 0);
1554 return call.m_offset;
1555 }
1556
1557 static void* getRelocatedAddress(void* code, JmpSrc jump)
1558 {
1559 ASSERT(jump.m_offset != -1);
1560
1561 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
1562 }
1563
1564 static void* getRelocatedAddress(void* code, JmpDst destination)
1565 {
1566 ASSERT(destination.m_offset != -1);
1567
1568 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
1569 }
1570
1571 static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
1572 {
1573 return dst.m_offset - src.m_offset;
1574 }
1575
1576 static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
1577 {
1578 return dst.m_offset - src.m_offset;
1579 }
1580
1581 static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
1582 {
1583 return dst.m_offset - src.m_offset;
1584 }
1585
1586 void* executableCopy(ExecutablePool* allocator)
1587 {
1588 void* copy = m_formatter.executableCopy(allocator);
1589 ASSERT(copy);
1590 return copy;
1591 }
1592
1593 private:
1594
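// Annotation: 'where' points just past the operand being rewritten; the helpers below
// therefore store to the bytes immediately preceding it, hence the [-1] indexing.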
1595 static void setPointer(void* where, void* value)
1596 {
1597 reinterpret_cast<void**>(where)[-1] = value;
1598 }
1599
1600 static void setInt32(void* where, int32_t value)
1601 {
1602 reinterpret_cast<int32_t*>(where)[-1] = value;
1603 }
1604
1605 static void setRel32(void* from, void* to)
1606 {
1607 intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
1608 ASSERT(offset == static_cast<int32_t>(offset));
1609
1610 setInt32(from, offset);
1611 }
1612
1613 class X86InstructionFormatter {
1614
1615 static const int maxInstructionSize = 16;
1616
1617 public:
1618
1619 // Legacy prefix bytes:
1620 //
1621 // These are emitted prior to the instruction.
1622
1623 void prefix(OneByteOpcodeID pre)
1624 {
1625 m_buffer.putByte(pre);
1626 }
1627
1628 // Word-sized operands / no operand instruction formatters.
1629 //
1630 // In addition to the opcode, the following operand permutations are supported:
1631 // * None - instruction takes no operands.
1632 // * One register - the low three bits of the RegisterID are added into the opcode.
1633 // * Two registers - encode a register-form ModRM (for all ModRM formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1634 // * Three-argument ModRM - a register, and a register and an offset describing a memory operand.
1635 // * Five-argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1636 //
1637 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1638 // On 64-bit targets REX prefixes will be planted as necessary, where high-numbered registers are used.
1639 //
1640 // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
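//
// For example (annotation, not in the original source), movl_mr(8, X86::ebp, X86::eax)
// above reaches oneByteOp(OP_MOV_GvEv, eax, ebp, 8) and emits 8B 45 08: the opcode, a
// ModRM byte selecting [ebp + disp8], and the 8-bit displacement.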
1641
1642 void oneByteOp(OneByteOpcodeID opcode)
1643 {
1644 m_buffer.ensureSpace(maxInstructionSize);
1645 m_buffer.putByteUnchecked(opcode);
1646 }
1647
1648 void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
1649 {
1650 m_buffer.ensureSpace(maxInstructionSize);
1651 emitRexIfNeeded(0, 0, reg);
1652 m_buffer.putByteUnchecked(opcode + (reg & 7));
1653 }
1654
1655 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
1656 {
1657 m_buffer.ensureSpace(maxInstructionSize);
1658 emitRexIfNeeded(reg, 0, rm);
1659 m_buffer.putByteUnchecked(opcode);
1660 registerModRM(reg, rm);
1661 }
1662
1663 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1664 {
1665 m_buffer.ensureSpace(maxInstructionSize);
1666 emitRexIfNeeded(reg, 0, base);
1667 m_buffer.putByteUnchecked(opcode);
1668 memoryModRM(reg, base, offset);
1669 }
1670
1671 void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1672 {
1673 m_buffer.ensureSpace(maxInstructionSize);
1674 emitRexIfNeeded(reg, 0, base);
1675 m_buffer.putByteUnchecked(opcode);
1676 memoryModRM_disp32(reg, base, offset);
1677 }
1678
1679 void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1680 {
1681 m_buffer.ensureSpace(maxInstructionSize);
1682 emitRexIfNeeded(reg, index, base);
1683 m_buffer.putByteUnchecked(opcode);
1684 memoryModRM(reg, base, index, scale, offset);
1685 }
1686
1687 #if !PLATFORM(X86_64)
1688 void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
1689 {
1690 m_buffer.ensureSpace(maxInstructionSize);
1691 m_buffer.putByteUnchecked(opcode);
1692 memoryModRM(reg, address);
1693 }
1694 #endif
1695
1696 void twoByteOp(TwoByteOpcodeID opcode)
1697 {
1698 m_buffer.ensureSpace(maxInstructionSize);
1699 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1700 m_buffer.putByteUnchecked(opcode);
1701 }
1702
1703 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1704 {
1705 m_buffer.ensureSpace(maxInstructionSize);
1706 emitRexIfNeeded(reg, 0, rm);
1707 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1708 m_buffer.putByteUnchecked(opcode);
1709 registerModRM(reg, rm);
1710 }
1711
1712 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
1713 {
1714 m_buffer.ensureSpace(maxInstructionSize);
1715 emitRexIfNeeded(reg, 0, base);
1716 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1717 m_buffer.putByteUnchecked(opcode);
1718 memoryModRM(reg, base, offset);
1719 }
1720
1721 void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1722 {
1723 m_buffer.ensureSpace(maxInstructionSize);
1724 emitRexIfNeeded(reg, index, base);
1725 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1726 m_buffer.putByteUnchecked(opcode);
1727 memoryModRM(reg, base, index, scale, offset);
1728 }
1729
1730 #if !PLATFORM(X86_64)
1731 void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
1732 {
1733 m_buffer.ensureSpace(maxInstructionSize);
1734 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1735 m_buffer.putByteUnchecked(opcode);
1736 memoryModRM(reg, address);
1737 }
1738 #endif
1739
1740 #if PLATFORM(X86_64)
1741 // Quad-word-sized operands:
1742 //
1743 // Used to format 64-bit operations, planting a REX.w prefix.
1744 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1745 // the normal (non-'64'-postfixed) formatters should be used.
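//
// For example (annotation): movq_rr(X86::eax, X86::ecx) above uses oneByteOp64(OP_MOV_EvGv, ...)
// and emits 48 89 C1 - the REX.W-prefixed form of the 89 C1 emitted by movl_rr(eax, ecx).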
1746
1747 void oneByteOp64(OneByteOpcodeID opcode)
1748 {
1749 m_buffer.ensureSpace(maxInstructionSize);
1750 emitRexW(0, 0, 0);
1751 m_buffer.putByteUnchecked(opcode);
1752 }
1753
1754 void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
1755 {
1756 m_buffer.ensureSpace(maxInstructionSize);
1757 emitRexW(0, 0, reg);
1758 m_buffer.putByteUnchecked(opcode + (reg & 7));
1759 }
1760
1761 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
1762 {
1763 m_buffer.ensureSpace(maxInstructionSize);
1764 emitRexW(reg, 0, rm);
1765 m_buffer.putByteUnchecked(opcode);
1766 registerModRM(reg, rm);
1767 }
1768
1769 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1770 {
1771 m_buffer.ensureSpace(maxInstructionSize);
1772 emitRexW(reg, 0, base);
1773 m_buffer.putByteUnchecked(opcode);
1774 memoryModRM(reg, base, offset);
1775 }
1776
1777 void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
1778 {
1779 m_buffer.ensureSpace(maxInstructionSize);
1780 emitRexW(reg, 0, base);
1781 m_buffer.putByteUnchecked(opcode);
1782 memoryModRM_disp32(reg, base, offset);
1783 }
1784
oneByteOp64(OneByteOpcodeID opcode,int reg,RegisterID base,RegisterID index,int scale,int offset)1785 void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
1786 {
1787 m_buffer.ensureSpace(maxInstructionSize);
1788 emitRexW(reg, index, base);
1789 m_buffer.putByteUnchecked(opcode);
1790 memoryModRM(reg, base, index, scale, offset);
1791 }
1792
twoByteOp64(TwoByteOpcodeID opcode,int reg,RegisterID rm)1793 void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
1794 {
1795 m_buffer.ensureSpace(maxInstructionSize);
1796 emitRexW(reg, 0, rm);
1797 m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
1798 m_buffer.putByteUnchecked(opcode);
1799 registerModRM(reg, rm);
1800 }
1801 #endif
1802
1803 // Byte-operands:
1804 //
1805 // These methods format byte operations. Byte operations differ from the normal
1806 // formatters in the circumstances under which they will decide to emit REX prefixes.
1807 // These should be used where any register operand signifies a byte register.
1808 //
1809 // The disctinction is due to the handling of register numbers in the range 4..7 on
1810 // x86-64. These register numbers may either represent the second byte of the first
1811 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1812 //
1813 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1814 // be accessed where a REX prefix is present), these are likely best treated as
1815 // deprecated. In order to ensure the correct registers spl..dil are selected a
1816 // REX prefix will be emitted for any byte register operand in the range 4..15.
1817 //
1818 // These formatters may be used in instructions where a mix of operand sizes, in which
1819 // case an unnecessary REX will be emitted, for example:
1820 // movzbl %al, %edi
1821 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1822 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1823 // be silently ignored by the processor.
1824 //
1825 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1826 // is provided to check byte register operands.
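        //
        // Illustrative example: encoding setne with %dil as the destination requires a REX prefix,
        // giving the byte sequence 0x40 0x0f 0x95 0xc7; without the leading 0x40 the same ModRM
        // byte would select %bh rather than %dil.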

        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) | byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // Immediates:
        //
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.
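        //
        // Illustrative usage: an instruction taking a 32-bit immediate is emitted as an opcode
        // formatter call followed by immediate32(imm); the ensureSpace(maxInstructionSize) call in
        // the opcode formatter reserves room for the trailing immediate as well.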

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

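        // Plants a zeroed 4-byte placeholder for a rel32 operand and returns a JmpSrc marking the
        // position immediately after it, so the displacement can be patched once the jump target
        // is known.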
        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }

        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }

    private:

        // Internals; ModRm and REX formatters.

        static const RegisterID noBase = X86::ebp;
        static const RegisterID hasSib = X86::esp;
        static const RegisterID noIndex = X86::esp;
#if PLATFORM(X86_64)
        static const RegisterID noBase2 = X86::r13;
        static const RegisterID hasSib2 = X86::r12;

        // Registers r8 and above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86::r8);
        }

        // Byte operand registers spl and above require a REX prefix (to prevent the 'H' registers
        // from being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86::esp);
        }

        // Format a REX prefix byte.
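        // The byte has the form 0b0100WRXB: W selects a 64-bit operand size, while R, X and B
        // supply bit 3 of the ModRM reg field, the SIB index and the ModRM rm / SIB base register
        // numbers respectively.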
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r >> 3) << 2) | ((x >> 3) << 1) | (b >> 3));
        }

        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition)
                emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif

        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

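        // A ModRM byte is laid out as mod (2 bits), reg (3 bits), rm (3 bits); the ModRmMode values
        // above correspond directly to the four mod encodings. A SIB byte, when present, is laid
        // out as scale (2 bits), index (3 bits), base (3 bits). For example, putModRm(ModRmRegister,
        // 2, X86::ebx) emits the single byte 0xd3.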
        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }

        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
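            // For example (illustrative): with base = esp and an offset of 8, the bytes emitted are
            // a ModRM byte (mod=01, rm=100 indicating that a SIB follows), a SIB byte with
            // index=100 (no index) and base=100 (esp), and then the 8-bit displacement.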
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if PLATFORM(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }

        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }

        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

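            // For example (illustrative): with reg = eax, base = ebx, index = ecx, scale = 1
            // (i.e. index*2) and offset = 4, this emits ModRM 0x44, SIB 0x4b and disp8 0x04,
            // addressing [ebx + ecx*2 + 4].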
#if PLATFORM(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }

#if !PLATFORM(X86_64)
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
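            // (On 32-bit x86, mod=00 with rm=ebp encodes an absolute [disp32] address with no base
            // register, which is why the 32-bit address can simply follow the ModRM byte.)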
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif

        AssemblerBuffer m_buffer;
    } m_formatter;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))

#endif // X86Assembler_h